Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-12 15:38:18 -05:00)
Compare commits
31 Commits
6b837c01f3
5b6b145753
c07975acdf
dfdb2ecf07
a6f2457040
fa0927c5dc
f92029aaeb
45b23edde9
33b1b3cb51
51c930d7da
4cfc5511fb
06beb5dca3
968a396b5e
fa2401c081
438a9fb1d6
1c22307f08
22dd3901f0
54d823677f
e3cf2cb82b
b6025425ac
3ab5752276
c4ba0f9178
f0e8fbe738
2059b49624
bc8f9dbc83
cc2441d42d
5d965d49db
233fff0333
41ce22be05
3353e36d16
f2a656d67b
.github/workflows/docker.yml (vendored), 32 changed lines
@@ -49,8 +49,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

@@ -94,8 +94,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

@@ -139,8 +139,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

@@ -184,8 +184,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

@@ -229,8 +229,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

@@ -274,8 +274,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

@@ -318,8 +318,8 @@ jobs:
file: ./build/dockerfiles/coordinator-api.Dockerfile
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

@@ -363,7 +363,7 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

.github/workflows/prover.yml (vendored), 2 changed lines
@@ -43,7 +43,7 @@ jobs:
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 5
timeout-minutes: 15
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master

@@ -8,7 +8,7 @@ require (
github.com/go-redis/redis/v8 v8.11.5
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
github.com/scroll-tech/go-ethereum v1.10.14-0.20250225152658-bcfdb48dd939
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.7.0

@@ -17,7 +17,7 @@ require (

require (
dario.cat/mergo v1.0.0 // indirect
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect

@@ -30,19 +30,18 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible // indirect
github.com/docker/docker v26.1.0+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-kit/kit v0.9.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect

@@ -63,7 +62,7 @@ require (
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect

@@ -89,10 +88,11 @@ require (
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb // indirect
github.com/scroll-tech/da-codec v0.1.3-0.20250210041951-d028c537b995 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/sethvargo/go-retry v0.2.4 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.12 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect

@@ -11,9 +11,11 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
|
||||
@@ -65,8 +67,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
|
||||
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
|
||||
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
|
||||
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
@@ -86,8 +88,8 @@ github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T
|
||||
github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E=
|
||||
github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
|
||||
github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
|
||||
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
@@ -108,9 +110,8 @@ github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=
|
||||
github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=
|
||||
github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI=
|
||||
github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY=
|
||||
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
@@ -202,8 +203,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
|
||||
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
@@ -308,10 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250210041951-d028c537b995 h1:Zo1p42CUS9pADSKoDD0ZoDxf4dQ3gttqWZlV+RSeImk=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250210041951-d028c537b995/go.mod h1:UZhhjzqYsyEhcvY0Y+SP+oMdeOUqFn/UXpbAYuPGzg0=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250225152658-bcfdb48dd939 h1:KODmYD4s4BY/SBheCHqGbATnGPLQKzTJVuAElA8Eh+0=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250225152658-bcfdb48dd939/go.mod h1:AgU8JJxC7+nfs7R7ma35AU7dMAGW7wCw3dRZRefIKyQ=
|
||||
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
|
||||
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
|
||||
@@ -324,6 +325,8 @@ github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5g
|
||||
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
|
||||
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
|
||||
@@ -415,6 +418,7 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
||||
@@ -2,6 +2,7 @@ package orm

import (
    "context"
    "errors"
    "fmt"
    "time"

@@ -45,7 +46,7 @@ func (c *BatchEvent) GetBatchEventSyncedHeightInDB(ctx context.Context) (uint64,
    db = db.Model(&BatchEvent{})
    db = db.Order("l1_block_number desc")
    if err := db.First(&batch).Error; err != nil {
        if err == gorm.ErrRecordNotFound {
        if errors.Is(err, gorm.ErrRecordNotFound) {
            return 0, nil
        }
        return 0, fmt.Errorf("failed to get batch synced height in db, error: %w", err)

@@ -62,7 +63,7 @@ func (c *BatchEvent) GetLastUpdatedFinalizedBlockHeight(ctx context.Context) (ui
    db = db.Where("update_status = ?", btypes.UpdateStatusTypeUpdated)
    db = db.Order("batch_index desc")
    if err := db.First(&batch).Error; err != nil {
        if err == gorm.ErrRecordNotFound {
        if errors.Is(err, gorm.ErrRecordNotFound) {
            // No finalized batch found, return genesis batch's end block number.
            return 0, nil
        }

@@ -81,7 +82,7 @@ func (c *BatchEvent) GetUnupdatedFinalizedBatchesLEBlockHeight(ctx context.Conte
    db = db.Where("update_status = ?", btypes.UpdateStatusTypeUnupdated)
    db = db.Order("batch_index asc")
    if err := db.Find(&batches).Error; err != nil {
        if err == gorm.ErrRecordNotFound {
        if errors.Is(err, gorm.ErrRecordNotFound) {
            return nil, nil
        }
        return nil, fmt.Errorf("failed to get unupdated finalized batches >= block height, error: %w", err)

@@ -2,6 +2,7 @@ package orm

import (
    "context"
    "errors"
    "fmt"
    "time"

@@ -84,7 +85,7 @@ func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageTy
        db = db.Order("l2_block_number desc")
    }
    if err := db.First(&message).Error; err != nil {
        if err == gorm.ErrRecordNotFound {
        if errors.Is(err, gorm.ErrRecordNotFound) {
            return 0, nil
        }
        return 0, fmt.Errorf("failed to get latest processed height, type: %v, error: %w", messageType, err)

@@ -108,7 +109,7 @@ func (c *CrossMessage) GetL2LatestFinalizedWithdrawal(ctx context.Context) (*Cro
    db = db.Where("rollup_status = ?", btypes.RollupStatusTypeFinalized)
    db = db.Order("message_nonce desc")
    if err := db.First(&message).Error; err != nil {
        if err == gorm.ErrRecordNotFound {
        if errors.Is(err, gorm.ErrRecordNotFound) {
            return nil, nil
        }
        return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)

@@ -127,10 +128,10 @@ func (c *CrossMessage) GetL2WithdrawalsByBlockRange(ctx context.Context, startBl
    db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
    db = db.Order("message_nonce asc")
    if err := db.Find(&messages).Error; err != nil {
        if err == gorm.ErrRecordNotFound {
        if errors.Is(err, gorm.ErrRecordNotFound) {
            return nil, nil
        }
        return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
        return nil, fmt.Errorf("failed to get L2 withdrawals by block range, error: %v", err)
    }
    return messages, nil
}

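The hunks above replace direct == comparisons against gorm.ErrRecordNotFound with errors.Is. A minimal standalone sketch of the difference, assuming the standard gorm.io/gorm module (the import path is not shown in this diff): errors.Is still matches the sentinel after it has been wrapped with %w, while == only matches the exact value.

package main

import (
    "errors"
    "fmt"

    "gorm.io/gorm"
)

func main() {
    // A "record not found" error wrapped the way the query helpers above
    // wrap their errors (fmt.Errorf with %w).
    wrapped := fmt.Errorf("failed to get batch synced height in db, error: %w", gorm.ErrRecordNotFound)

    // Direct comparison only matches the exact sentinel value, so it misses
    // the wrapped error.
    fmt.Println(wrapped == gorm.ErrRecordNotFound) // false

    // errors.Is walks the wrap chain and still recognizes the sentinel.
    fmt.Println(errors.Is(wrapped, gorm.ErrRecordNotFound)) // true
}
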
@@ -1,5 +1,5 @@
# Download Go dependencies
FROM golang:1.21-alpine3.19 as base
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base

WORKDIR /src
COPY ./bridge-history-api/go.* ./

@@ -10,10 +10,11 @@ FROM base as builder

RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge-history-api/cmd/db_cli && go build -v -p 4 -o /bin/db_cli
cd /src/bridge-history-api/cmd/db_cli && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/db_cli

# Pull db_cli into a second stage deploy alpine container
FROM alpine:latest
# Pull db_cli into a second stage deploy ubuntu container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/db_cli /bin/
WORKDIR /app
ENTRYPOINT ["db_cli"]

@@ -40,7 +40,7 @@ FROM ubuntu:20.04
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/lib
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
# ENV CHAIN_ID=534353
RUN apt update && apt install vim netcat-openbsd net-tools curl -y
RUN apt update && apt install vim netcat-openbsd net-tools curl jq -y
RUN mkdir -p /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/coordinator_api /bin/

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.21 as base
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base

WORKDIR /src
COPY go.work* ./

@@ -16,10 +16,11 @@ FROM base as builder

RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/database/cmd && go build -v -p 4 -o /bin/db_cli
cd /src/database/cmd && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/db_cli

# Pull db_cli into a second stage deploy alpine container
FROM alpine:latest
# Pull db_cli into a second stage deploy ubuntu container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/db_cli /bin/
WORKDIR /app
ENTRYPOINT ["db_cli"]

@@ -1,56 +0,0 @@
package forks

import (
    "math/big"

    "github.com/scroll-tech/da-codec/encoding"
    "github.com/scroll-tech/go-ethereum/params"
)

// GetHardforkName returns the name of the hardfork active at the given block height and timestamp.
// It checks the chain configuration to determine which hardfork is active.
func GetHardforkName(config *params.ChainConfig, blockHeight, blockTimestamp uint64) string {
    if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) {
        return "homestead"
    } else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) {
        return "bernoulli"
    } else if !config.IsDarwin(blockTimestamp) {
        return "curie"
    } else if !config.IsDarwinV2(blockTimestamp) {
        return "darwin"
    } else {
        return "darwinV2"
    }
}

// GetCodecVersion returns the encoding codec version for the given block height and timestamp.
// It determines the appropriate codec version based on the active hardfork.
func GetCodecVersion(config *params.ChainConfig, blockHeight, blockTimestamp uint64) encoding.CodecVersion {
    if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) {
        return encoding.CodecV0
    } else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) {
        return encoding.CodecV1
    } else if !config.IsDarwin(blockTimestamp) {
        return encoding.CodecV2
    } else if !config.IsDarwinV2(blockTimestamp) {
        return encoding.CodecV3
    } else {
        return encoding.CodecV4
    }
}

// GetMaxChunksPerBatch returns the maximum number of chunks allowed per batch for the given block height and timestamp.
// This value may change depending on the active hardfork.
func GetMaxChunksPerBatch(config *params.ChainConfig, blockHeight, blockTimestamp uint64) uint64 {
    if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) {
        return 15
    } else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) {
        return 15
    } else if !config.IsDarwin(blockTimestamp) {
        return 45
    } else if !config.IsDarwinV2(blockTimestamp) {
        return 45
    } else {
        return 45
    }
}

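The removed forks helpers above map a block's height and timestamp to a hardfork name, codec version, and chunk limit. A self-contained sketch of the same selection pattern, using a hypothetical forkConfig struct and plain integers in place of params.ChainConfig and encoding.CodecVersion (those types and their field names are not part of this diff):

package main

import "fmt"

// forkConfig is a hypothetical stand-in for the chain config consulted by the
// removed helpers; the real code calls IsBernoulli/IsCurie/IsDarwin/IsDarwinV2
// on params.ChainConfig from scroll-tech/go-ethereum.
type forkConfig struct {
    bernoulliBlock uint64 // first block of Bernoulli
    curieBlock     uint64 // first block of Curie
    darwinTime     uint64 // activation timestamp of Darwin
    darwinV2Time   uint64 // activation timestamp of DarwinV2
}

// codecVersionFor mirrors the shape of the removed GetCodecVersion: each later
// fork bumps the codec version, with pre-Bernoulli blocks staying on version 0.
func codecVersionFor(cfg forkConfig, height, timestamp uint64) int {
    switch {
    case height < cfg.bernoulliBlock:
        return 0 // CodecV0
    case height < cfg.curieBlock:
        return 1 // CodecV1
    case timestamp < cfg.darwinTime:
        return 2 // CodecV2
    case timestamp < cfg.darwinV2Time:
        return 3 // CodecV3
    default:
        return 4 // CodecV4
    }
}

func main() {
    cfg := forkConfig{bernoulliBlock: 100, curieBlock: 200, darwinTime: 1_700_000_000, darwinV2Time: 1_710_000_000}
    fmt.Println(codecVersionFor(cfg, 50, 0))              // 0: before Bernoulli
    fmt.Println(codecVersionFor(cfg, 150, 0))             // 1: Bernoulli active, before Curie
    fmt.Println(codecVersionFor(cfg, 300, 1_705_000_000)) // 3: Darwin active, before DarwinV2
}
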
@@ -13,8 +13,7 @@ require (
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
github.com/scroll-tech/go-ethereum v1.10.14-0.20241011150208-4742882675d8
github.com/stretchr/testify v1.9.0
github.com/testcontainers/testcontainers-go v0.30.0
github.com/testcontainers/testcontainers-go/modules/compose v0.30.0

@@ -29,6 +28,7 @@ require (
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/AlecAivazis/survey/v2 v2.3.7 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/DataDog/zstd v1.4.5 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/hcsshim v0.11.4 // indirect
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect

@@ -46,6 +46,7 @@ require (
github.com/aws/smithy-go v1.15.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
github.com/buger/goterm v1.0.4 // indirect
github.com/bytedance/sonic v1.10.1 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect

@@ -53,6 +54,11 @@ require (
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
github.com/chenzhuoyu/iasm v0.9.0 // indirect
github.com/cloudflare/cfssl v1.6.5 // indirect
github.com/cockroachdb/errors v1.11.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble v1.1.0 // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.1 // indirect

@@ -65,7 +71,8 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/distribution/reference v0.5.0 // indirect
github.com/docker/buildx v0.12.0-rc2.0.20231219140829-617f538cb315 // indirect
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible // indirect

@@ -76,15 +83,16 @@ require (
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/fsnotify/fsevents v0.1.1 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect

@@ -100,6 +108,7 @@ require (
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/googleapis v1.4.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect

@@ -116,7 +125,7 @@ require (
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect

@@ -135,12 +144,14 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect

@@ -178,11 +189,11 @@ require (
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/tsdb v0.7.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/da-codec v0.1.2 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect

@@ -191,6 +202,7 @@ require (
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/cobra v1.8.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.4.0 // indirect

@@ -228,7 +240,9 @@ require (
go.opentelemetry.io/otel/sdk/metric v1.19.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/mock v0.4.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/arch v0.5.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect

@@ -248,8 +262,7 @@ require (
google.golang.org/grpc v1.59.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.26.7 // indirect

@@ -15,6 +15,10 @@ github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1r
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
|
||||
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
|
||||
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
|
||||
github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
@@ -30,6 +34,8 @@ github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:H
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
|
||||
@@ -75,6 +81,8 @@ github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
@@ -115,6 +123,18 @@ github.com/cloudflare/cfssl v1.6.5/go.mod h1:Bk1si7sq8h2+yVEDrFJiz3d7Aw+pfjjJSZV
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4=
|
||||
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
|
||||
github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=
|
||||
github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
|
||||
github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4=
|
||||
github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E=
|
||||
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
|
||||
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
|
||||
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
|
||||
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60 h1:NlkpaLBPFr05mNJWVMH7PP4L30gFG6k4z1QpypLUSh8=
|
||||
@@ -163,8 +183,12 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
|
||||
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
|
||||
github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI=
|
||||
github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
@@ -199,8 +223,6 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU
|
||||
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM=
|
||||
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ=
|
||||
@@ -212,8 +234,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
|
||||
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
|
||||
@@ -230,6 +252,8 @@ github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q
|
||||
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
|
||||
github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
|
||||
github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gin-contrib/pprof v1.4.0 h1:XxiBSf5jWZ5i16lNOPbMTVdgHBdhfGRD5PZ1LWazzvg=
|
||||
github.com/gin-contrib/pprof v1.4.0/go.mod h1:RrehPJasUVBPK6yTUwOl8/NP6i0vbUgmxtis+Z5KE90=
|
||||
@@ -238,13 +262,12 @@ github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm
|
||||
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
|
||||
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
|
||||
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
|
||||
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
|
||||
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
@@ -293,6 +316,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
|
||||
@@ -371,12 +396,12 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
|
||||
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
|
||||
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
|
||||
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw=
|
||||
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||
@@ -434,8 +459,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
|
||||
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
@@ -453,6 +478,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
@@ -469,8 +496,9 @@ github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V
|
||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
|
||||
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
@@ -581,6 +609,8 @@ github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko
|
||||
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
||||
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
|
||||
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
|
||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@@ -617,26 +647,24 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
|
||||
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
|
||||
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
|
||||
github.com/scroll-tech/da-codec v0.1.2 h1:QyJ+dQ4zWVVJwuqxNt4MiKyrymVc6rHe4YPtURkjiRc=
|
||||
github.com/scroll-tech/da-codec v0.1.2/go.mod h1:odz1ck3umvYccCG03osaQBISAYGinZktZYbpk94fYRE=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20241011150208-4742882675d8 h1:pEP6+ThQIgSRO5SILiO6iBpWnxAUjoRNBA9Nc6ooOS0=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20241011150208-4742882675d8/go.mod h1:MBHX2RcAV9KLWblo9DSa/xyPYd1Wpwnt64JSDOy85po=
|
||||
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
|
||||
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
|
||||
@@ -660,6 +688,8 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spdx/tools-golang v0.5.1 h1:fJg3SVOGG+eIva9ZUBm/hvyA7PIPVFjRxUKe6fdAgwE=
|
||||
github.com/spdx/tools-golang v0.5.1/go.mod h1:/DRDQuBfB37HctM29YtrX1v+bXiVmT2OpQDalRmX9aU=
|
||||
@@ -801,11 +831,15 @@ go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
|
||||
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
|
||||
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
|
||||
@@ -900,6 +934,7 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -995,15 +1030,13 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
|
||||
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1 h1:d4KQkxAaAiRY2h5Zqis161Pv91A37uZyJOx73duwUwM=
|
||||
gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
@@ -1021,7 +1054,6 @@ gorm.io/driver/postgres v1.5.7 h1:8ptbNJTDbEmhdr62uReG5BGkdQyeasu/FZHxI0IMGnM=
|
||||
gorm.io/driver/postgres v1.5.7/go.mod h1:3e019WlBaYI5o5LIdNV+LyxCMNtLOQETBXL2h4chKpA=
|
||||
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde h1:9DShaph9qhkIYw7QF91I/ynrr4cOO2PZra2PFD7Mfeg=
|
||||
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
|
||||
gotest.tools v1.4.0 h1:BjtEgfuw8Qyd+jPvQz8CfoxiO/UjFEidWinwEXZiWv0=
|
||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
||||
@@ -195,7 +195,7 @@ func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
	return endpoint, nil
}

// GetL2GethEndPoint returns the endpoint of the running L2Geth container
// GetWeb3SignerEndpoint returns the endpoint of the running Web3Signer container
func (t *TestcontainerApps) GetWeb3SignerEndpoint() (string, error) {
	if t.web3SignerContainer == nil || !t.web3SignerContainer.IsRunning() {
		return "", errors.New("web3signer is not running")
|
||||
@@ -21,6 +21,7 @@ var (
	// RollupRelayerFlags contains flags only used in rollup-relayer
	RollupRelayerFlags = []cli.Flag{
		&ImportGenesisFlag,
		&MinCodecVersionFlag,
	}
	// ConfigFileFlag loads the JSON config file.
	ConfigFileFlag = cli.StringFlag{
@@ -90,4 +91,10 @@ var (
		Usage: "Genesis file of the network",
		Value: "./conf/genesis.json",
	}
	// MinCodecVersionFlag defines the minimum codec version required for the chunk/batch/bundle proposers
	MinCodecVersionFlag = cli.UintFlag{
		Name: "min-codec-version",
		Usage: "Minimum required codec version for the chunk/batch/bundle proposers",
		Required: true,
	}
)
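MinCodecVersionFlag is declared as a required uint flag, so the rollup-relayer refuses to start unless an explicit minimum codec version is passed. A minimal sketch of how such a flag is declared and read, assuming the urfave/cli v2 package that the `&cli.UintFlag`/`Required` literals suggest; the app name and handler below are illustrative, not the real binary:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "rollup-relayer-sketch", // illustrative name
		Flags: []cli.Flag{
			&cli.UintFlag{
				Name:     "min-codec-version",
				Usage:    "Minimum required codec version for the chunk/batch/bundle proposers",
				Required: true, // startup fails fast if the operator omits it
			},
		},
		Action: func(c *cli.Context) error {
			minCodecVersion := c.Uint("min-codec-version")
			fmt.Println("proposers will reject codec versions below", minCodecVersion)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```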
|
||||
|
||||
@@ -9,6 +9,10 @@ import (
|
||||
|
||||
// CheckScrollProverVersion checks the "scroll-prover" version; if it differs from the local one, it returns false
|
||||
func CheckScrollProverVersion(proverVersion string) bool {
|
||||
if strings.HasPrefix(proverVersion, "sdk") {
|
||||
return CheckProverSDKVersion(proverVersion)
|
||||
}
|
||||
|
||||
// note the version is in fact in the format of "tag-commit-scroll_prover-halo2",
|
||||
// so split-by-'-' length should be 4
|
||||
remote := strings.Split(proverVersion, "-")
|
||||
@@ -23,8 +27,18 @@ func CheckScrollProverVersion(proverVersion string) bool {
|
||||
return remote[2] == local[2]
|
||||
}
|
||||
|
||||
// CheckProverSDKVersion checks the prover SDK version. It simply returns true for now;
// more checks will be added as we evolve.
|
||||
func CheckProverSDKVersion(proverVersion string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// CheckScrollRepoVersion checks if the proverVersion is at least the minimum required version.
|
||||
func CheckScrollRepoVersion(proverVersion, minVersion string) bool {
|
||||
if strings.HasPrefix(proverVersion, "sdk") {
|
||||
return CheckProverSDKWithMinVersion(proverVersion, minVersion)
|
||||
}
|
||||
|
||||
c, err := semver.NewConstraint(">= " + minVersion + "-0")
|
||||
if err != nil {
|
||||
log.Error("failed to initialize constraint", "minVersion", minVersion, "error", err)
|
||||
@@ -39,3 +53,9 @@ func CheckScrollRepoVersion(proverVersion, minVersion string) bool {
|
||||
|
||||
return c.Check(v)
|
||||
}
|
||||
|
||||
// CheckProverSDKWithMinVersion checks that the prover SDK version is at least the minimum required version. It simply returns true for now;
// more checks will be added as we evolve.
|
||||
func CheckProverSDKWithMinVersion(proverVersion string, minVersion string) bool {
|
||||
return true
|
||||
}
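CheckScrollRepoVersion builds its constraint as ">= " + minVersion + "-0": the trailing "-0" pre-release marker keeps prover versions like "v4.4.45-37af5ef5-38a68e2-1c5093c" from being rejected merely because they carry pre-release identifiers. A small sketch of that behaviour, assuming the Masterminds semver/v3 package that the NewConstraint/NewVersion/Check calls appear to come from:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// ">= 4.4.40-0" instead of ">= 4.4.40": the "-0" lets versions that carry
	// pre-release identifiers (tag-commit-... build strings) pass the floor check.
	c, err := semver.NewConstraint(">= v4.4.40-0")
	if err != nil {
		panic(err)
	}

	v, err := semver.NewVersion("v4.4.45-37af5ef5-38a68e2-1c5093c")
	if err != nil {
		panic(err)
	}

	fmt.Println(c.Check(v)) // true: 4.4.45 clears the 4.4.40 floor despite the pre-release suffix
}
```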
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
var tag = "v4.4.65"
|
||||
var tag = "v4.4.89"
|
||||
|
||||
var commit = func() string {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
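The commit value is derived from the module build information that the Go toolchain embeds in the binary. With VCS stamping enabled, the "vcs.revision" setting typically carries the git commit the binary was built from; the sketch below shows reading it (whether and how this repo trims it to a short hash is not visible in this hunk, so treat the formatting as an assumption):

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	if info, ok := debug.ReadBuildInfo(); ok {
		for _, setting := range info.Settings {
			if setting.Key == "vcs.revision" {
				rev := setting.Value
				if len(rev) > 8 {
					rev = rev[:8] // short-hash form, as version strings usually embed
				}
				fmt.Println("built from commit", rev)
			}
		}
	}
}
```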
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
"prover_manager": {
|
||||
"provers_per_session": 1,
|
||||
"session_attempts": 5,
|
||||
"external_prover_threshold": 32,
|
||||
"bundle_collection_time_sec": 180,
|
||||
"batch_collection_time_sec": 180,
|
||||
"chunk_collection_time_sec": 180,
|
||||
|
||||
@@ -9,8 +9,8 @@ require (
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/mitchellh/mapstructure v1.5.0
|
||||
github.com/prometheus/client_golang v1.19.0
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240819100936-c6af3bbe7068
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
|
||||
github.com/scroll-tech/da-codec v0.1.2
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20241011150208-4742882675d8
|
||||
github.com/shopspring/decimal v1.3.1
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
@@ -49,17 +49,20 @@ require (
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.13.0 // indirect
|
||||
github.com/btcsuite/btcd v0.20.1-beta // indirect
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
|
||||
github.com/chenzhuoyu/iasm v0.9.0 // indirect
|
||||
github.com/consensys/bavard v0.1.13 // indirect
|
||||
github.com/consensys/gnark-crypto v0.12.1 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/holiman/uint256 v1.2.4 // indirect
|
||||
github.com/iden3/go-iden3-crypto v0.0.16 // indirect
|
||||
github.com/klauspost/compress v1.17.9 // indirect
|
||||
github.com/mmcloughlin/addchain v0.4.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.5.0 // indirect
|
||||
@@ -74,6 +77,7 @@ require (
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
golang.org/x/crypto v0.24.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.21.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
|
||||
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
|
||||
github.com/appleboy/gin-jwt/v2 v2.9.1 h1:l29et8iLW6omcHltsOP6LLk4s3v4g2FbFs0koxGWVZs=
|
||||
github.com/appleboy/gin-jwt/v2 v2.9.1/go.mod h1:jwcPZJ92uoC9nOUTOKWoN/f6JZOgMSKlFSHw5/FrRUk=
|
||||
github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4=
|
||||
@@ -11,6 +15,8 @@ github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJR
|
||||
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
@@ -30,6 +36,16 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
|
||||
github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
|
||||
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
|
||||
github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=
|
||||
github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
|
||||
github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4=
|
||||
github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E=
|
||||
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
|
||||
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
|
||||
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
|
||||
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
|
||||
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
|
||||
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
|
||||
@@ -43,11 +59,17 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
|
||||
github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
|
||||
github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
|
||||
@@ -77,6 +99,10 @@ github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGF
|
||||
github.com/goccy/go-json v0.10.0/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
|
||||
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
@@ -91,8 +117,6 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
|
||||
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
@@ -107,6 +131,8 @@ github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlT
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
@@ -120,6 +146,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
@@ -129,8 +157,8 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
|
||||
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
|
||||
@@ -163,20 +191,18 @@ github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSz
|
||||
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240819100936-c6af3bbe7068 h1:oVGwhg4cCq35B04eG/S4OBXDwXiFH7+LezuH2ZTRBPs=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240819100936-c6af3bbe7068/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
|
||||
github.com/scroll-tech/da-codec v0.1.2 h1:QyJ+dQ4zWVVJwuqxNt4MiKyrymVc6rHe4YPtURkjiRc=
|
||||
github.com/scroll-tech/da-codec v0.1.2/go.mod h1:odz1ck3umvYccCG03osaQBISAYGinZktZYbpk94fYRE=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20241011150208-4742882675d8 h1:pEP6+ThQIgSRO5SILiO6iBpWnxAUjoRNBA9Nc6ooOS0=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20241011150208-4742882675d8/go.mod h1:MBHX2RcAV9KLWblo9DSa/xyPYd1Wpwnt64JSDOy85po=
|
||||
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
|
||||
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
|
||||
@@ -234,6 +260,8 @@ golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0
|
||||
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
|
||||
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
|
||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
|
||||
@@ -16,6 +16,8 @@ type ProverManager struct {
	// Number of attempts that a session can be retried if previous attempts failed.
	// Currently we only consider proving timeout as failure here.
	SessionAttempts uint8 `json:"session_attempts"`
	// Threshold for activating the external prover based on unassigned task count.
	ExternalProverThreshold int64 `json:"external_prover_threshold"`
	// Zk verifier config.
	Verifier *VerifierConfig `json:"verifier"`
	// BatchCollectionTimeSec batch Proof collection time (in seconds).
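ExternalProverThreshold acts as a backlog gate: the Assign handlers further down only hand a chunk/batch/bundle task to an external prover once the number of unassigned tasks of that type has reached the threshold, so light load stays with internal provers. A minimal sketch of the gating decision; the function name is illustrative, not part of the coordinator's API:

```go
package main

import "fmt"

// shouldOfferToExternalProver mirrors the check used in the Assign handlers:
// an external prover only gets work when the unassigned backlog has reached
// the configured external_prover_threshold.
func shouldOfferToExternalProver(unassignedCount, threshold int64) bool {
	return unassignedCount >= threshold
}

func main() {
	fmt.Println(shouldOfferToExternalProver(10, 32)) // false: backlog small, keep tasks for internal provers
	fmt.Println(shouldOfferToExternalProver(40, 32)) // true: backlog large enough to spill over
}
```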
|
||||
|
||||
@@ -70,10 +70,11 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
|
||||
}
|
||||
|
||||
return jwt.MapClaims{
|
||||
types.HardForkName: v.HardForkName,
|
||||
types.PublicKey: v.PublicKey,
|
||||
types.ProverName: v.Message.ProverName,
|
||||
types.ProverVersion: v.Message.ProverVersion,
|
||||
types.HardForkName: v.HardForkName,
|
||||
types.PublicKey: v.PublicKey,
|
||||
types.ProverName: v.Message.ProverName,
|
||||
types.ProverVersion: v.Message.ProverVersion,
|
||||
types.ProverProviderTypeKey: v.Message.ProverProviderType,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -96,5 +97,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
|
||||
c.Set(types.HardForkName, hardForkName)
|
||||
}
|
||||
|
||||
if providerType, ok := claims[types.ProverProviderTypeKey]; ok {
|
||||
c.Set(types.ProverProviderTypeKey, providerType)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
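IdentityHandler copies the prover_provider_type claim back into the request context. Because JWT claims travel as JSON, numeric claims come back as float64 once decoded into an interface{}, which is why the task handlers later cast the context value with uint8(v.(float64)). A small standard-library sketch of that round trip:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Numeric claims are serialized as JSON numbers; decoding them back into
	// an interface{} yields float64, not the original integer type.
	claims := map[string]interface{}{"prover_provider_type": 1}
	payload, _ := json.Marshal(claims)

	var decoded map[string]interface{}
	_ = json.Unmarshal(payload, &decoded)

	v := decoded["prover_provider_type"]
	fmt.Printf("%T\n", v)           // float64
	fmt.Println(uint8(v.(float64))) // 1, the cast used when reading the claim back out
}
```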
|
||||
|
||||
@@ -106,6 +106,17 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if login.Message.ProverProviderType != types.ProverProviderTypeInternal && login.Message.ProverProviderType != types.ProverProviderTypeExternal {
|
||||
// for backward compatibility, set ProverProviderType as internal
|
||||
if login.Message.ProverProviderType == types.ProverProviderTypeUndefined {
|
||||
login.Message.ProverProviderType = types.ProverProviderTypeInternal
|
||||
} else {
|
||||
log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
|
||||
return errors.New("invalid prover provider type")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -10,14 +10,11 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv3"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv4"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/forks"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
@@ -25,6 +22,7 @@ import (
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
cutils "scroll-tech/coordinator/internal/utils"
|
||||
)
|
||||
|
||||
// BatchProverTask is prover task implement for batch proof
|
||||
@@ -66,6 +64,18 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
|
||||
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
|
||||
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
|
||||
if taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) {
|
||||
unassignedBatchCount, getCountError := bp.batchOrm.GetUnassignedBatchCount(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
|
||||
if getCountError != nil {
|
||||
log.Error("failed to get unassigned batch proving tasks count", "height", getTaskParameter.ProverHeight, "err", getCountError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
// Assign external prover if unassigned task number exceeds threshold
|
||||
if unassignedBatchCount < bp.cfg.ProverManager.ExternalProverThreshold {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
var batchTask *orm.Batch
|
||||
for i := 0; i < 5; i++ {
|
||||
var getTaskError error
|
||||
@@ -91,6 +101,20 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Don't dispatch the same failing job to the same prover
|
||||
proverTasks, getTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBatch, tmpBatchTask.Hash, 2)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBatch.String(), "task ID", tmpBatchTask.Hash, "error", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
for i := 0; i < len(proverTasks); i++ {
|
||||
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
|
||||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
|
||||
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
|
||||
if updateAttemptsErr != nil {
|
||||
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
|
||||
@@ -175,7 +199,7 @@ func (bp *BatchProverTask) hardForkName(ctx *gin.Context, batchTask *orm.Batch)
|
||||
if getBlockErr != nil {
|
||||
return "", getBlockErr
|
||||
}
|
||||
hardForkName := forks.GetHardforkName(bp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
hardForkName := encoding.GetHardforkName(bp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
return hardForkName, nil
|
||||
}
|
||||
|
||||
@@ -235,7 +259,7 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
|
||||
}
|
||||
|
||||
func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *orm.Batch) {
|
||||
if err := bp.chunkOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), batchTask.Hash); err != nil {
|
||||
if err := bp.batchOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), batchTask.Hash); err != nil {
|
||||
log.Error("failed to recover batch active attempts", "hash", batchTask.Hash, "error", err)
|
||||
}
|
||||
}
|
||||
@@ -250,23 +274,17 @@ func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*
|
||||
return taskDetail, nil
|
||||
}
|
||||
|
||||
if encoding.CodecVersion(dbBatch.CodecVersion) == encoding.CodecV3 {
|
||||
batchHeader, decodeErr := codecv3.NewDABatchFromBytes(dbBatch.BatchHeader)
|
||||
if decodeErr != nil {
|
||||
return nil, fmt.Errorf("failed to decode batch header (v3) for batch %d: %w", dbBatch.Index, decodeErr)
|
||||
}
|
||||
|
||||
taskDetail.BatchHeader = batchHeader
|
||||
taskDetail.BlobBytes = dbBatch.BlobBytes
|
||||
} else {
|
||||
batchHeader, decodeErr := codecv4.NewDABatchFromBytes(dbBatch.BatchHeader)
|
||||
if decodeErr != nil {
|
||||
return nil, fmt.Errorf("failed to decode batch header (v4) for batch %d: %w", dbBatch.Index, decodeErr)
|
||||
}
|
||||
|
||||
taskDetail.BatchHeader = batchHeader
|
||||
taskDetail.BlobBytes = dbBatch.BlobBytes
|
||||
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
|
||||
}
|
||||
|
||||
batchHeader, decodeErr := codec.NewDABatchFromBytes(dbBatch.BatchHeader)
|
||||
if decodeErr != nil {
|
||||
return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
|
||||
}
|
||||
taskDetail.BatchHeader = batchHeader
|
||||
taskDetail.BlobBytes = dbBatch.BlobBytes
|
||||
|
||||
return taskDetail, nil
|
||||
}
|
||||
|
||||
@@ -9,11 +9,11 @@ import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/forks"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
cutils "scroll-tech/coordinator/internal/utils"
|
||||
)
|
||||
|
||||
// BundleProverTask is prover task implement for bundle proof
|
||||
@@ -63,6 +64,18 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
|
||||
|
||||
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
|
||||
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
|
||||
if taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) {
|
||||
unassignedBundleCount, getCountError := bp.bundleOrm.GetUnassignedBundleCount(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
|
||||
if getCountError != nil {
|
||||
log.Error("failed to get unassigned bundle proving tasks count", "height", getTaskParameter.ProverHeight, "err", getCountError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
// Assign external prover if unassigned task number exceeds threshold
|
||||
if unassignedBundleCount < bp.cfg.ProverManager.ExternalProverThreshold {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
var bundleTask *orm.Bundle
|
||||
for i := 0; i < 5; i++ {
|
||||
var getTaskError error
|
||||
@@ -88,6 +101,20 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Don't dispatch the same failing job to the same prover
|
||||
proverTasks, getTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBundle, tmpBundleTask.Hash, 2)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBundle.String(), "task ID", tmpBundleTask.Hash, "error", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
for i := 0; i < len(proverTasks); i++ {
|
||||
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
|
||||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
|
||||
log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts)
|
||||
if updateAttemptsErr != nil {
|
||||
log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
|
||||
@@ -178,7 +205,7 @@ func (bp *BundleProverTask) hardForkName(ctx *gin.Context, bundleTask *orm.Bundl
|
||||
return "", getBlockErr
|
||||
}
|
||||
|
||||
hardForkName := forks.GetHardforkName(bp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
hardForkName := encoding.GetHardforkName(bp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
return hardForkName, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -9,11 +9,11 @@ import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/forks"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
cutils "scroll-tech/coordinator/internal/utils"
|
||||
)
|
||||
|
||||
// ChunkProverTask the chunk prover task
|
||||
@@ -61,6 +62,18 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
|
||||
maxActiveAttempts := cp.cfg.ProverManager.ProversPerSession
|
||||
maxTotalAttempts := cp.cfg.ProverManager.SessionAttempts
|
||||
if taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) {
|
||||
unassignedChunkCount, getCountError := cp.chunkOrm.GetUnassignedChunkCount(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
|
||||
if getCountError != nil {
|
||||
log.Error("failed to get unassigned chunk proving tasks count", "height", getTaskParameter.ProverHeight, "err", getCountError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
// Assign external prover if unassigned task number exceeds threshold
|
||||
if unassignedChunkCount < cp.cfg.ProverManager.ExternalProverThreshold {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
var chunkTask *orm.Chunk
|
||||
for i := 0; i < 5; i++ {
|
||||
var getTaskError error
|
||||
@@ -86,6 +99,20 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Don't dispatch the same failing job to the same prover
|
||||
proverTasks, getTaskError := cp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeChunk, tmpChunkTask.Hash, 2)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get prover tasks", "proof type", message.ProofTypeChunk.String(), "task ID", tmpChunkTask.Hash, "error", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
for i := 0; i < len(proverTasks); i++ {
|
||||
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
|
||||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
|
||||
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
|
||||
if updateAttemptsErr != nil {
|
||||
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
|
||||
@@ -164,7 +191,7 @@ func (cp *ChunkProverTask) hardForkName(ctx *gin.Context, chunkTask *orm.Chunk)
|
||||
if getBlockErr != nil {
|
||||
return "", getBlockErr
|
||||
}
|
||||
hardForkName := forks.GetHardforkName(cp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
hardForkName := encoding.GetHardforkName(cp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
return hardForkName, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -47,10 +47,11 @@ type BaseProverTask struct {
|
||||
}
|
||||
|
||||
type proverTaskContext struct {
|
||||
PublicKey string
|
||||
ProverName string
|
||||
ProverVersion string
|
||||
HardForkNames map[string]struct{}
|
||||
PublicKey string
|
||||
ProverName string
|
||||
ProverVersion string
|
||||
ProverProviderType uint8
|
||||
HardForkNames map[string]struct{}
|
||||
}
|
||||
|
||||
// checkParameter checks whether the prover task parameters are valid
|
||||
@@ -76,6 +77,12 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, e
|
||||
}
|
||||
ptc.ProverVersion = proverVersion.(string)
|
||||
|
||||
ProverProviderType, ProverProviderTypeExist := ctx.Get(coordinatorType.ProverProviderTypeKey)
|
||||
if !ProverProviderTypeExist {
|
||||
return nil, errors.New("get prover provider type from context failed")
|
||||
}
|
||||
ptc.ProverProviderType = uint8(ProverProviderType.(float64))
|
||||
|
||||
hardForkNamesStr, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
|
||||
if !hardForkNameExist {
|
||||
return nil, errors.New("get hard fork name from context failed")
|
||||
|
||||
@@ -10,11 +10,11 @@ import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/forks"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
@@ -462,6 +462,6 @@ func (m *ProofReceiverLogic) hardForkName(ctx *gin.Context, hash string, proofTy
|
||||
return "", getBlockErr
|
||||
}
|
||||
|
||||
hardForkName := forks.GetHardforkName(m.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
hardForkName := encoding.GetHardforkName(m.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
return hardForkName, nil
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv0"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
@@ -96,6 +95,22 @@ func (o *Batch) GetUnassignedBatch(ctx context.Context, maxActiveAttempts, maxTo
|
||||
return &batch, nil
|
||||
}
|
||||
|
||||
// GetUnassignedBatchCount retrieves unassigned batch count.
|
||||
func (o *Batch) GetUnassignedBatchCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (int64, error) {
|
||||
var count int64
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Batch{})
|
||||
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
|
||||
db = db.Where("total_attempts < ?", maxTotalAttempts)
|
||||
db = db.Where("active_attempts < ?", maxActiveAttempts)
|
||||
db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
|
||||
db = db.Where("batch.deleted_at IS NULL")
|
||||
if err := db.Count(&count).Error; err != nil {
|
||||
return 0, fmt.Errorf("Batch.GetUnassignedBatchCount error: %w", err)
|
||||
}
|
||||
return count, nil
|
||||
}
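GetUnassignedBatchCount chains the same filters used by GetUnassignedBatch but ends in Count, so a single aggregate query sizes the backlog that feeds the external-prover threshold above. Roughly the SQL the chain produces (table and column names as referenced by the model's deleted_at filter; the exact statement GORM emits may differ slightly), followed by the same conditions written as one fluent chain, which is behaviourally equivalent to the step-by-step form:

```go
// Approximate SQL built by the chained GORM calls above:
//
//   SELECT count(*) FROM batch
//   WHERE proving_status = ?        -- types.ProvingTaskUnassigned
//     AND total_attempts < ?        -- session_attempts ceiling
//     AND active_attempts < ?       -- provers_per_session ceiling
//     AND chunk_proofs_status = ?   -- types.ChunkProofsStatusReady
//     AND batch.deleted_at IS NULL  -- soft-delete filter
var count int64
err := o.db.WithContext(ctx).
	Model(&Batch{}).
	Where("proving_status = ?", int(types.ProvingTaskUnassigned)).
	Where("total_attempts < ?", maxTotalAttempts).
	Where("active_attempts < ?", maxActiveAttempts).
	Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady)).
	Where("batch.deleted_at IS NULL").
	Count(&count).Error
```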
|
||||
|
||||
// GetAssignedBatch retrieves assigned batch based on the specified limit.
|
||||
// The returned batches are sorted in ascending order by their index.
|
||||
func (o *Batch) GetAssignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
|
||||
@@ -252,11 +267,16 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
|
||||
return nil, errors.New("invalid args: batch contains 0 chunk")
|
||||
}
|
||||
|
||||
daBatch, err := codecv0.NewDABatch(batch)
|
||||
codec, err := encoding.CodecFromVersion(encoding.CodecV0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
daBatch, err := codec.NewDABatch(batch)
|
||||
if err != nil {
|
||||
log.Error("failed to create new DA batch",
|
||||
"index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
|
||||
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -264,7 +284,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
|
||||
parentBatch, err := o.GetLatestBatch(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get latest batch", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
|
||||
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
@@ -275,17 +295,17 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
|
||||
startChunkIndex = parentBatch.EndChunkIndex + 1
|
||||
}
|
||||
|
||||
startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
|
||||
startDAChunk, err := codec.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
log.Error("failed to create start DA chunk", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
|
||||
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
startDAChunkHash, err := startDAChunk.Hash()
|
||||
if err != nil {
|
||||
log.Error("failed to get start DA chunk hash", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
|
||||
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
@@ -293,24 +313,24 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
|
||||
for i := uint64(0); i < numChunks-1; i++ {
|
||||
totalL1MessagePoppedBeforeEndDAChunk += batch.Chunks[i].NumL1Messages(totalL1MessagePoppedBeforeEndDAChunk)
|
||||
}
|
||||
endDAChunk, err := codecv0.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
|
||||
endDAChunk, err := codec.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
|
||||
if err != nil {
|
||||
log.Error("failed to create end DA chunk", "index", batch.Index, "total l1 message popped before", totalL1MessagePoppedBeforeEndDAChunk,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
|
||||
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
endDAChunkHash, err := endDAChunk.Hash()
|
||||
if err != nil {
|
||||
log.Error("failed to get end DA chunk hash", "index", batch.Index, "total l1 message popped before", totalL1MessagePoppedBeforeEndDAChunk,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
|
||||
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
newBatch := Batch{
|
||||
Index: batch.Index,
|
||||
Hash: daBatch.Hash().Hex(),
|
||||
DataHash: daBatch.DataHash.Hex(),
|
||||
DataHash: daBatch.DataHash().Hex(),
|
||||
StartChunkHash: startDAChunkHash.Hex(),
|
||||
StartChunkIndex: startChunkIndex,
|
||||
EndChunkHash: endDAChunkHash.Hex(),
|
||||
|
||||
@@ -71,6 +71,22 @@ func (o *Bundle) GetUnassignedBundle(ctx context.Context, maxActiveAttempts, max
|
||||
return &bundle, nil
|
||||
}
|
||||
|
||||
// GetUnassignedBundleCount retrieves unassigned bundle count.
|
||||
func (o *Bundle) GetUnassignedBundleCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (int64, error) {
|
||||
var count int64
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Bundle{})
|
||||
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
|
||||
db = db.Where("total_attempts < ?", maxTotalAttempts)
|
||||
db = db.Where("active_attempts < ?", maxActiveAttempts)
|
||||
db = db.Where("batch_proofs_status = ?", int(types.BatchProofsStatusReady))
|
||||
db = db.Where("bundle.deleted_at IS NULL")
|
||||
if err := db.Count(&count).Error; err != nil {
|
||||
return 0, fmt.Errorf("Bundle.GetUnassignedBundleCount error: %w", err)
|
||||
}
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// GetAssignedBundle retrieves assigned bundle based on the specified limit.
|
||||
// The returned bundle sorts in ascending order by their index.
|
||||
func (o *Bundle) GetAssignedBundle(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Bundle, error) {
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv0"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
@@ -89,6 +88,22 @@ func (o *Chunk) GetUnassignedChunk(ctx context.Context, maxActiveAttempts, maxTo
|
||||
return &chunk, nil
|
||||
}
|
||||
|
||||
// GetUnassignedChunkCount retrieves unassigned chunk count.
|
||||
func (o *Chunk) GetUnassignedChunkCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (int64, error) {
|
||||
var count int64
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Chunk{})
|
||||
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
|
||||
db = db.Where("total_attempts < ?", maxTotalAttempts)
|
||||
db = db.Where("active_attempts < ?", maxActiveAttempts)
|
||||
db = db.Where("end_block_number <= ?", height)
|
||||
db = db.Where("chunk.deleted_at IS NULL")
|
||||
if err := db.Count(&count).Error; err != nil {
|
||||
return 0, fmt.Errorf("Chunk.GetUnassignedChunkCount error: %w", err)
|
||||
}
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// GetAssignedChunk retrieves assigned chunk based on the specified limit.
|
||||
// The returned chunks are sorted in ascending order by their index.
|
||||
func (o *Chunk) GetAssignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (*Chunk, error) {
|
||||
@@ -258,7 +273,12 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
|
||||
parentChunkStateRoot = parentChunk.StateRoot
|
||||
}
|
||||
|
||||
daChunk, err := codecv0.NewDAChunk(chunk, totalL1MessagePoppedBefore)
|
||||
codec, err := encoding.CodecFromVersion(encoding.CodecV0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
}
|
||||
|
||||
daChunk, err := codec.NewDAChunk(chunk, totalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
log.Error("failed to initialize new DA chunk", "err", err)
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
@@ -270,13 +290,13 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
}
|
||||
|
||||
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(chunk)
|
||||
totalL1CommitCalldataSize, err := codec.EstimateChunkL1CommitCalldataSize(chunk)
|
||||
if err != nil {
|
||||
log.Error("failed to estimate chunk L1 commit calldata size", "err", err)
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
}
|
||||
|
||||
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(chunk)
|
||||
totalL1CommitGas, err := codec.EstimateChunkL1CommitGas(chunk)
|
||||
if err != nil {
|
||||
log.Error("failed to estimate chunk L1 commit gas", "err", err)
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
@@ -290,7 +310,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
|
||||
StartBlockHash: chunk.Blocks[0].Header.Hash().Hex(),
|
||||
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
|
||||
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
|
||||
TotalL2TxGas: chunk.L2GasUsed(),
|
||||
TotalL2TxGas: chunk.TotalGasUsed(),
|
||||
TotalL2TxNum: chunk.NumL2Transactions(),
|
||||
TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
|
||||
TotalL1CommitGas: totalL1CommitGas,
|
||||
|
||||
@@ -2,6 +2,7 @@ package orm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
@@ -65,7 +66,7 @@ func (p *ProverBlockList) IsPublicKeyBlocked(ctx context.Context, publicKey stri
|
||||
db = db.Model(&ProverBlockList{})
|
||||
db = db.Where("public_key = ?", publicKey)
|
||||
if err := db.First(&ProverBlockList{}).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return false, nil // Public key not found, hence it's not blocked.
|
||||
}
|
||||
return true, fmt.Errorf("ProverBlockList.IsPublicKeyBlocked error: %w, public key: %v", err, publicKey)
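Switching from == to errors.Is matters once errors can be wrapped with %w: a direct comparison only matches the exact sentinel value, while errors.Is walks the wrap chain, so a wrapped gorm.ErrRecordNotFound is still treated as "not found". A standard-library illustration:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("record not found")

func main() {
	wrapped := fmt.Errorf("query failed: %w", errNotFound)

	fmt.Println(wrapped == errNotFound)          // false: the wrapper is a different value
	fmt.Println(errors.Is(wrapped, errNotFound)) // true: errors.Is unwraps the chain
}
```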
|
||||
|
||||
@@ -2,6 +2,7 @@ package orm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
@@ -60,7 +61,7 @@ func (o *ProverTask) IsProverAssigned(ctx context.Context, publicKey string) (bo
|
||||
var task ProverTask
|
||||
err := db.Where("prover_public_key = ? AND proving_status = ?", publicKey, types.ProverAssigned).First(&task).Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
@@ -116,6 +117,27 @@ func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, taskType messag
|
||||
return proverTasks, nil
|
||||
}
|
||||
|
||||
// GetFailedProverTasksByHash retrieves the failed ProverTask records associated with the specified hash.
|
||||
// The returned prover task objects are sorted in descending order by their ids.
|
||||
func (o *ProverTask) GetFailedProverTasksByHash(ctx context.Context, taskType message.ProofType, hash string, limit int) ([]*ProverTask, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&ProverTask{})
|
||||
db = db.Where("task_type", int(taskType))
|
||||
db = db.Where("task_id", hash)
|
||||
db = db.Where("proving_status = ?", int(types.ProverProofInvalid))
|
||||
db = db.Order("id desc")
|
||||
|
||||
if limit != 0 {
|
||||
db = db.Limit(limit)
|
||||
}
|
||||
|
||||
var proverTasks []*ProverTask
|
||||
if err := db.Find(&proverTasks).Error; err != nil {
|
||||
return nil, fmt.Errorf("ProverTask.GetFailedProverTasksByHash error: %w, hash: %v", err, hash)
|
||||
}
|
||||
return proverTasks, nil
|
||||
}
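GetFailedProverTasksByHash backs the "don't dispatch the same failing job to the same prover" guard in the Assign handlers: the most recent failed attempts for a task are fetched, and the task is skipped if the requesting prover (by public key, or by external-prover name match) is among them. A sketch of that guard; the helper name is illustrative, and the nameMatch parameter stands in for the external-prover name matching utility used in the handlers:

```go
// alreadyFailedByThisProver is a sketch of the skip decision made in the
// Assign handlers after calling GetFailedProverTasksByHash.
func alreadyFailedByThisProver(failed []*ProverTask, publicKey, proverName string, isExternal bool,
	nameMatch func(taskProverName, requesterName string) bool) bool {
	for _, task := range failed {
		if task.ProverPublicKey == publicKey {
			return true // this exact prover already failed the task
		}
		if isExternal && nameMatch(task.ProverName, proverName) {
			return true // the requesting external prover already failed it under another instance name
		}
	}
	return false
}
```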
|
||||
|
||||
// GetProverTaskByUUIDAndPublicKey gets the prover task by UUID and public key
|
||||
func (o *ProverTask) GetProverTaskByUUIDAndPublicKey(ctx context.Context, uuid, publicKey string) (*ProverTask, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
|
||||
@@ -18,6 +18,8 @@ const (
|
||||
ProverName = "prover_name"
|
||||
// ProverVersion the prover version for context
|
||||
ProverVersion = "prover_version"
|
||||
// ProverProviderTypeKey the prover provider type for context
|
||||
ProverProviderTypeKey = "prover_provider_type"
|
||||
// HardForkName the hard fork name for context
|
||||
HardForkName = "hard_fork_name"
|
||||
)
|
||||
@@ -28,13 +30,22 @@ type LoginSchema struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
type MessageWithoutProverProviderType struct {
|
||||
Challenge string `json:"challenge"`
|
||||
ProverVersion string `json:"prover_version"`
|
||||
ProverName string `json:"prover_name"`
|
||||
ProverTypes []ProverType `json:"prover_types"`
|
||||
VKs []string `json:"vks"`
|
||||
}
|
||||
|
||||
// Message the login message struct
|
||||
type Message struct {
|
||||
Challenge string `form:"challenge" json:"challenge" binding:"required"`
|
||||
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
|
||||
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
|
||||
ProverTypes []ProverType `form:"prover_types" json:"prover_types"`
|
||||
VKs []string `form:"vks" json:"vks"`
|
||||
Challenge string `form:"challenge" json:"challenge" binding:"required"`
|
||||
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
|
||||
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
|
||||
ProverProviderType ProverProviderType `form:"prover_provider_type" json:"prover_provider_type,omitempty"`
|
||||
ProverTypes []ProverType `form:"prover_types" json:"prover_types"`
|
||||
VKs []string `form:"vks" json:"vks"`
|
||||
}
|
||||
|
||||
// LoginParameterWithHardForkName constructs new payload for login
|
||||
@@ -53,7 +64,7 @@ type LoginParameter struct {
// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *LoginParameter) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Message.Hash()
hash, err := Hash(a.Message)
if err != nil {
return err
}
@@ -70,7 +81,14 @@ func (a *LoginParameter) SignWithKey(priv *ecdsa.PrivateKey) error {

// Verify verifies the message of auth.
func (a *LoginParameter) Verify() (bool, error) {
hash, err := a.Message.Hash()
var hash []byte
var err error
if a.Message.ProverProviderType == ProverProviderTypeUndefined {
// for backward compatibility, calculate hash without ProverProviderType
hash, err = Hash(a.Message.ToMessageWithoutProverProviderType())
} else {
hash, err = Hash(a.Message)
}
if err != nil {
return false, err
}
@@ -85,15 +103,14 @@ func (a *LoginParameter) Verify() (bool, error) {
return isValid, nil
}

// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Message) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
func (m *Message) ToMessageWithoutProverProviderType() MessageWithoutProverProviderType {
return MessageWithoutProverProviderType{
Challenge: m.Challenge,
ProverVersion: m.ProverVersion,
ProverName: m.ProverName,
ProverTypes: m.ProverTypes,
VKs: m.VKs,
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}

// DecodeAndUnmarshalPubkey decodes a hex-encoded public key and unmarshal it into an ecdsa.PublicKey
@@ -111,3 +128,14 @@ func (i *Message) DecodeAndUnmarshalPubkey(pubKeyHex string) (*ecdsa.PublicKey,
}
return pubKey, nil
}

// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func Hash(i interface{}) ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
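The backward-compatibility branch above works because RLP encodes exported struct fields positionally: a legacy login payload, which never carried ProverProviderType, produces the same RLP bytes (and therefore the same Keccak digest) as the trimmed MessageWithoutProverProviderType view, while the extended struct does not. Below is a standalone sketch of that idea; it uses the upstream go-ethereum rlp and crypto packages as stand-ins for the scroll-tech fork imported here, and the struct shapes and field values are illustrative, not taken from the diff.

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/crypto"
		"github.com/ethereum/go-ethereum/rlp"
	)

	// legacyMessage mirrors the shape an old client signs (no provider type).
	type legacyMessage struct {
		Challenge     string
		ProverVersion string
		ProverName    string
	}

	// newMessage mirrors the extended shape with the extra trailing field.
	type newMessage struct {
		Challenge          string
		ProverVersion      string
		ProverName         string
		ProverProviderType uint8
	}

	// keccakOfRLP is the same RLP-then-Keccak recipe as the Hash helper above.
	func keccakOfRLP(v interface{}) []byte {
		byt, err := rlp.EncodeToBytes(v)
		if err != nil {
			panic(err)
		}
		h := crypto.Keccak256Hash(byt)
		return h[:]
	}

	func main() {
		old := legacyMessage{Challenge: "abc", ProverVersion: "v0.0.1", ProverName: "p1"}
		ext := newMessage{Challenge: "abc", ProverVersion: "v0.0.1", ProverName: "p1", ProverProviderType: 1}

		// The extra field changes the RLP payload, hence the digest ...
		fmt.Printf("legacy: %x\nnew:    %x\n", keccakOfRLP(old), keccakOfRLP(ext))
		// ... which is why Verify() falls back to hashing the trimmed view
		// when ProverProviderType is undefined.
	}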
@@ -18,11 +18,12 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
t.Run("sign", func(t *testing.T) {
authMsg = LoginParameter{
Message: Message{
ProverName: "test1",
ProverVersion: "v0.0.1",
Challenge: "abcdef",
ProverTypes: []ProverType{ProverTypeBatch},
VKs: []string{"vk1", "vk2"},
ProverName: "test1",
ProverVersion: "v0.0.1",
Challenge: "abcdef",
ProverProviderType: ProverProviderTypeInternal,
ProverTypes: []ProverType{ProverTypeBatch},
VKs: []string{"vk1", "vk2"},
},
PublicKey: publicKeyHex,
}
@@ -59,11 +60,12 @@ func TestGenerateSignature(t *testing.T) {

authMsg := LoginParameter{
Message: Message{
ProverName: "test",
ProverVersion: "v4.4.45-37af5ef5-38a68e2-1c5093c",
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjQ4Mzg0ODUsIm9yaWdfaWF0IjoxNzI0ODM0ODg1LCJyYW5kb20iOiJ6QmdNZGstNGc4UzNUNTFrVEFsYk1RTXg2TGJ4SUs4czY3ejM2SlNuSFlJPSJ9.x9PvihhNx2w4_OX5uCrv8QJCNYVQkIi-K2k8XFXYmik",
ProverTypes: []ProverType{ProverTypeChunk},
VKs: []string{"mock_vk"},
ProverName: "test",
ProverVersion: "v4.4.45-37af5ef5-38a68e2-1c5093c",
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjQ4Mzg0ODUsIm9yaWdfaWF0IjoxNzI0ODM0ODg1LCJyYW5kb20iOiJ6QmdNZGstNGc4UzNUNTFrVEFsYk1RTXg2TGJ4SUs4czY3ejM2SlNuSFlJPSJ9.x9PvihhNx2w4_OX5uCrv8QJCNYVQkIi-K2k8XFXYmik",
ProverProviderType: ProverProviderTypeInternal,
ProverTypes: []ProverType{ProverTypeChunk},
VKs: []string{"mock_vk"},
},
PublicKey: publicKeyHex,
}
@@ -30,8 +30,8 @@ const (
)

// MakeProverType make ProverType from ProofType
func MakeProverType(proof_type message.ProofType) ProverType {
switch proof_type {
func MakeProverType(proofType message.ProofType) ProverType {
switch proofType {
case message.ProofTypeChunk:
return ProverTypeChunk
case message.ProofTypeBatch, message.ProofTypeBundle:
@@ -40,3 +40,26 @@ func MakeProverType(proof_type message.ProofType) ProverType {
return ProverTypeUndefined
}
}

// ProverProviderType represents the type of prover provider.
type ProverProviderType uint8

func (r ProverProviderType) String() string {
switch r {
case ProverProviderTypeInternal:
return "prover provider type internal"
case ProverProviderTypeExternal:
return "prover provider type external"
default:
return fmt.Sprintf("prover provider type: %d", r)
}
}

const (
// ProverProviderTypeUndefined is an unknown prover provider type
ProverProviderTypeUndefined ProverProviderType = iota
// ProverProviderTypeInternal is an internal prover provider type
ProverProviderTypeInternal
// ProverProviderTypeExternal is an external prover provider type
ProverProviderTypeExternal
)
17
coordinator/internal/utils/prover_name.go
Normal file
@@ -0,0 +1,17 @@
package utils

import "strings"

// IsExternalProverNameMatch checks if the local and remote external prover names belong to the same provider.
// It returns true if they do, otherwise false.
func IsExternalProverNameMatch(localName, remoteName string) bool {
local := strings.Split(localName, "_")
remote := strings.Split(remoteName, "_")

if len(local) < 3 || len(remote) < 3 {
return false
}

// note the name of cloud prover is in the format of "cloud_prover_{provider-name}_index"
return local[0] == remote[0] && local[1] == remote[1] && local[2] == remote[2]
}
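For illustration, a tiny standalone sketch of how names following the "cloud_prover_{provider-name}_index" convention compare under this rule; the helper below simply re-states the new function so the snippet compiles on its own, and the concrete prover names are made up, not taken from the repo:

	package main

	import (
		"fmt"
		"strings"
	)

	// isExternalProverNameMatch re-states IsExternalProverNameMatch above,
	// only so this snippet is self-contained.
	func isExternalProverNameMatch(localName, remoteName string) bool {
		local := strings.Split(localName, "_")
		remote := strings.Split(remoteName, "_")
		if len(local) < 3 || len(remote) < 3 {
			return false
		}
		return local[0] == remote[0] && local[1] == remote[1] && local[2] == remote[2]
	}

	func main() {
		// Same provider ("acme") with different indices: considered a match.
		fmt.Println(isExternalProverNameMatch("cloud_prover_acme_0", "cloud_prover_acme_7")) // true
		// Different provider name in the third segment: no match.
		fmt.Println(isExternalProverNameMatch("cloud_prover_acme_0", "cloud_prover_other_1")) // false
		// Fewer than three "_"-separated segments: no match.
		fmt.Println(isExternalProverNameMatch("prover_0", "prover_0")) // false
	}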
@@ -79,11 +79,12 @@ func (r *mockProver) challenge(t *testing.T) string {
func (r *mockProver) login(t *testing.T, challengeString string, proverTypes []types.ProverType) (string, int, string) {
authMsg := types.LoginParameter{
Message: types.Message{
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
ProverTypes: proverTypes,
VKs: []string{"mock_vk"},
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
ProverProviderType: types.ProverProviderTypeInternal,
ProverTypes: proverTypes,
VKs: []string{"mock_vk"},
},
PublicKey: r.publicKey(),
}
@@ -6,7 +6,7 @@ require (
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.9
github.com/pressly/goose/v3 v3.16.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
github.com/scroll-tech/go-ethereum v1.10.14-0.20241011150208-4742882675d8
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
)
@@ -20,14 +20,12 @@ require (
github.com/docker/go-connections v0.5.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/jackc/pgx/v5 v5.5.4 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-sqlite3 v1.14.22 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sethvargo/go-retry v0.2.4 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
@@ -18,7 +18,6 @@ github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7b
|
||||
github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible h1:xhVCHXq+P5LhT31+RuDuk0xXEbEnd50Fr37J1bGuyWg=
|
||||
@@ -54,6 +53,8 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaU
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
|
||||
@@ -70,8 +71,8 @@ github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST
|
||||
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
||||
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
|
||||
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
@@ -105,7 +106,6 @@ github.com/paulmach/orb v0.10.0 h1:guVYVqzxHE/CQ1KpfGO077TR0ATHSNjp4s6XGLn3W9s=
|
||||
github.com/paulmach/orb v0.10.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
|
||||
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
|
||||
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
@@ -116,13 +116,12 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20241011150208-4742882675d8 h1:pEP6+ThQIgSRO5SILiO6iBpWnxAUjoRNBA9Nc6ooOS0=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20241011150208-4742882675d8/go.mod h1:MBHX2RcAV9KLWblo9DSa/xyPYd1Wpwnt64JSDOy85po=
|
||||
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
|
||||
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
|
||||
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
|
||||
|
||||
273
go.work.sum
@@ -9,6 +9,13 @@ cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTj
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y=
|
||||
cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic=
|
||||
cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
|
||||
@@ -63,6 +70,10 @@ cloud.google.com/go/beyondcorp v1.0.3/go.mod h1:HcBvnEd7eYr+HGDd5ZbuVmBYX019C6CE
|
||||
cloud.google.com/go/beyondcorp v1.0.4/go.mod h1:Gx8/Rk2MxrvWfn4WIhHIG1NV7IBfg14pTKv1+EArVcc=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/bigquery v1.57.1 h1:FiULdbbzUxWD0Y4ZGPSVCDLvqRSyCIO6zKV7E2nf5uA=
|
||||
cloud.google.com/go/bigquery v1.57.1/go.mod h1:iYzC0tGVWt1jqSzBHqCr3lrRn0u13E8e+AqowBsDgug=
|
||||
cloud.google.com/go/bigquery v1.59.1/go.mod h1:VP1UJYgevyTwsV7desjzNzDND5p6hZB+Z8gZJN1GQUc=
|
||||
@@ -130,6 +141,7 @@ cloud.google.com/go/dataqna v0.8.4 h1:NJnu1kAPamZDs/if3bJ3+Wb6tjADHKL83NUWsaIp2z
|
||||
cloud.google.com/go/dataqna v0.8.4/go.mod h1:mySRKjKg5Lz784P6sCov3p1QD+RZQONRMRjzGNcFd0c=
|
||||
cloud.google.com/go/dataqna v0.8.5/go.mod h1:vgihg1mz6n7pb5q2YJF7KlXve6tCglInd6XO0JGOlWM=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/datastore v1.15.0 h1:0P9WcsQeTWjuD1H14JIY7XQscIPQ4Laje8ti96IC5vg=
|
||||
cloud.google.com/go/datastore v1.15.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8=
|
||||
cloud.google.com/go/datastream v1.10.3 h1:Z2sKPIB7bT2kMW5Uhxy44ZgdJzxzE5uKjavoW+EuHEE=
|
||||
@@ -271,6 +283,8 @@ cloud.google.com/go/privatecatalog v0.9.4/go.mod h1:SOjm93f+5hp/U3PqMZAHTtBtluqL
|
||||
cloud.google.com/go/privatecatalog v0.9.5/go.mod h1:fVWeBOVe7uj2n3kWRGlUQqR/pOd450J9yZoOECcQqJk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||
cloud.google.com/go/pubsub v1.33.0 h1:6SPCPvWav64tj0sVX/+npCBKhUi/UjJehy9op/V3p2g=
|
||||
cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc=
|
||||
cloud.google.com/go/pubsub v1.36.1/go.mod h1:iYjCa9EzWOoBiTdd4ps7QoMtMln5NwaZQpK1hbRfBDE=
|
||||
@@ -326,6 +340,9 @@ cloud.google.com/go/speech v1.21.0/go.mod h1:wwolycgONvfz2EDU8rKuHRW3+wc9ILPsAWo
|
||||
cloud.google.com/go/speech v1.21.1/go.mod h1:E5GHZXYQlkqWQwY5xRSLHw2ci5NMQNG52FfMU1aZrIA=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM=
|
||||
cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
|
||||
cloud.google.com/go/storage v1.38.0/go.mod h1:tlUADB0mAb9BgYls9lq+8MGkfzOXuLrnHXlpHmvFJoY=
|
||||
@@ -383,12 +400,17 @@ github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot
|
||||
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 h1:Ut0ZGdOwJDw0npYEg+TLlPls3Pq6JiZaP2/aGKir7Zw=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 h1:jp0dGvZ7ZK0mgqnTSClMxa5xuRL7NZgHameVYF6BurY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4=
|
||||
github.com/Azure/azure-storage-blob-go v0.7.0 h1:MuueVOYkufCxJw5YZzF842DY2MBsp+hLuh2apKY0mck=
|
||||
github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
|
||||
@@ -405,13 +427,15 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM
|
||||
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v0.6.0 h1:XMEdVDFxgulDDl0lQmAZS6j8gRQ/0pJ+ZpXH2FHVtDc=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v0.6.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU=
|
||||
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
|
||||
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
|
||||
github.com/CloudyKit/jet/v6 v6.1.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4=
|
||||
github.com/DATA-DOG/go-sqlmock v1.3.3 h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08=
|
||||
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
|
||||
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
@@ -421,8 +445,10 @@ github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tN
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06/go.mod h1:7erjKLwalezA0k99cWs5L11HWOAPNjdUZ6RxH1BXbbM=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794/go.mod h1:7e+I0LQFUI9AXWxOfsQROs9xPhoJtbsyWcjJqDd4KPY=
|
||||
github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg=
|
||||
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
||||
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
||||
@@ -437,6 +463,7 @@ github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HR
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
|
||||
@@ -485,11 +512,13 @@ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.24 h1:i4RH8DLv/BHY0
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.24/go.mod h1:N8X45/o2cngvjCYi2ZnvI0P4mU4ZRJfEYC3maCSsPyw=
|
||||
github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1 h1:cKr6St+CtC3/dl/rEBJvlk7A/IN5D5F02GNkGzfbtVU=
|
||||
github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7NkwbjlijluLsrIbu/iyl35RO4=
|
||||
github.com/aws/aws-sdk-go-v2/service/route53 v1.30.2/go.mod h1:TQZBt/WaQy+zTHoW++rnl8JBrmZ0VO6EUbVua1+foCA=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.6 h1:zzTm99krKsFcF4N7pu2z17yCcAZpQYZ7jnJZPIgEMXE=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.6/go.mod h1:PudwVKUTApfm0nYaPutOXaKdPKTlZYClGBQpVIRdcbs=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM=
|
||||
github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw=
|
||||
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0=
|
||||
@@ -502,10 +531,9 @@ github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 h1:y4B3+GPxKlrigF1ha
|
||||
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
|
||||
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
|
||||
@@ -519,12 +547,16 @@ github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMr
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4=
|
||||
github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
@@ -534,6 +566,7 @@ github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a h1:8d1CEOF1xlde
|
||||
github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2ZOiguV6/NpiDgADjRLPNyZlApIWxKpkT+X8SdY=
|
||||
github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu2VvzMZlOFA=
|
||||
github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
|
||||
github.com/cloudflare/cloudflare-go v0.79.0/go.mod h1:gkHQf9xEubaQPEuerBuoinR9P8bf8a05Lq0X6WKy1Oc=
|
||||
github.com/cloudflare/redoctober v0.0.0-20211013234631-6a74ccc611f6 h1:QKzett0dn5FhjcIHNKSClEilabfhWCnsdijq3ftm9Ms=
|
||||
github.com/cloudflare/redoctober v0.0.0-20211013234631-6a74ccc611f6/go.mod h1:Ikt4Wfpln1YOrak+auA8BNxgiilj0Y2y7nO+aN2eMzk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
@@ -541,6 +574,8 @@ github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nC
|
||||
github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
|
||||
github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ=
|
||||
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
|
||||
github.com/compose-spec/compose-go v1.20.0 h1:h4ZKOst1EF/DwZp7dWkb+wbTVE4nEyT9Lc89to84Ol4=
|
||||
github.com/compose-spec/compose-go v1.20.0/go.mod h1:+MdqXV4RA7wdFsahh/Kb8U0pAJqkg7mr4PM9tFKU8RM=
|
||||
github.com/container-orchestrated-devices/container-device-interface v0.6.1 h1:mz77uJoP8im/4Zins+mPqt677ZMaflhoGaYrRAl5jvA=
|
||||
@@ -584,6 +619,8 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI=
|
||||
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c h1:/ovYnF02fwL0kvspmy9AuyKg1JhdTRUgPw4nUxd9oZM=
|
||||
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
|
||||
github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE=
|
||||
@@ -592,11 +629,9 @@ github.com/dave/jennifer v1.2.0 h1:S15ZkFMRoJ36mGAQgWL1tnr0NQJh9rZ8qatseX/VbBc=
|
||||
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
|
||||
github.com/dchest/blake512 v1.0.0 h1:oDFEQFIqFSeuA34xLtXZ/rWxCXdSjirjzPhey5EUvmA=
|
||||
github.com/dchest/blake512 v1.0.0/go.mod h1:FV1x7xPPLWukZlpDpWQ88rF/SFwZ5qbskrzhLMB92JI=
|
||||
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
|
||||
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
|
||||
github.com/deepmap/oapi-codegen v1.6.0 h1:w/d1ntwh91XI0b/8ja7+u5SvA4IFfM0UNNLmiDR1gg0=
|
||||
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
|
||||
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
|
||||
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
|
||||
@@ -609,19 +644,26 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi
|
||||
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
|
||||
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E=
|
||||
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
|
||||
github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
|
||||
github.com/docker/cli-docs-tool v0.6.0 h1:Z9x10SaZgFaB6jHgz3OWooynhSa40CsWkpe5hEnG/qA=
|
||||
github.com/docker/cli-docs-tool v0.6.0/go.mod h1:zMjqTFCU361PRh8apiXzeAZ1Q/xupbIwTusYpzCXS/o=
|
||||
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=
|
||||
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 h1:iZOop7pqsg+56twTopWgwCGxdB5SI2yDO8Ti7eTRliQ=
|
||||
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
|
||||
github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
|
||||
github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf h1:Yt+4K30SdjOkRoRRm3vYNQgR+/ZIy0RmeUDZo7Y8zeQ=
|
||||
github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
|
||||
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
|
||||
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7 h1:tYwu/z8Y0NkkzGEh3z21mSWggMg4LwLRFucLS7TjARg=
|
||||
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
|
||||
github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM=
|
||||
github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae h1:UTOyRlLeWJrZx+ynml6q6qzZ1uDkJe/0Z5CMZRbEIJg=
|
||||
github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=
|
||||
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
|
||||
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM=
|
||||
github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g=
|
||||
@@ -639,9 +681,12 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
|
||||
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
|
||||
github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c h1:CndMRAH4JIwxbW8KYq6Q+cGWcGHz0FjGR3QqcInWcW0=
|
||||
github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY=
|
||||
github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY=
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/flosch/pongo2/v4 v4.0.2/go.mod h1:B5ObFANs/36VwxxlgKpdchIJHMvHB562PW+BWPhwZD8=
|
||||
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90 h1:WXb3TSNmHp2vHoCroCIB1foO/yQ36swABL8aOVeDpgg=
|
||||
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
|
||||
github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
|
||||
@@ -650,11 +695,13 @@ github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD
|
||||
github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
|
||||
github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 h1:IZqZOB2fydHte3kUgxrzK5E1fW7RQGeDwE8F/ZZnUYc=
|
||||
github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8=
|
||||
github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b/go.mod h1:CDncRYVRSDqwakm282WEkjfaAj1hxU/v5RXxk5nXOiI=
|
||||
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
|
||||
github.com/getkin/kin-openapi v0.61.0 h1:6awGqF5nG5zkVpMsAih1QH4VgzS8phTxECUWIFo7zko=
|
||||
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
|
||||
github.com/getsentry/sentry-go v0.11.0 h1:qro8uttJGvNAMr5CLcFI9CHR0aDzXl0Vs3Pmw/oTPg8=
|
||||
github.com/getsentry/sentry-go v0.11.0/go.mod h1:KBQIxiZAetw62Cj8Ri964vAEWVdgfaUCn30Q3bCvANo=
|
||||
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
|
||||
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
||||
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd h1:r04MMPyLHj/QwZuMJ5+7tJcBr1AQjpiAK/rZWRrQT7o=
|
||||
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
||||
@@ -666,8 +713,13 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72 h1:b+9H1GAsx5RsjvDFLoS5zkNBzIQMuVKUYQDmxU3N5XE=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
|
||||
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
|
||||
github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU=
|
||||
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
@@ -680,6 +732,9 @@ github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
|
||||
github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84=
|
||||
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
|
||||
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
|
||||
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
|
||||
@@ -688,14 +743,20 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF0
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
||||
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw=
|
||||
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
|
||||
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
|
||||
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
|
||||
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
@@ -712,21 +773,31 @@ github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6
|
||||
github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw=
|
||||
github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk=
|
||||
github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY=
|
||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
|
||||
github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
|
||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U=
|
||||
github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg=
|
||||
github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
|
||||
@@ -734,6 +805,7 @@ github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8
|
||||
github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
@@ -744,6 +816,7 @@ github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7
|
||||
github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
|
||||
github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4=
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||
github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gotestyourself/gotestyourself v1.4.0 h1:CDSlSIuRL/Fsc72Ln5lMybtrCvSRDddsHsDRG/nP7Rg=
|
||||
github.com/gotestyourself/gotestyourself v1.4.0/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
|
||||
@@ -756,6 +829,7 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/guptarohit/asciigraph v0.5.5/go.mod h1:dYl5wwK4gNsnFf9Zp+l06rFiDZ5YtXM6x7SRWZ3KGag=
|
||||
github.com/hanwen/go-fuse/v2 v2.2.0 h1:jo5QZYmBLNcl9ovypWaQ5yXMSSV+Ch68xoC3rtZvvBM=
|
||||
github.com/hanwen/go-fuse/v2 v2.2.0/go.mod h1:B1nGE/6RBFyBRC1RRnf23UpwCdyJ31eukw34oAKukAc=
|
||||
github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE=
|
||||
@@ -763,6 +837,8 @@ github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeC
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cty-funcs v0.0.0-20230405223818-a090f58aa992 h1:fYOrSfO5C9PmFGtmRWSYGqq52SOoE2dXMtAn2Xzh1LQ=
|
||||
github.com/hashicorp/go-cty-funcs v0.0.0-20230405223818-a090f58aa992/go.mod h1:Abjk0jbRkDaNCzsRhOv2iDCofYpX1eVsjozoiK63qLA=
|
||||
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
|
||||
@@ -774,6 +850,7 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
|
||||
@@ -783,6 +860,7 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
|
||||
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
|
||||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI=
|
||||
github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE=
|
||||
@@ -794,9 +872,11 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
|
||||
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150 h1:vlNjIqmUZ9CMAWsbURYl3a6wZbw7q5RHVvlXTNS/Bs8=
|
||||
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
|
||||
github.com/hydrogen18/memlistener v1.0.0/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
|
||||
github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
|
||||
github.com/influxdata/flux v0.65.1 h1:77BcVUCzvN5HMm8+j9PRBQ4iZcu98Dl4Y9rf+J5vhnc=
|
||||
github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
|
||||
@@ -804,9 +884,12 @@ github.com/influxdata/influxdb v1.8.3 h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vA
|
||||
github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
|
||||
github.com/influxdata/influxdb-client-go/v2 v2.4.0 h1:HGBfZYStlx3Kqvsv1h2pJixbCl/jhnFtxpKFAv9Tu5k=
|
||||
github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c h1:qSHzRbhzK8RdXOsAdfDgO49TtqC1oZ+acxPrkfTxcCs=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385 h1:ED4e5Cc3z5vSN2Tz2GkOHN7vs4Sxe2yds6CXvDnvZFE=
|
||||
github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
|
||||
github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
|
||||
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU=
|
||||
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
|
||||
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 h1:vilfsDSy7TDxedi9gyBkMvAirat/oRcL0lFdJBf6tdM=
|
||||
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
|
||||
@@ -820,9 +903,12 @@ github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368 h1:+TUUmaF
|
||||
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
|
||||
github.com/intel/goresctrl v0.3.0 h1:K2D3GOzihV7xSBedGxONSlaw/un1LZgWsc9IfqipN4c=
|
||||
github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk=
|
||||
github.com/iris-contrib/jade v1.1.4/go.mod h1:EDqR+ur9piDl6DUgs6qRrlfzmlx/D5UybogqrXvJTBE=
|
||||
github.com/iris-contrib/schema v0.0.6/go.mod h1:iYszG0IOsuIsfzjymw1kMzTL8YQcCWlm65f3wX8J5iA=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89 h1:12K8AlpT0/6QUXSfV0yi4Q0jkbq8NDtIKFtF61AoqV0=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@@ -834,6 +920,8 @@ github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2E
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
@@ -850,6 +938,13 @@ github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559 h1:0VWDXPNE0brOek1Q8b
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kataras/blocks v0.0.7/go.mod h1:UJIU97CluDo0f+zEjbnbkeMRlvYORtmc1304EeyXf4I=
github.com/kataras/golog v0.1.7/go.mod h1:jOSQ+C5fUqsNSwurB/oAHq1IFSb0KI3l6GMa7xB6dZA=
github.com/kataras/iris/v12 v12.2.0-beta5/go.mod h1:q26aoWJ0Knx/00iPKg5iizDK7oQQSPjbD8np0XDh6dc=
github.com/kataras/pio v0.0.11/go.mod h1:38hH6SWH6m4DKSYmRhlrCJ5WItwWgCVrTNU62XZyUvI=
github.com/kataras/sitemap v0.0.6/go.mod h1:dW4dOCNs896OR1HmG+dMLdT7JjDk7mYBzoIRwuj5jA4=
github.com/kataras/tunnel v0.0.4/go.mod h1:9FkU4LaeifdMWqZu7o20ojmW4B7hdhv2CMLwfnHGpYw=
github.com/kilic/bls12-381 v0.1.0/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
@@ -868,17 +963,18 @@ github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada h1:3L+neHp83cTje
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/knz/go-libedit v1.10.1 h1:0pHpWtx9vcvC0xGZqEQlQdfSQs7WRlAjuPvk3fOZDCo=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/kylelemons/go-gypsy v1.0.0 h1:7/wQ7A3UL1bnqRMnZ6T8cwCOArfZCxFmb1iTxaOOo1s=
github.com/kylelemons/go-gypsy v1.0.0/go.mod h1:chkXM0zjdpXOiqkCW1XcCHDfjfk14PH2KKkQWxfJUcU=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/labstack/echo/v4 v4.2.1 h1:LF5Iq7t/jrtUuSutNuiEWtB5eiHfZ5gSe2pcu5exjQw=
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
github.com/labstack/echo/v4 v4.9.0/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks=
github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/lestrrat-go/backoff/v2 v2.0.8 h1:oNb5E5isby2kiro9AgdHLv5N5tint1AnDVVf2E2un5A=
github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
github.com/lestrrat-go/blackmagic v1.0.0 h1:XzdxDbuQTz0RZZEmdU7cnQxUtFUzgCSPq8RCz4BxIi4=
@@ -895,6 +991,7 @@ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 h1:jUp75lepDg0phMUJBCmvaeFDldD2N3S1lBuPwUTszio=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
github.com/mailgun/raymond/v2 v2.0.46/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd h1:HvFwW+cm9bCbZ/+vuGNq7CRWXql8c0y8nGeYpqmpvmk=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@@ -903,6 +1000,7 @@ github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw=
@@ -910,6 +1008,7 @@ github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HN
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -919,6 +1018,7 @@ github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104 h1:d8RFOZ2IiFtFWBcKEH
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/microcosm-cc/bluemonday v1.0.21/go.mod h1:ytNkv4RrDrLJ2pqlsSI46O6IVXmZOBBD4SaJyDwwTkM=
github.com/microsoft/go-mssqldb v1.6.0 h1:mM3gYdVwEPFrlg/Dvr2DNVEgYFG7L42l+dGc67NNNpc=
github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
@@ -988,6 +1088,7 @@ github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI=
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
github.com/pkg/profile v1.5.0 h1:042Buzk+NhDI+DeSAA62RwJL8VAuZUMQZUjCsRz1Mug=
github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
@@ -998,11 +1099,19 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc=
github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7/go.mod h1:IToEjHuttnUzwZI5KBSM/LOOW3qLbbrHOEfp3SbECGY=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0=
@@ -1010,13 +1119,30 @@ github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1
github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sagikazarmark/crypt v0.19.0/go.mod h1:c6vimRziqqERhtSe0MhIvzE1w54FrCHtrXb5NH/ja78=
github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
github.com/scroll-tech/da-codec v0.0.0-20240605080813-32bfc9fccde7/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/da-codec v0.0.0-20241006192342-95d2bc79b7e8/go.mod h1:6jxEQvNc7GQKMSUi25PthAUY3WnZL8CN0yWivBgAXi0=
github.com/scroll-tech/da-codec v0.0.1/go.mod h1:odz1ck3umvYccCG03osaQBISAYGinZktZYbpk94fYRE=
github.com/scroll-tech/da-codec v0.1.0/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/da-codec v0.1.1-0.20241004100020-91ac897e28fc/go.mod h1:6jxEQvNc7GQKMSUi25PthAUY3WnZL8CN0yWivBgAXi0=
github.com/scroll-tech/da-codec v0.1.1-0.20241005161415-e6c8965dc0d7 h1:4QURqyAyOsUmLDhH/2BUQfWJk+RvsWHcrSyx/ikF/i4=
github.com/scroll-tech/da-codec v0.1.1-0.20241005161415-e6c8965dc0d7/go.mod h1:6jxEQvNc7GQKMSUi25PthAUY3WnZL8CN0yWivBgAXi0=
github.com/scroll-tech/da-codec v0.1.1-0.20241005165808-ba853e3447d6 h1:cObIVAkNWfLxBxAOot+ERoszNtCTZaf8vSeQ7N5VyvQ=
github.com/scroll-tech/da-codec v0.1.1-0.20241005165808-ba853e3447d6/go.mod h1:6jxEQvNc7GQKMSUi25PthAUY3WnZL8CN0yWivBgAXi0=
github.com/scroll-tech/da-codec v0.1.1-0.20241005172014-aca0bef21638 h1:2KIfClLB3iKr593Dia79/vkqMsqc29H0zcxp1/jel4k=
github.com/scroll-tech/da-codec v0.1.1-0.20241005172014-aca0bef21638/go.mod h1:6jxEQvNc7GQKMSUi25PthAUY3WnZL8CN0yWivBgAXi0=
github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b h1:5H6V6ybacXFJ2ti+eFwtf+12Otufod6goxK6/u7Nu1k=
github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b/go.mod h1:48uxaqVgpD8ulH8p+nrBtfeLHZ9tX82bVVdPNkW3rPE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250206083728-ea43834c198f/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY=
@@ -1031,6 +1157,7 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5I
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU=
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
@@ -1048,10 +1175,13 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/tdewolff/minify/v2 v2.12.4/go.mod h1:h+SRvSIX3kwgwTFOpSckvSxgax3uy8kZTSF1Ojrr3bk=
github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs=
github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
@@ -1066,8 +1196,10 @@ github.com/ugorji/go v1.2.7 h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo=
github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8=
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.40.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
@@ -1079,6 +1211,8 @@ github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0m
github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/willf/bitset v1.1.3 h1:ekJIKh6+YbUIVt9DfNbkR5d6aFcFTLDRyJNAACURBg8=
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
@@ -1093,6 +1227,9 @@ github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6Ut
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=
github.com/yashtewari/glob-intersection v0.1.0 h1:6gJvMYQlTDOL3dMsPF6J0+26vwX9MB8/1q3uAdhmTrg=
github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
github.com/yosssi/ace v0.0.5/go.mod h1:ALfIzm2vT7t5ZE7uoIZqF3TQ7SAOyupFZnkrF5id+K0=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA=
@@ -1129,6 +1266,8 @@ go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvS
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
@@ -1147,8 +1286,7 @@ go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxt
go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
@@ -1161,9 +1299,11 @@ golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
@@ -1178,6 +1318,9 @@ golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm0
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
|
||||
@@ -1189,12 +1332,15 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
@@ -1205,26 +1351,46 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
|
||||
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
|
||||
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
|
||||
golang.org/x/perf v0.0.0-20230113213139-801c7ef9e5c5/go.mod h1:UBKtEnL8aqnd+0JHqZ+2qoMDwtuy6cYhhKNoHLBiTQc=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -1236,21 +1402,43 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
@@ -1261,7 +1449,9 @@ golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2 h1:IRJeR9r1pYWsHKTRe/I
|
||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
@@ -1271,6 +1461,7 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -1290,12 +1481,30 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
@@ -1316,6 +1525,15 @@ google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEn
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY=
|
||||
google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI=
|
||||
google.golang.org/api v0.162.0/go.mod h1:6SulDkfoBIg4NFmCuZ39XeeAgSHCPecfSUuDyYlAHs0=
|
||||
@@ -1326,6 +1544,7 @@ google.golang.org/api v0.171.0/go.mod h1:Hnq5AHm4OTMt2BUVjael2CWZFD6vksJdWCWiUAm
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
@@ -1339,6 +1558,22 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx
|
||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4=
|
||||
google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg=
|
||||
google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY=
|
||||
@@ -1369,6 +1604,10 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
|
||||
@@ -1385,8 +1624,6 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQ
|
||||
gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
|
||||
gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=
|
||||
@@ -1400,6 +1637,8 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/component-base v0.26.7 h1:uqsOyZh0Zqoaup8tmHa491D/CvgFdGUs+X2H/inNUKM=
|
||||
k8s.io/component-base v0.26.7/go.mod h1:CZe1HTmX/DQdeBrb9XYOXzs96jXth8ZbFvhLMsoJLUg=
|
||||
k8s.io/cri-api v0.27.1 h1:KWO+U8MfI9drXB/P4oU9VchaWYOlwDglJZVHWMpTT3Q=
|
||||
@@ -1416,6 +1655,8 @@ nullprogram.com/x/optparse v1.0.0 h1:xGFgVi5ZaWOnYdac2foDT3vg0ZZC9ErXFV57mr4OHrI
|
||||
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.37 h1:fAPTNEpzQMOLMGwOHNbUkR2xXTQwMJOZYNx+/mLlOh0=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.37/go.mod h1:vfnxT4FXNT8eGvO+xi/DsyC/qHmdujqwrUa1WSspCsk=
|
||||
tags.cncf.io/container-device-interface/specs-go v0.6.0 h1:V+tJJN6dqu8Vym6p+Ru+K5mJ49WL6Aoc5SJFSY0RLsQ=
373
prover/Cargo.lock
generated
@@ -441,6 +441,55 @@ version = "1.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
|
||||
|
||||
[[package]]
|
||||
name = "axum"
|
||||
version = "0.6.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"axum-core",
|
||||
"bitflags 1.3.2",
|
||||
"bytes",
|
||||
"futures-util",
|
||||
"http 0.2.12",
|
||||
"http-body 0.4.6",
|
||||
"hyper 0.14.28",
|
||||
"itoa",
|
||||
"matchit",
|
||||
"memchr",
|
||||
"mime",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
"rustversion",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_path_to_error",
|
||||
"serde_urlencoded",
|
||||
"sync_wrapper",
|
||||
"tokio",
|
||||
"tower",
|
||||
"tower-layer",
|
||||
"tower-service",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "axum-core"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes",
|
||||
"futures-util",
|
||||
"http 0.2.12",
|
||||
"http-body 0.4.6",
|
||||
"mime",
|
||||
"rustversion",
|
||||
"tower-layer",
|
||||
"tower-service",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "backtrace"
|
||||
version = "0.3.71"
|
||||
@@ -501,6 +550,26 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bindgen"
|
||||
version = "0.69.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088"
|
||||
dependencies = [
|
||||
"bitflags 2.5.0",
|
||||
"cexpr",
|
||||
"clang-sys",
|
||||
"itertools 0.10.5",
|
||||
"lazy_static",
|
||||
"lazycell",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"regex",
|
||||
"rustc-hash",
|
||||
"shlex",
|
||||
"syn 2.0.66",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bit-set"
|
||||
version = "0.5.3"
|
||||
@@ -703,6 +772,17 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bzip2-sys"
|
||||
version = "0.1.11+1.0.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
"pkg-config",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "c-kzg"
|
||||
version = "1.0.2"
|
||||
@@ -728,6 +808,15 @@ dependencies = [
|
||||
"once_cell",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cexpr"
|
||||
version = "0.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766"
|
||||
dependencies = [
|
||||
"nom",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "0.1.10"
|
||||
@@ -764,6 +853,17 @@ dependencies = [
|
||||
"inout",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clang-sys"
|
||||
version = "1.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4"
|
||||
dependencies = [
|
||||
"glob",
|
||||
"libc",
|
||||
"libloading",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "4.5.4"
|
||||
@@ -1136,6 +1236,12 @@ dependencies = [
|
||||
"subtle",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dotenv"
|
||||
version = "0.15.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f"
|
||||
|
||||
[[package]]
|
||||
name = "dotenvy"
|
||||
version = "0.15.7"
|
||||
@@ -2551,18 +2657,60 @@ dependencies = [
|
||||
"spin 0.5.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lazycell"
|
||||
version = "1.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.155"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
|
||||
|
||||
[[package]]
|
||||
name = "libloading"
|
||||
version = "0.8.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
|
||||
dependencies = [
|
||||
"cfg-if 1.0.0",
|
||||
"windows-targets 0.48.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "libm"
|
||||
version = "0.2.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
|
||||
|
||||
[[package]]
|
||||
name = "librocksdb-sys"
|
||||
version = "0.17.1+9.9.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2b7869a512ae9982f4d46ba482c2a304f1efd80c6412a3d4bf57bb79a619679f"
|
||||
dependencies = [
|
||||
"bindgen",
|
||||
"bzip2-sys",
|
||||
"cc",
|
||||
"libc",
|
||||
"libz-sys",
|
||||
"lz4-sys",
|
||||
"zstd-sys 2.0.13+zstd.1.5.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "libz-sys"
|
||||
version = "1.1.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"pkg-config",
|
||||
"vcpkg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "linux-raw-sys"
|
||||
version = "0.4.14"
|
||||
@@ -2612,6 +2760,31 @@ dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lz4-sys"
|
||||
version = "1.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "109de74d5d2353660401699a4174a4ff23fcc649caf553df71933c7fb45ad868"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "matchers"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
|
||||
dependencies = [
|
||||
"regex-automata 0.1.10",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "matchit"
|
||||
version = "0.7.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94"
|
||||
|
||||
[[package]]
|
||||
name = "maybe-rayon"
|
||||
version = "0.1.1"
|
||||
@@ -2634,6 +2807,12 @@ version = "0.3.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
|
||||
|
||||
[[package]]
|
||||
name = "minimal-lexical"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
|
||||
|
||||
[[package]]
|
||||
name = "miniz_oxide"
|
||||
version = "0.7.3"
|
||||
@@ -2746,6 +2925,26 @@ dependencies = [
|
||||
"tempfile",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nom"
|
||||
version = "7.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
"minimal-lexical",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nu-ansi-term"
|
||||
version = "0.46.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
|
||||
dependencies = [
|
||||
"overload",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num"
|
||||
version = "0.4.3"
|
||||
@@ -2974,6 +3173,12 @@ dependencies = [
|
||||
"vcpkg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "overload"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
|
||||
|
||||
[[package]]
|
||||
name = "pairing"
|
||||
version = "0.23.0"
|
||||
@@ -3267,7 +3472,7 @@ dependencies = [
|
||||
"rand",
|
||||
"rand_chacha",
|
||||
"rand_xorshift",
|
||||
"regex-syntax",
|
||||
"regex-syntax 0.8.3",
|
||||
"rusty-fork",
|
||||
"tempfile",
|
||||
"unarray",
|
||||
@@ -3278,6 +3483,7 @@ name = "prover"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
"base64 0.13.1",
|
||||
"clap",
|
||||
"ctor 0.2.8",
|
||||
@@ -3298,6 +3504,7 @@ dependencies = [
|
||||
"reqwest-middleware",
|
||||
"reqwest-retry",
|
||||
"rlp",
|
||||
"scroll-proving-sdk",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sled",
|
||||
@@ -3489,8 +3696,17 @@ checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c"
|
||||
dependencies = [
|
||||
"aho-corasick",
|
||||
"memchr",
|
||||
"regex-automata",
|
||||
"regex-syntax",
|
||||
"regex-automata 0.4.6",
|
||||
"regex-syntax 0.8.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex-automata"
|
||||
version = "0.1.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
|
||||
dependencies = [
|
||||
"regex-syntax 0.6.29",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3501,9 +3717,15 @@ checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea"
|
||||
dependencies = [
|
||||
"aho-corasick",
|
||||
"memchr",
|
||||
"regex-syntax",
|
||||
"regex-syntax 0.8.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex-syntax"
|
||||
version = "0.6.29"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
|
||||
|
||||
[[package]]
|
||||
name = "regex-syntax"
|
||||
version = "0.8.3"
|
||||
@@ -3787,6 +4009,16 @@ dependencies = [
|
||||
"syn 1.0.109",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rocksdb"
|
||||
version = "0.23.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "26ec73b20525cb235bad420f911473b69f9fe27cc856c5461bccd7e4af037f43"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"librocksdb-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ruint"
|
||||
version = "1.12.1"
|
||||
@@ -3995,6 +4227,36 @@ version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
|
||||
|
||||
[[package]]
name = "scroll-proving-sdk"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/scroll-proving-sdk.git?rev=160db6c#160db6ceec45235f13b0f2581802a614f7e90a4b"
dependencies = [
"anyhow",
"async-trait",
"axum",
"clap",
"dotenv",
"ethers-core 2.0.7 (git+https://github.com/scroll-tech/ethers-rs.git?branch=v2.0.7)",
"ethers-providers 2.0.7 (git+https://github.com/scroll-tech/ethers-rs.git?branch=v2.0.7)",
"hex",
"http 1.1.0",
"log",
"prover 0.13.0",
"rand",
"reqwest 0.12.4",
"reqwest-middleware",
"reqwest-retry",
"rlp",
"rocksdb",
"serde",
"serde_json",
"tiny-keccak",
"tokio",
"tracing",
"tracing-subscriber",
]

[[package]]
|
||||
name = "scrypt"
|
||||
version = "0.10.0"
|
||||
@@ -4167,6 +4429,16 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_path_to_error"
|
||||
version = "0.1.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6"
|
||||
dependencies = [
|
||||
"itoa",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_stacker"
|
||||
version = "0.1.11"
|
||||
@@ -4265,6 +4537,30 @@ dependencies = [
|
||||
"cfg-if 1.0.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sharded-slab"
|
||||
version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6"
|
||||
dependencies = [
|
||||
"lazy_static",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "shlex"
|
||||
version = "1.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
|
||||
|
||||
[[package]]
|
||||
name = "signal-hook-registry"
|
||||
version = "1.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1"
|
||||
dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "signature"
|
||||
version = "2.2.0"
|
||||
@@ -4580,6 +4876,16 @@ dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thread_local"
|
||||
version = "1.1.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c"
|
||||
dependencies = [
|
||||
"cfg-if 1.0.0",
|
||||
"once_cell",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "threadpool"
|
||||
version = "1.8.1"
|
||||
@@ -4623,11 +4929,26 @@ dependencies = [
|
||||
"bytes",
|
||||
"libc",
|
||||
"mio",
|
||||
"num_cpus",
|
||||
"parking_lot 0.12.3",
|
||||
"pin-project-lite",
|
||||
"signal-hook-registry",
|
||||
"socket2",
|
||||
"tokio-macros",
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-macros"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.66",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-native-tls"
|
||||
version = "0.3.1"
|
||||
@@ -4717,6 +5038,7 @@ dependencies = [
|
||||
"tokio",
|
||||
"tower-layer",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4737,6 +5059,7 @@ version = "0.1.40"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef"
|
||||
dependencies = [
|
||||
"log",
|
||||
"pin-project-lite",
|
||||
"tracing-attributes",
|
||||
"tracing-core",
|
||||
@@ -4760,6 +5083,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54"
|
||||
dependencies = [
|
||||
"once_cell",
|
||||
"valuable",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4772,6 +5096,35 @@ dependencies = [
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-log"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3"
|
||||
dependencies = [
|
||||
"log",
|
||||
"once_cell",
|
||||
"tracing-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-subscriber"
|
||||
version = "0.3.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b"
|
||||
dependencies = [
|
||||
"matchers",
|
||||
"nu-ansi-term",
|
||||
"once_cell",
|
||||
"regex",
|
||||
"sharded-slab",
|
||||
"smallvec",
|
||||
"thread_local",
|
||||
"tracing",
|
||||
"tracing-core",
|
||||
"tracing-log",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "try-lock"
|
||||
version = "0.2.5"
|
||||
@@ -5478,7 +5831,7 @@ name = "zstd-safe"
|
||||
version = "7.0.0"
|
||||
source = "git+https://github.com/scroll-tech/zstd-rs?branch=hack/mul-block#5c0892b6567dab31394d701477183ce9d6a32aca"
|
||||
dependencies = [
|
||||
"zstd-sys",
|
||||
"zstd-sys 2.0.9+zstd.1.5.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -5489,3 +5842,13 @@ dependencies = [
|
||||
"cc",
|
||||
"pkg-config",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zstd-sys"
|
||||
version = "2.0.13+zstd.1.5.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"pkg-config",
|
||||
]
|
||||
|
||||
@@ -31,6 +31,7 @@ halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover_darwin = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.12.2", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
prover_darwin_v2 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.13.1", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "160db6c"}
base64 = "0.13.1"
reqwest = { version = "0.12.4", features = ["gzip"] }
reqwest-middleware = "0.3"
@@ -42,6 +43,7 @@ rand = "0.8.5"
eth-keystore = "0.5.0"
rlp = "0.5.2"
tokio = "1.37.0"
async-trait = "0.1"
sled = "0.34.7"
http = "1.1.0"
clap = { version = "4.5", features = ["derive"] }

@@ -1,26 +1,30 @@
|
||||
{
|
||||
"prover_name": "prover-1",
|
||||
"keystore_path": "keystore.json",
|
||||
"keystore_password": "prover-pwd",
|
||||
"db_path": "unique-db-path-for-prover-1",
|
||||
"prover_type": 2,
|
||||
"low_version_circuit": {
|
||||
"hard_fork_name": "bernoulli",
|
||||
"params_path": "params",
|
||||
"assets_path": "assets"
|
||||
},
|
||||
"high_version_circuit": {
|
||||
"hard_fork_name": "curie",
|
||||
"params_path": "params",
|
||||
"assets_path": "assets"
|
||||
},
|
||||
"sdk_config": {
|
||||
"prover_name_prefix": "prover-1",
|
||||
"keys_dir": "keys",
|
||||
"coordinator": {
|
||||
"base_url": "http://localhost:8555",
|
||||
"retry_count": 10,
|
||||
"retry_wait_time_sec": 10,
|
||||
"connection_timeout_sec": 30
|
||||
"base_url": "http://localhost:8555",
|
||||
"retry_count": 10,
|
||||
"retry_wait_time_sec": 10,
|
||||
"connection_timeout_sec": 30
|
||||
},
|
||||
"l2geth": {
|
||||
"endpoint": "http://localhost:9999"
|
||||
}
|
||||
"endpoint": "http://localhost:9999"
|
||||
},
|
||||
"prover": {
|
||||
"circuit_types": [1,2,3],
|
||||
"circuit_version": "v0.13.1"
|
||||
},
|
||||
"db_path": "unique-db-path-for-prover-1"
|
||||
},
|
||||
"low_version_circuit": {
|
||||
"hard_fork_name": "Darvin",
|
||||
"params_path": "params",
|
||||
"assets_path": "assets"
|
||||
},
|
||||
"high_version_circuit": {
|
||||
"hard_fork_name": "DarvinV2",
|
||||
"params_path": "params",
|
||||
"assets_path": "assets"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,55 +1,4 @@
|
||||
use anyhow::{bail, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fs::File;
|
||||
|
||||
use crate::types::ProverType;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct CircuitConfig {
|
||||
pub hard_fork_name: String,
|
||||
pub params_path: String,
|
||||
pub assets_path: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct CoordinatorConfig {
|
||||
pub base_url: String,
|
||||
pub retry_count: u32,
|
||||
pub retry_wait_time_sec: u64,
|
||||
pub connection_timeout_sec: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct L2GethConfig {
|
||||
pub endpoint: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct Config {
|
||||
pub prover_name: String,
|
||||
pub keystore_path: String,
|
||||
pub keystore_password: String,
|
||||
pub db_path: String,
|
||||
pub prover_type: ProverType,
|
||||
pub low_version_circuit: CircuitConfig,
|
||||
pub high_version_circuit: CircuitConfig,
|
||||
pub coordinator: CoordinatorConfig,
|
||||
pub l2geth: Option<L2GethConfig>,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
pub fn from_reader<R>(reader: R) -> Result<Self>
|
||||
where
|
||||
R: std::io::Read,
|
||||
{
|
||||
serde_json::from_reader(reader).map_err(|e| anyhow::anyhow!(e))
|
||||
}
|
||||
|
||||
pub fn from_file(file_name: String) -> Result<Self> {
|
||||
let file = File::open(file_name)?;
|
||||
Config::from_reader(&file)
|
||||
}
|
||||
}
|
||||
|
||||
static SCROLL_PROVER_ASSETS_DIR_ENV_NAME: &str = "SCROLL_PROVER_ASSETS_DIR";
|
||||
static mut SCROLL_PROVER_ASSETS_DIRS: Vec<String> = vec![];
|
||||
|
||||
@@ -1,142 +0,0 @@
|
||||
mod api;
|
||||
mod errors;
|
||||
pub mod listener;
|
||||
pub mod types;
|
||||
|
||||
use anyhow::{bail, Context, Ok, Result};
|
||||
use std::rc::Rc;
|
||||
|
||||
use api::Api;
|
||||
use errors::*;
|
||||
use listener::Listener;
|
||||
use tokio::runtime::Runtime;
|
||||
use types::*;
|
||||
|
||||
use crate::{config::Config, key_signer::KeySigner};
|
||||
|
||||
pub use errors::ProofStatusNotOKError;
|
||||
|
||||
pub struct CoordinatorClient<'a> {
|
||||
api: Api,
|
||||
token: Option<String>,
|
||||
config: &'a Config,
|
||||
key_signer: Rc<KeySigner>,
|
||||
rt: Runtime,
|
||||
listener: Box<dyn Listener>,
|
||||
vks: Vec<String>,
|
||||
}
|
||||
|
||||
impl<'a> CoordinatorClient<'a> {
|
||||
pub fn new(
|
||||
config: &'a Config,
|
||||
key_signer: Rc<KeySigner>,
|
||||
listener: Box<dyn Listener>,
|
||||
vks: Vec<String>,
|
||||
) -> Result<Self> {
|
||||
let rt = tokio::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.build()?;
|
||||
|
||||
let api = Api::new(
|
||||
&config.coordinator.base_url,
|
||||
core::time::Duration::from_secs(config.coordinator.connection_timeout_sec),
|
||||
config.coordinator.retry_count,
|
||||
config.coordinator.retry_wait_time_sec,
|
||||
)?;
|
||||
let mut client = Self {
|
||||
api,
|
||||
token: None,
|
||||
config,
|
||||
key_signer,
|
||||
rt,
|
||||
listener,
|
||||
vks,
|
||||
};
|
||||
client.login()?;
|
||||
Ok(client)
|
||||
}
|
||||
|
||||
fn login(&mut self) -> Result<()> {
|
||||
let api = &self.api;
|
||||
let challenge_response = self.rt.block_on(api.challenge())?;
|
||||
if challenge_response.errcode != ErrorCode::Success {
|
||||
bail!("challenge failed: {}", challenge_response.errmsg)
|
||||
}
|
||||
let mut token: String;
|
||||
if let Some(r) = challenge_response.data {
|
||||
token = r.token;
|
||||
} else {
|
||||
bail!("challenge failed: got empty token")
|
||||
}
|
||||
|
||||
let login_message = LoginMessage {
|
||||
challenge: token.clone(),
|
||||
prover_name: self.config.prover_name.clone(),
|
||||
prover_version: crate::version::get_version(),
|
||||
prover_types: vec![self.config.prover_type],
|
||||
vks: self.vks.clone(),
|
||||
};
|
||||
|
||||
let buffer = rlp::encode(&login_message);
|
||||
let signature = self.key_signer.sign_buffer(&buffer)?;
|
||||
let login_request = LoginRequest {
|
||||
message: login_message,
|
||||
public_key: self.key_signer.get_public_key(),
|
||||
signature,
|
||||
};
|
||||
let login_response = self.rt.block_on(api.login(&login_request, &token))?;
|
||||
if login_response.errcode != ErrorCode::Success {
|
||||
bail!("login failed: {}", login_response.errmsg)
|
||||
}
|
||||
if let Some(r) = login_response.data {
|
||||
token = r.token;
|
||||
} else {
|
||||
bail!("login failed: got empty token")
|
||||
}
|
||||
self.token = Some(token);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
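// Runs `f` against the coordinator; if the response reports an expired JWT, re-login and retry the action.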
fn action_with_re_login<T, F, R>(&mut self, req: &R, mut f: F) -> Result<Response<T>>
|
||||
where
|
||||
F: FnMut(&mut Self, &R) -> Result<Response<T>>,
|
||||
{
|
||||
let response = f(self, req)?;
|
||||
if response.errcode == ErrorCode::ErrJWTTokenExpired {
|
||||
log::info!("JWT expired, attempting to re-login");
|
||||
self.login().context("JWT expired, re-login failed")?;
|
||||
log::info!("re-login success");
|
||||
return self.action_with_re_login(req, f);
|
||||
} else if response.errcode != ErrorCode::Success {
|
||||
bail!("action failed: {}", response.errmsg)
|
||||
}
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
fn do_get_task(&mut self, req: &GetTaskRequest) -> Result<Response<GetTaskResponseData>> {
|
||||
self.rt
|
||||
.block_on(self.api.get_task(req, self.token.as_ref().unwrap()))
|
||||
}
|
||||
|
||||
pub fn get_task(&mut self, req: &GetTaskRequest) -> Result<Response<GetTaskResponseData>> {
|
||||
self.action_with_re_login(req, |s, req| s.do_get_task(req))
|
||||
}
|
||||
|
||||
fn do_submit_proof(
|
||||
&mut self,
|
||||
req: &SubmitProofRequest,
|
||||
) -> Result<Response<SubmitProofResponseData>> {
|
||||
let response = self
|
||||
.rt
|
||||
.block_on(self.api.submit_proof(req, self.token.as_ref().unwrap()))?;
|
||||
self.listener.on_proof_submitted(req);
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
pub fn submit_proof(
|
||||
&mut self,
|
||||
req: &SubmitProofRequest,
|
||||
) -> Result<Response<SubmitProofResponseData>> {
|
||||
self.action_with_re_login(req, |s, req| s.do_submit_proof(req))
|
||||
}
|
||||
}
|
||||
@@ -1,144 +0,0 @@
|
||||
use crate::{coordinator_client::ProofStatusNotOKError, types::ProofStatus};
|
||||
|
||||
use super::{errors::*, types::*};
|
||||
use anyhow::{bail, Result};
|
||||
use core::time::Duration;
|
||||
use reqwest::{header::CONTENT_TYPE, Url};
|
||||
use reqwest_middleware::{ClientBuilder, ClientWithMiddleware};
|
||||
use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware};
|
||||
use serde::Serialize;
|
||||
|
||||
pub struct Api {
|
||||
url_base: Url,
|
||||
send_timeout: Duration,
|
||||
pub client: ClientWithMiddleware,
|
||||
}
|
||||
|
||||
impl Api {
|
||||
pub fn new(
|
||||
url_base: &str,
|
||||
send_timeout: Duration,
|
||||
retry_count: u32,
|
||||
retry_wait_time_sec: u64,
|
||||
) -> Result<Self> {
|
||||
let retry_wait_duration = core::time::Duration::from_secs(retry_wait_time_sec);
|
||||
let retry_policy = ExponentialBackoff::builder()
|
||||
.retry_bounds(retry_wait_duration / 2, retry_wait_duration)
|
||||
.build_with_max_retries(retry_count);
|
||||
|
||||
let client = ClientBuilder::new(reqwest::Client::new())
|
||||
.with(RetryTransientMiddleware::new_with_policy(retry_policy))
|
||||
.build();
|
||||
|
||||
Ok(Self {
|
||||
url_base: Url::parse(url_base)?,
|
||||
send_timeout,
|
||||
client,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn challenge(&self) -> Result<Response<ChallengeResponseData>> {
|
||||
let method = "/coordinator/v1/challenge";
|
||||
let url = self.build_url(method)?;
|
||||
|
||||
let response = self
|
||||
.client
|
||||
.get(url)
|
||||
.header(CONTENT_TYPE, "application/json")
|
||||
.timeout(self.send_timeout)
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
let response_body = response.text().await?;
|
||||
|
||||
serde_json::from_str(&response_body).map_err(|e| anyhow::anyhow!(e))
|
||||
}
|
||||
|
||||
pub async fn login(
|
||||
&self,
|
||||
req: &LoginRequest,
|
||||
token: &String,
|
||||
) -> Result<Response<LoginResponseData>> {
|
||||
let method = "/coordinator/v1/login";
|
||||
self.post_with_token(method, req, token).await
|
||||
}
|
||||
|
||||
pub async fn get_task(
|
||||
&self,
|
||||
req: &GetTaskRequest,
|
||||
token: &String,
|
||||
) -> Result<Response<GetTaskResponseData>> {
|
||||
let method = "/coordinator/v1/get_task";
|
||||
self.post_with_token(method, req, token).await
|
||||
}
|
||||
|
||||
pub async fn submit_proof(
|
||||
&self,
|
||||
req: &SubmitProofRequest,
|
||||
token: &String,
|
||||
) -> Result<Response<SubmitProofResponseData>> {
|
||||
let method = "/coordinator/v1/submit_proof";
|
||||
let response = self
|
||||
.post_with_token::<SubmitProofRequest, Response<SubmitProofResponseData>>(
|
||||
method, req, token,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// When the request's status is already not OK, mark the error returned from the
// coordinator so that it can be ignored later.
|
||||
if response.errcode == ErrorCode::ErrCoordinatorHandleZkProofFailure
|
||||
&& req.status != ProofStatus::Ok
|
||||
&& response
|
||||
.errmsg
|
||||
.contains("validator failure proof msg status not ok")
|
||||
{
|
||||
return Err(anyhow::anyhow!(ProofStatusNotOKError));
|
||||
}
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
async fn post_with_token<Req, Resp>(
|
||||
&self,
|
||||
method: &str,
|
||||
req: &Req,
|
||||
token: &String,
|
||||
) -> Result<Resp>
|
||||
where
|
||||
Req: ?Sized + Serialize,
|
||||
Resp: serde::de::DeserializeOwned,
|
||||
{
|
||||
let url = self.build_url(method)?;
|
||||
let request_body = serde_json::to_string(req)?;
|
||||
|
||||
log::info!("[coordinator client], {method}, request: {request_body}");
|
||||
let response = self
|
||||
.client
|
||||
.post(url)
|
||||
.header(CONTENT_TYPE, "application/json")
|
||||
.bearer_auth(token)
|
||||
.body(request_body)
|
||||
.timeout(self.send_timeout)
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
if response.status() != http::status::StatusCode::OK {
|
||||
log::error!(
|
||||
"[coordinator client], {method}, status not ok: {}",
|
||||
response.status()
|
||||
);
|
||||
bail!(
|
||||
"[coordinator client], {method}, status not ok: {}",
|
||||
response.status()
|
||||
)
|
||||
}
|
||||
|
||||
let response_body = response.text().await?;
|
||||
|
||||
log::info!("[coordinator client], {method}, response: {response_body}");
|
||||
serde_json::from_str(&response_body).map_err(|e| anyhow::anyhow!(e))
|
||||
}
|
||||
|
||||
fn build_url(&self, method: &str) -> Result<Url> {
|
||||
self.url_base.join(method).map_err(|e| anyhow::anyhow!(e))
|
||||
}
|
||||
}
|
||||
@@ -1,65 +0,0 @@
|
||||
use serde::{Deserialize, Deserializer};
|
||||
use std::fmt;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
pub enum ErrorCode {
|
||||
Success,
|
||||
InternalServerError,
|
||||
|
||||
ErrProverStatsAPIParameterInvalidNo,
|
||||
ErrProverStatsAPIProverTaskFailure,
|
||||
ErrProverStatsAPIProverTotalRewardFailure,
|
||||
|
||||
ErrCoordinatorParameterInvalidNo,
|
||||
ErrCoordinatorGetTaskFailure,
|
||||
ErrCoordinatorHandleZkProofFailure,
|
||||
ErrCoordinatorEmptyProofData,
|
||||
|
||||
ErrJWTCommonErr,
|
||||
ErrJWTTokenExpired,
|
||||
|
||||
Undefined(i32),
|
||||
}
|
||||
|
||||
impl ErrorCode {
|
||||
fn from_i32(v: i32) -> Self {
|
||||
match v {
|
||||
0 => ErrorCode::Success,
|
||||
500 => ErrorCode::InternalServerError,
|
||||
10001 => ErrorCode::ErrProverStatsAPIParameterInvalidNo,
|
||||
10002 => ErrorCode::ErrProverStatsAPIProverTaskFailure,
|
||||
10003 => ErrorCode::ErrProverStatsAPIProverTotalRewardFailure,
|
||||
20001 => ErrorCode::ErrCoordinatorParameterInvalidNo,
|
||||
20002 => ErrorCode::ErrCoordinatorGetTaskFailure,
|
||||
20003 => ErrorCode::ErrCoordinatorHandleZkProofFailure,
|
||||
20004 => ErrorCode::ErrCoordinatorEmptyProofData,
|
||||
50000 => ErrorCode::ErrJWTCommonErr,
|
||||
50001 => ErrorCode::ErrJWTTokenExpired,
|
||||
_ => {
|
||||
log::error!("get unexpected error code from coordinator: {v}");
|
||||
ErrorCode::Undefined(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for ErrorCode {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let v: i32 = i32::deserialize(deserializer)?;
|
||||
Ok(ErrorCode::from_i32(v))
|
||||
}
|
||||
}
|
||||
|
||||
// ====================================================
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ProofStatusNotOKError;
|
||||
|
||||
impl fmt::Display for ProofStatusNotOKError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "proof status not ok")
|
||||
}
|
||||
}
|
||||
@@ -1,5 +0,0 @@
use super::SubmitProofRequest;

pub trait Listener {
fn on_proof_submitted(&self, req: &SubmitProofRequest);
}
@@ -1,86 +0,0 @@
|
||||
use super::errors::ErrorCode;
|
||||
use crate::types::{ProofFailureType, ProofStatus, ProverType, TaskType};
|
||||
use rlp::{Encodable, RlpStream};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct Response<T> {
|
||||
pub errcode: ErrorCode,
|
||||
pub errmsg: String,
|
||||
pub data: Option<T>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct LoginMessage {
|
||||
pub challenge: String,
|
||||
pub prover_name: String,
|
||||
pub prover_version: String,
|
||||
pub prover_types: Vec<ProverType>,
|
||||
pub vks: Vec<String>,
|
||||
}
|
||||
|
||||
impl Encodable for LoginMessage {
|
||||
fn rlp_append(&self, s: &mut RlpStream) {
|
||||
let num_fields = 5;
|
||||
s.begin_list(num_fields);
|
||||
s.append(&self.challenge);
|
||||
s.append(&self.prover_version);
|
||||
s.append(&self.prover_name);
|
||||
// ProverType on the Go side is a type alias of uint8.
// A uint8 slice is treated as a string when doing the RLP encoding.
|
||||
let prover_types = self
|
||||
.prover_types
|
||||
.iter()
|
||||
.map(|prover_type: &ProverType| prover_type.to_u8())
|
||||
.collect::<Vec<u8>>();
|
||||
s.append(&prover_types);
|
||||
s.begin_list(self.vks.len());
|
||||
for vk in &self.vks {
|
||||
s.append(vk);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct LoginRequest {
|
||||
pub message: LoginMessage,
|
||||
pub public_key: String,
|
||||
pub signature: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct LoginResponseData {
|
||||
pub time: String,
|
||||
pub token: String,
|
||||
}
|
||||
|
||||
pub type ChallengeResponseData = LoginResponseData;
|
||||
|
||||
#[derive(Default, Serialize, Deserialize)]
|
||||
pub struct GetTaskRequest {
|
||||
pub task_types: Vec<TaskType>,
|
||||
pub prover_height: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct GetTaskResponseData {
|
||||
pub uuid: String,
|
||||
pub task_id: String,
|
||||
pub task_type: TaskType,
|
||||
pub task_data: String,
|
||||
pub hard_fork_name: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
pub struct SubmitProofRequest {
|
||||
pub uuid: String,
|
||||
pub task_id: String,
|
||||
pub task_type: TaskType,
|
||||
pub status: ProofStatus,
|
||||
pub proof: String,
|
||||
pub failure_type: Option<ProofFailureType>,
|
||||
pub failure_msg: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct SubmitProofResponseData {}
|
||||
@@ -1,57 +0,0 @@
|
||||
use crate::types::CommonHash;
|
||||
use anyhow::Result;
|
||||
use ethers_core::types::BlockNumber;
|
||||
use tokio::runtime::Runtime;
|
||||
|
||||
use serde::{de::DeserializeOwned, Serialize};
|
||||
use std::fmt::Debug;
|
||||
|
||||
use ethers_providers::{Http, Provider};
|
||||
|
||||
pub struct GethClient {
|
||||
id: String,
|
||||
provider: Provider<Http>,
|
||||
rt: Runtime,
|
||||
}
|
||||
|
||||
impl GethClient {
|
||||
pub fn new(id: &str, api_url: &str) -> Result<Self> {
|
||||
let provider = Provider::<Http>::try_from(api_url)?;
|
||||
let rt = tokio::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.build()?;
|
||||
|
||||
Ok(Self {
|
||||
id: id.to_string(),
|
||||
provider,
|
||||
rt,
|
||||
})
|
||||
}
|
||||
|
||||
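/// Fetches a block trace from l2geth via the `scroll_getBlockTraceByNumberOrHash` RPC, blocking on the internal runtime.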
pub fn get_block_trace_by_hash<T>(&mut self, hash: &CommonHash) -> Result<T>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + Debug + Send,
|
||||
{
|
||||
log::info!(
|
||||
"{}: calling get_block_trace_by_hash, hash: {:#?}",
|
||||
self.id,
|
||||
hash
|
||||
);
|
||||
|
||||
let trace_future = self
|
||||
.provider
|
||||
.request("scroll_getBlockTraceByNumberOrHash", [format!("{hash:#x}")]);
|
||||
|
||||
let trace = self.rt.block_on(trace_future)?;
|
||||
Ok(trace)
|
||||
}
|
||||
|
||||
pub fn block_number(&mut self) -> Result<BlockNumber> {
|
||||
log::info!("{}: calling block_number", self.id);
|
||||
|
||||
let trace_future = self.provider.request("eth_blockNumber", ());
|
||||
|
||||
let trace = self.rt.block_on(trace_future)?;
|
||||
Ok(trace)
|
||||
}
|
||||
}
|
||||
@@ -1,103 +0,0 @@
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::Result;
|
||||
use ethers_core::{
|
||||
k256::{
|
||||
ecdsa::{signature::hazmat::PrehashSigner, RecoveryId, Signature, SigningKey},
|
||||
elliptic_curve::{sec1::ToEncodedPoint, FieldBytes},
|
||||
PublicKey, Secp256k1, SecretKey,
|
||||
},
|
||||
types::Signature as EthSignature,
|
||||
};
|
||||
|
||||
use ethers_core::types::{H256, U256};
|
||||
use hex::ToHex;
|
||||
use tiny_keccak::{Hasher, Keccak};
|
||||
|
||||
pub struct KeySigner {
|
||||
public_key: PublicKey,
|
||||
signer: SigningKey,
|
||||
}
|
||||
|
||||
impl KeySigner {
|
||||
pub fn new(key_path: &str, passwd: &str) -> Result<Self> {
|
||||
let p = Path::new(key_path);
|
||||
|
||||
let secret = if !p.exists() {
|
||||
log::info!("[key_signer] key_path not exists, create one");
|
||||
let dir = p.parent().unwrap();
|
||||
let name = p.file_name().and_then(|s| s.to_str());
|
||||
let mut rng = rand::thread_rng();
|
||||
let (secret, _) = eth_keystore::new(dir, &mut rng, passwd, name)?;
|
||||
secret
|
||||
} else {
|
||||
log::info!("[key_signer] key_path already exists, load it");
|
||||
eth_keystore::decrypt_key(key_path, passwd).map_err(|e| anyhow::anyhow!(e))?
|
||||
};
|
||||
|
||||
let secret_key = SecretKey::from_bytes(secret.as_slice().into())?;
|
||||
|
||||
let signer = SigningKey::from(secret_key.clone());
|
||||
|
||||
Ok(Self {
|
||||
public_key: secret_key.public_key(),
|
||||
signer,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_public_key(&self) -> String {
|
||||
let v: Vec<u8> = Vec::from(self.public_key.to_encoded_point(true).as_bytes());
|
||||
buffer_to_hex(&v, false)
|
||||
}
|
||||
|
||||
/// Signs the provided hash.
|
||||
pub fn sign_hash(&self, hash: H256) -> Result<EthSignature> {
|
||||
let signer = &self.signer as &dyn PrehashSigner<(Signature, RecoveryId)>;
|
||||
let (recoverable_sig, recovery_id) = signer.sign_prehash(hash.as_ref())?;
|
||||
|
||||
let v = u8::from(recovery_id) as u64;
|
||||
|
||||
let r_bytes: FieldBytes<Secp256k1> = recoverable_sig.r().into();
|
||||
let s_bytes: FieldBytes<Secp256k1> = recoverable_sig.s().into();
|
||||
let r = U256::from_big_endian(r_bytes.as_slice());
|
||||
let s = U256::from_big_endian(s_bytes.as_slice());
|
||||
|
||||
Ok(EthSignature { r, s, v })
|
||||
}
|
||||
|
||||
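/// Keccak-256 hashes the buffer, signs the digest, and returns the signature as 0x-prefixed hex.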
pub fn sign_buffer<T>(&self, buffer: &T) -> Result<String>
|
||||
where
|
||||
T: AsRef<[u8]>,
|
||||
{
|
||||
let pre_hash = keccak256(buffer);
|
||||
|
||||
let hash = H256::from(pre_hash);
|
||||
let sig = self.sign_hash(hash)?;
|
||||
|
||||
Ok(buffer_to_hex(&sig.to_vec(), true))
|
||||
}
|
||||
}
|
||||
|
||||
fn buffer_to_hex<T>(buffer: &T, has_prefix: bool) -> String
|
||||
where
|
||||
T: AsRef<[u8]>,
|
||||
{
|
||||
if has_prefix {
|
||||
format!("0x{}", buffer.encode_hex::<String>())
|
||||
} else {
|
||||
buffer.encode_hex::<String>()
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the Keccak-256 hash of input bytes.
|
||||
///
|
||||
/// Note that strings are interpreted as UTF-8 bytes.
|
||||
pub fn keccak256<T: AsRef<[u8]>>(bytes: T) -> [u8; 32] {
|
||||
let mut output = [0u8; 32];
|
||||
|
||||
let mut hasher = Keccak::v256();
|
||||
hasher.update(bytes.as_ref());
|
||||
hasher.finalize(&mut output);
|
||||
|
||||
output
|
||||
}
|
||||
@@ -2,26 +2,19 @@
|
||||
#![feature(core_intrinsics)]
|
||||
|
||||
mod config;
|
||||
mod coordinator_client;
|
||||
mod geth_client;
|
||||
mod key_signer;
|
||||
mod prover;
|
||||
mod task_cache;
|
||||
mod task_processor;
|
||||
mod types;
|
||||
mod utils;
|
||||
mod version;
|
||||
mod zk_circuits_handler;
|
||||
|
||||
use anyhow::Result;
|
||||
use clap::{ArgAction, Parser};
|
||||
use config::{AssetsDirEnvConfig, Config};
|
||||
use prover::Prover;
|
||||
use std::rc::Rc;
|
||||
use task_cache::{ClearCacheCoordinatorListener, TaskCache};
|
||||
use task_processor::TaskProcessor;
|
||||
use prover::{LocalProver, LocalProverConfig};
|
||||
use scroll_proving_sdk::{
|
||||
prover::ProverBuilder,
|
||||
utils::{get_version, init_tracing},
|
||||
};
|
||||
use utils::get_prover_type;
|
||||
|
||||
/// Command-line arguments for the prover.
|
||||
#[derive(Parser, Debug)]
|
||||
#[clap(disable_version_flag = true)]
|
||||
struct Args {
|
||||
@@ -38,49 +31,38 @@ struct Args {
|
||||
log_file: Option<String>,
|
||||
}
|
||||
|
||||
fn start() -> Result<()> {
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
init_tracing();
|
||||
|
||||
let args = Args::parse();
|
||||
|
||||
if args.version {
|
||||
println!("version is {}", version::get_version());
|
||||
println!("version is {}", get_version());
|
||||
std::process::exit(0);
|
||||
}
|
||||
|
||||
utils::log_init(args.log_file);
|
||||
let cfg = LocalProverConfig::from_file(args.config_file)?;
|
||||
let sdk_config = cfg.sdk_config.clone();
|
||||
let mut prover_types = vec![];
|
||||
sdk_config
|
||||
.prover
|
||||
.circuit_types
|
||||
.iter()
|
||||
.for_each(|circuit_type| {
|
||||
if let Some(pt) = get_prover_type(*circuit_type) {
|
||||
if !prover_types.contains(&pt) {
|
||||
prover_types.push(pt);
|
||||
}
|
||||
}
|
||||
});
|
||||
let local_prover = LocalProver::new(cfg, prover_types);
|
||||
let prover = ProverBuilder::new(sdk_config)
|
||||
.with_proving_service(Box::new(local_prover))
|
||||
.build()
|
||||
.await?;
|
||||
|
||||
let config: Config = Config::from_file(args.config_file)?;
|
||||
|
||||
if let Err(e) = AssetsDirEnvConfig::init() {
|
||||
log::error!("AssetsDirEnvConfig init failed: {:#}", e);
|
||||
std::process::exit(-2);
|
||||
}
|
||||
|
||||
let task_cache = Rc::new(TaskCache::new(&config.db_path)?);
|
||||
|
||||
let coordinator_listener = Box::new(ClearCacheCoordinatorListener {
|
||||
task_cache: task_cache.clone(),
|
||||
});
|
||||
|
||||
let prover = Prover::new(&config, coordinator_listener)?;
|
||||
|
||||
log::info!(
|
||||
"prover start successfully. name: {}, type: {:?}, publickey: {}, version: {}",
|
||||
config.prover_name,
|
||||
config.prover_type,
|
||||
prover.get_public_key(),
|
||||
version::get_version(),
|
||||
);
|
||||
|
||||
let task_processor = TaskProcessor::new(&prover, task_cache);
|
||||
|
||||
task_processor.start();
|
||||
prover.run().await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let result = start();
|
||||
if let Err(e) = result {
|
||||
log::error!("main exit with error {:#}", e)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,170 +1,192 @@
|
||||
use anyhow::{bail, Context, Error, Ok, Result};
|
||||
use ethers_core::types::U64;
|
||||
|
||||
use std::{cell::RefCell, rc::Rc};
|
||||
|
||||
use crate::{
|
||||
config::Config,
|
||||
coordinator_client::{listener::Listener, types::*, CoordinatorClient},
|
||||
geth_client::GethClient,
|
||||
key_signer::KeySigner,
|
||||
types::{ProofFailureType, ProofStatus, ProverType},
|
||||
utils::get_task_types,
|
||||
types::ProverType,
|
||||
utils::get_prover_type,
|
||||
zk_circuits_handler::{CircuitsHandler, CircuitsHandlerProvider},
|
||||
};
|
||||
use anyhow::{anyhow, Result};
|
||||
use async_trait::async_trait;
|
||||
use scroll_proving_sdk::{
|
||||
config::Config as SdkConfig,
|
||||
prover::{
|
||||
proving_service::{
|
||||
GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
|
||||
QueryTaskResponse, TaskStatus,
|
||||
},
|
||||
ProvingService,
|
||||
},
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
fs::File,
|
||||
sync::{Arc, Mutex},
|
||||
time::{SystemTime, UNIX_EPOCH},
|
||||
};
|
||||
use tokio::{runtime::Handle, sync::RwLock, task::JoinHandle};
|
||||
|
||||
use super::types::{ProofDetail, Task};
|
||||
|
||||
pub struct Prover<'a> {
|
||||
config: &'a Config,
|
||||
key_signer: Rc<KeySigner>,
|
||||
circuits_handler_provider: RefCell<CircuitsHandlerProvider<'a>>,
|
||||
coordinator_client: RefCell<CoordinatorClient<'a>>,
|
||||
geth_client: Option<Rc<RefCell<GethClient>>>,
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct LocalProverConfig {
|
||||
pub sdk_config: SdkConfig,
|
||||
pub high_version_circuit: CircuitConfig,
|
||||
pub low_version_circuit: CircuitConfig,
|
||||
}
|
||||
|
||||
impl<'a> Prover<'a> {
|
||||
pub fn new(config: &'a Config, coordinator_listener: Box<dyn Listener>) -> Result<Self> {
|
||||
let prover_type = config.prover_type;
|
||||
let keystore_path = &config.keystore_path;
|
||||
let keystore_password = &config.keystore_password;
|
||||
|
||||
let geth_client = if config.prover_type == ProverType::Chunk {
|
||||
Some(Rc::new(RefCell::new(
|
||||
GethClient::new(
|
||||
&config.prover_name,
|
||||
&config.l2geth.as_ref().unwrap().endpoint,
|
||||
)
|
||||
.context("failed to create l2 geth_client")?,
|
||||
)))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let provider = CircuitsHandlerProvider::new(prover_type, config, geth_client.clone())
|
||||
.context("failed to create circuits handler provider")?;
|
||||
|
||||
let vks = provider.init_vks(prover_type, config, geth_client.clone());
|
||||
|
||||
let key_signer = Rc::new(KeySigner::new(keystore_path, keystore_password)?);
|
||||
let coordinator_client =
|
||||
CoordinatorClient::new(config, Rc::clone(&key_signer), coordinator_listener, vks)
|
||||
.context("failed to create coordinator_client")?;
|
||||
|
||||
let prover = Prover {
|
||||
config,
|
||||
key_signer: Rc::clone(&key_signer),
|
||||
circuits_handler_provider: RefCell::new(provider),
|
||||
coordinator_client: RefCell::new(coordinator_client),
|
||||
geth_client,
|
||||
};
|
||||
|
||||
Ok(prover)
|
||||
impl LocalProverConfig {
|
||||
pub fn from_reader<R>(reader: R) -> Result<Self>
|
||||
where
|
||||
R: std::io::Read,
|
||||
{
|
||||
serde_json::from_reader(reader).map_err(|e| anyhow!(e))
|
||||
}
|
||||
|
||||
pub fn get_public_key(&self) -> String {
|
||||
self.key_signer.get_public_key()
|
||||
pub fn from_file(file_name: String) -> Result<Self> {
|
||||
let file = File::open(file_name)?;
|
||||
Self::from_reader(&file)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn fetch_task(&self) -> Result<Task> {
|
||||
log::info!("[prover] start to fetch_task");
|
||||
let mut req = GetTaskRequest {
|
||||
task_types: get_task_types(self.config.prover_type),
|
||||
prover_height: None,
|
||||
};
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct CircuitConfig {
|
||||
pub hard_fork_name: String,
|
||||
pub params_path: String,
|
||||
pub assets_path: String,
|
||||
}
|
||||
|
||||
if self.config.prover_type == ProverType::Chunk {
|
||||
let latest_block_number = self.get_latest_block_number_value()?;
|
||||
if let Some(v) = latest_block_number {
|
||||
if v.as_u64() == 0 {
|
||||
bail!("omit to prove task of the genesis block")
|
||||
pub struct LocalProver {
|
||||
config: LocalProverConfig,
|
||||
prover_types: Vec<ProverType>,
|
||||
circuits_handler_provider: RwLock<CircuitsHandlerProvider>,
|
||||
next_task_id: Arc<Mutex<u64>>,
|
||||
current_task: Arc<Mutex<Option<JoinHandle<Result<String>>>>>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ProvingService for LocalProver {
|
||||
fn is_local(&self) -> bool {
|
||||
true
|
||||
}
|
||||
async fn get_vks(&self, req: GetVkRequest) -> GetVkResponse {
|
||||
let mut prover_types = vec![];
|
||||
req.circuit_types.iter().for_each(|circuit_type| {
|
||||
if let Some(pt) = get_prover_type(*circuit_type) {
|
||||
if !prover_types.contains(&pt) {
|
||||
prover_types.push(pt);
|
||||
}
|
||||
req.prover_height = Some(v.as_u64());
|
||||
} else {
|
||||
log::error!("[prover] failed to fetch latest confirmed block number, got None");
|
||||
bail!("failed to fetch latest confirmed block number, got None")
|
||||
}
|
||||
}
|
||||
let resp = self.coordinator_client.borrow_mut().get_task(&req)?;
|
||||
});
|
||||
|
||||
match resp.data {
|
||||
Some(d) => Ok(Task::from(d)),
|
||||
None => {
|
||||
bail!("data of get_task empty, while error_code is success. there may be something wrong in response data or inner logic.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn prove_task(&self, task: &Task) -> Result<ProofDetail> {
|
||||
log::info!("[prover] start to prove_task, task id: {}", task.id);
|
||||
let handler: Rc<Box<dyn CircuitsHandler>> = self
|
||||
let vks = self
|
||||
.circuits_handler_provider
|
||||
.borrow_mut()
|
||||
.get_circuits_handler(&task.hard_fork_name)
|
||||
.context("failed to get circuit handler")?;
|
||||
self.do_prove(task, handler)
|
||||
.read()
|
||||
.await
|
||||
.init_vks(&self.config, prover_types)
|
||||
.await;
|
||||
GetVkResponse { vks, error: None }
|
||||
}
|
||||
async fn prove(&self, req: ProveRequest) -> ProveResponse {
|
||||
let handler = self
|
||||
.circuits_handler_provider
|
||||
.write()
|
||||
.await
|
||||
.get_circuits_handler(&req.hard_fork_name, self.prover_types.clone())
|
||||
.expect("failed to get circuit handler");
|
||||
|
||||
match self.do_prove(req, handler).await {
|
||||
Ok(resp) => resp,
|
||||
Err(e) => ProveResponse {
|
||||
status: TaskStatus::Failed,
|
||||
error: Some(format!("failed to request proof: {}", e)),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn do_prove(&self, task: &Task, handler: Rc<Box<dyn CircuitsHandler>>) -> Result<ProofDetail> {
|
||||
let mut proof_detail = ProofDetail {
|
||||
id: task.id.clone(),
|
||||
proof_type: task.task_type,
|
||||
async fn query_task(&self, req: QueryTaskRequest) -> QueryTaskResponse {
|
||||
let handle = self.current_task.lock().unwrap().take();
|
||||
if let Some(handle) = handle {
|
||||
if handle.is_finished() {
|
||||
return match handle.await {
|
||||
Ok(Ok(proof)) => QueryTaskResponse {
|
||||
task_id: req.task_id,
|
||||
status: TaskStatus::Success,
|
||||
proof: Some(proof),
|
||||
..Default::default()
|
||||
},
|
||||
Ok(Err(e)) => QueryTaskResponse {
|
||||
task_id: req.task_id,
|
||||
status: TaskStatus::Failed,
|
||||
error: Some(format!("proving task failed: {}", e)),
|
||||
..Default::default()
|
||||
},
|
||||
Err(e) => QueryTaskResponse {
|
||||
task_id: req.task_id,
|
||||
status: TaskStatus::Failed,
|
||||
error: Some(format!("proving task panicked: {}", e)),
|
||||
..Default::default()
|
||||
},
|
||||
};
|
||||
} else {
|
||||
*self.current_task.lock().unwrap() = Some(handle);
|
||||
return QueryTaskResponse {
|
||||
task_id: req.task_id,
|
||||
status: TaskStatus::Proving,
|
||||
..Default::default()
|
||||
};
|
||||
}
|
||||
}
|
||||
// If no handle is found
|
||||
QueryTaskResponse {
|
||||
task_id: req.task_id,
|
||||
status: TaskStatus::Failed,
|
||||
error: Some("no proving task is running".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
proof_detail.proof_data = handler.get_proof_data(task.task_type, task)?;
|
||||
Ok(proof_detail)
|
||||
}
|
||||
|
||||
pub fn submit_proof(&self, proof_detail: ProofDetail, task: &Task) -> Result<()> {
|
||||
log::info!(
|
||||
"[prover] start to submit_proof, task id: {}",
|
||||
proof_detail.id
|
||||
);
|
||||
|
||||
let request = SubmitProofRequest {
|
||||
uuid: task.uuid.clone(),
|
||||
task_id: proof_detail.id,
|
||||
task_type: proof_detail.proof_type,
|
||||
status: ProofStatus::Ok,
|
||||
proof: proof_detail.proof_data,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
self.do_submit(&request)
|
||||
}
|
||||
|
||||
pub fn submit_error(
|
||||
&self,
|
||||
task: &Task,
|
||||
failure_type: ProofFailureType,
|
||||
error: Error,
|
||||
) -> Result<()> {
|
||||
log::info!("[prover] start to submit_error, task id: {}", task.id);
|
||||
let request = SubmitProofRequest {
|
||||
uuid: task.uuid.clone(),
|
||||
task_id: task.id.clone(),
|
||||
task_type: task.task_type,
|
||||
status: ProofStatus::Error,
|
||||
failure_type: Some(failure_type),
|
||||
failure_msg: Some(format!("{:#}", error)),
|
||||
..Default::default()
|
||||
};
|
||||
self.do_submit(&request)
|
||||
}
|
||||
|
||||
fn do_submit(&self, request: &SubmitProofRequest) -> Result<()> {
|
||||
self.coordinator_client.borrow_mut().submit_proof(request)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_latest_block_number_value(&self) -> Result<Option<U64>> {
|
||||
let number = self
|
||||
.geth_client
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.borrow_mut()
|
||||
.block_number()?;
|
||||
Ok(number.as_number())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl LocalProver {
|
||||
pub fn new(config: LocalProverConfig, prover_types: Vec<ProverType>) -> Self {
|
||||
let circuits_handler_provider = CircuitsHandlerProvider::new(config.clone())
|
||||
.expect("failed to create circuits handler provider");
|
||||
|
||||
Self {
|
||||
config,
|
||||
prover_types,
|
||||
circuits_handler_provider: RwLock::new(circuits_handler_provider),
|
||||
next_task_id: Arc::new(Mutex::new(0)),
|
||||
current_task: Arc::new(Mutex::new(None)),
|
||||
}
|
||||
}
|
||||
|
||||
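// Spawns the proving work on a blocking thread, records the join handle in `current_task`,
// and immediately returns a `Proving` response; the result is picked up later via `query_task`.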
async fn do_prove(
|
||||
&self,
|
||||
req: ProveRequest,
|
||||
handler: Arc<Box<dyn CircuitsHandler>>,
|
||||
) -> Result<ProveResponse> {
|
||||
let task_id = {
|
||||
let mut next_task_id = self.next_task_id.lock().unwrap();
|
||||
*next_task_id += 1;
|
||||
*next_task_id
|
||||
};
|
||||
|
||||
let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
|
||||
let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;
|
||||
|
||||
let req_clone = req.clone();
|
||||
let handle = Handle::current();
|
||||
let task_handle =
|
||||
tokio::task::spawn_blocking(move || handle.block_on(handler.get_proof_data(req_clone)));
|
||||
|
||||
*self.current_task.lock().unwrap() = Some(task_handle);
|
||||
|
||||
Ok(ProveResponse {
|
||||
task_id: task_id.to_string(),
|
||||
circuit_type: req.circuit_type,
|
||||
circuit_version: req.circuit_version,
|
||||
hard_fork_name: req.hard_fork_name,
|
||||
status: TaskStatus::Proving,
|
||||
created_at,
|
||||
input: Some(req.input),
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,66 +0,0 @@
|
||||
use anyhow::{Ok, Result};
|
||||
|
||||
use super::coordinator_client::{listener::Listener, types::SubmitProofRequest};
|
||||
use crate::types::TaskWrapper;
|
||||
use sled::{Config, Db};
|
||||
use std::rc::Rc;
|
||||
|
||||
pub struct TaskCache {
|
||||
db: Db,
|
||||
}
|
||||
|
||||
impl TaskCache {
|
||||
pub fn new(db_path: &String) -> Result<Self> {
|
||||
let config = Config::new().path(db_path);
|
||||
let db = config.open()?;
|
||||
log::info!("[task_cache] initiate successfully to {db_path}");
|
||||
Ok(Self { db })
|
||||
}
|
||||
|
||||
pub fn put_task(&self, task_wrapper: &TaskWrapper) -> Result<()> {
|
||||
let k = task_wrapper.task.id.clone().into_bytes();
|
||||
let v = serde_json::to_vec(task_wrapper)?;
|
||||
self.db.insert(k, v)?;
|
||||
log::info!(
|
||||
"[task_cache] put_task with task_id: {}",
|
||||
task_wrapper.task.id
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_last_task(&self) -> Result<Option<TaskWrapper>> {
|
||||
let last = self.db.last()?;
|
||||
if let Some((k, v)) = last {
|
||||
let kk = std::str::from_utf8(k.as_ref())?;
|
||||
let task_wrapper: TaskWrapper = serde_json::from_slice(v.as_ref())?;
|
||||
log::info!(
|
||||
"[task_cache] get_last_task with task_id: {kk}, count: {}",
|
||||
task_wrapper.get_count()
|
||||
);
|
||||
return Ok(Some(task_wrapper));
|
||||
}
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
pub fn delete_task(&self, task_id: String) -> Result<()> {
|
||||
let k = task_id.clone().into_bytes();
|
||||
self.db.remove(k)?;
|
||||
log::info!("[task cache] delete_task with task_id: {task_id}");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// ========================= listener ===========================
|
||||
|
||||
pub struct ClearCacheCoordinatorListener {
|
||||
pub task_cache: Rc<TaskCache>,
|
||||
}
|
||||
|
||||
impl Listener for ClearCacheCoordinatorListener {
|
||||
fn on_proof_submitted(&self, req: &SubmitProofRequest) {
|
||||
let result = self.task_cache.delete_task(req.task_id.clone());
|
||||
if let Err(e) = result {
|
||||
log::error!("delete task from embed db failed, {:#}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,89 +0,0 @@
|
||||
use super::{coordinator_client::ProofStatusNotOKError, prover::Prover, task_cache::TaskCache};
|
||||
use anyhow::{Context, Result};
|
||||
use std::rc::Rc;
|
||||
|
||||
pub struct TaskProcessor<'a> {
|
||||
prover: &'a Prover<'a>,
|
||||
task_cache: Rc<TaskCache>,
|
||||
}
|
||||
|
||||
impl<'a> TaskProcessor<'a> {
|
||||
pub fn new(prover: &'a Prover<'a>, task_cache: Rc<TaskCache>) -> Self {
|
||||
TaskProcessor { prover, task_cache }
|
||||
}
|
||||
|
||||
pub fn start(&self) {
|
||||
loop {
|
||||
log::info!("start a new round.");
|
||||
if let Err(err) = self.prove_and_submit() {
|
||||
if err.is::<ProofStatusNotOKError>() {
|
||||
log::info!("proof status not ok, downgrade level to info.");
|
||||
} else {
|
||||
log::error!("encounter error: {:#}", err);
|
||||
}
|
||||
} else {
|
||||
log::info!("prove & submit succeed.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn prove_and_submit(&self) -> Result<()> {
|
||||
let task_from_cache = self
|
||||
.task_cache
|
||||
.get_last_task()
|
||||
.context("failed to peek from stack")?;
|
||||
|
||||
let mut task_wrapper = match task_from_cache {
|
||||
Some(t) => t,
|
||||
None => {
|
||||
let fetch_result = self.prover.fetch_task();
|
||||
if let Err(err) = fetch_result {
|
||||
std::thread::sleep(core::time::Duration::from_secs(10));
|
||||
return Err(err).context("failed to fetch task from coordinator");
|
||||
}
|
||||
fetch_result.unwrap().into()
|
||||
}
|
||||
};
|
||||
|
||||
if task_wrapper.get_count() <= 2 {
|
||||
task_wrapper.increment_count();
|
||||
self.task_cache
|
||||
.put_task(&task_wrapper)
|
||||
.context("failed to push task into stack, updating count")?;
|
||||
|
||||
log::info!(
|
||||
"start to prove task, task_type: {:?}, task_id: {}",
|
||||
task_wrapper.task.task_type,
|
||||
task_wrapper.task.id
|
||||
);
|
||||
let result = match self.prover.prove_task(&task_wrapper.task) {
|
||||
Ok(proof_detail) => self.prover.submit_proof(proof_detail, &task_wrapper.task),
|
||||
Err(error) => {
|
||||
log::error!(
|
||||
"failed to prove task, id: {}, error: {:#}",
|
||||
&task_wrapper.task.id,
|
||||
error
|
||||
);
|
||||
self.prover.submit_error(
|
||||
&task_wrapper.task,
|
||||
super::types::ProofFailureType::NoPanic,
|
||||
error,
|
||||
)
|
||||
}
|
||||
};
|
||||
return result;
|
||||
}
|
||||
|
||||
// If the task has already been tried 3 or more times, the failure is most likely due to a panic during circuit proving.
|
||||
log::error!(
|
||||
"zk proving panic for task, task_type: {:?}, task_id: {}",
|
||||
task_wrapper.task.task_type,
|
||||
task_wrapper.task.id
|
||||
);
|
||||
self.prover.submit_error(
|
||||
&task_wrapper.task,
|
||||
super::types::ProofFailureType::Panic,
|
||||
anyhow::anyhow!("zk proving panic for task"),
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,59 +1,10 @@
|
||||
use ethers_core::types::H256;
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
|
||||
use crate::coordinator_client::types::GetTaskResponseData;
|
||||
use scroll_proving_sdk::prover::types::CircuitType;
|
||||
|
||||
pub type CommonHash = H256;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
pub enum TaskType {
|
||||
Undefined,
|
||||
Chunk,
|
||||
Batch,
|
||||
Bundle,
|
||||
}
|
||||
|
||||
impl TaskType {
|
||||
fn from_u8(v: u8) -> Self {
|
||||
match v {
|
||||
1 => TaskType::Chunk,
|
||||
2 => TaskType::Batch,
|
||||
3 => TaskType::Bundle,
|
||||
_ => TaskType::Undefined,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for TaskType {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
match *self {
|
||||
TaskType::Undefined => serializer.serialize_u8(0),
|
||||
TaskType::Chunk => serializer.serialize_u8(1),
|
||||
TaskType::Batch => serializer.serialize_u8(2),
|
||||
TaskType::Bundle => serializer.serialize_u8(3),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for TaskType {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let v: u8 = u8::deserialize(deserializer)?;
|
||||
Ok(TaskType::from_u8(v))
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for TaskType {
|
||||
fn default() -> Self {
|
||||
Self::Undefined
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
pub enum ProverType {
|
||||
Chunk,
|
||||
@@ -70,13 +21,6 @@ impl ProverType {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_u8(self) -> u8 {
|
||||
match self {
|
||||
ProverType::Chunk => 1,
|
||||
ProverType::Batch => 2,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for ProverType {
|
||||
@@ -103,54 +47,18 @@ impl<'de> Deserialize<'de> for ProverType {
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
pub struct Task {
|
||||
pub uuid: String,
|
||||
pub id: String,
|
||||
#[serde(rename = "type", default)]
|
||||
pub task_type: TaskType,
|
||||
pub task_type: CircuitType,
|
||||
pub task_data: String,
|
||||
#[serde(default)]
|
||||
pub hard_fork_name: String,
|
||||
}
|
||||
|
||||
impl From<GetTaskResponseData> for Task {
|
||||
fn from(value: GetTaskResponseData) -> Self {
|
||||
Self {
|
||||
uuid: value.uuid,
|
||||
id: value.task_id,
|
||||
task_type: value.task_type,
|
||||
task_data: value.task_data,
|
||||
hard_fork_name: value.hard_fork_name,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
pub struct TaskWrapper {
|
||||
pub task: Task,
|
||||
count: usize,
|
||||
}
|
||||
|
||||
impl TaskWrapper {
|
||||
pub fn increment_count(&mut self) {
|
||||
self.count += 1;
|
||||
}
|
||||
|
||||
pub fn get_count(&self) -> usize {
|
||||
self.count
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Task> for TaskWrapper {
|
||||
fn from(task: Task) -> Self {
|
||||
TaskWrapper { task, count: 0 }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
pub struct ProofDetail {
|
||||
pub id: String,
|
||||
#[serde(rename = "type", default)]
|
||||
pub proof_type: TaskType,
|
||||
pub proof_type: CircuitType,
|
||||
pub proof_data: String,
|
||||
pub error: String,
|
||||
}
|
||||
|
||||
@@ -1,32 +1,18 @@
use env_logger::Env;
use std::{fs::OpenOptions, sync::Once};
use crate::types::ProverType;
use scroll_proving_sdk::prover::types::CircuitType;

use crate::types::{ProverType, TaskType};

static LOG_INIT: Once = Once::new();

/// Initialize log
pub fn log_init(log_file: Option<String>) {
    LOG_INIT.call_once(|| {
        let mut builder = env_logger::Builder::from_env(Env::default().default_filter_or("info"));
        if let Some(file_path) = log_file {
            let target = Box::new(
                OpenOptions::new()
                    .write(true)
                    .create(true)
                    .truncate(false)
                    .open(file_path)
                    .expect("Can't create log file"),
            );
            builder.target(env_logger::Target::Pipe(target));
        }
        builder.init();
    });
}

pub fn get_task_types(prover_type: ProverType) -> Vec<TaskType> {
pub fn get_circuit_types(prover_type: ProverType) -> Vec<CircuitType> {
    match prover_type {
        ProverType::Chunk => vec![TaskType::Chunk],
        ProverType::Batch => vec![TaskType::Batch, TaskType::Bundle],
        ProverType::Chunk => vec![CircuitType::Chunk],
        ProverType::Batch => vec![CircuitType::Batch, CircuitType::Bundle],
    }
}

pub fn get_prover_type(task_type: CircuitType) -> Option<ProverType> {
    match task_type {
        CircuitType::Undefined => None,
        CircuitType::Chunk => Some(ProverType::Chunk),
        CircuitType::Batch => Some(ProverType::Batch),
        CircuitType::Bundle => Some(ProverType::Batch),
    }
}
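Read together, get_circuit_types and get_prover_type are intended to be mutually consistent: every circuit type advertised for a prover type should map back to that same prover type. A small stand-alone sketch of that check, with local stand-ins for the SDK's CircuitType and the crate's ProverType (the real types live in scroll_proving_sdk and crate::types):

// Local stand-ins so the sketch compiles on its own.
#[derive(Debug, Clone, Copy, PartialEq)]
enum CircuitType { Undefined, Chunk, Batch, Bundle }

#[derive(Debug, Clone, Copy, PartialEq)]
enum ProverType { Chunk, Batch }

fn get_circuit_types(prover_type: ProverType) -> Vec<CircuitType> {
    match prover_type {
        ProverType::Chunk => vec![CircuitType::Chunk],
        ProverType::Batch => vec![CircuitType::Batch, CircuitType::Bundle],
    }
}

fn get_prover_type(task_type: CircuitType) -> Option<ProverType> {
    match task_type {
        CircuitType::Undefined => None,
        CircuitType::Chunk => Some(ProverType::Chunk),
        CircuitType::Batch | CircuitType::Bundle => Some(ProverType::Batch),
    }
}

fn main() {
    // Round-trip check: each advertised circuit type resolves back to its prover type.
    for prover_type in [ProverType::Chunk, ProverType::Batch] {
        for circuit_type in get_circuit_types(prover_type) {
            assert_eq!(get_prover_type(circuit_type), Some(prover_type));
        }
    }
    println!("prover/circuit type mapping is consistent");
}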
@@ -1,18 +0,0 @@
use std::cell::OnceCell;

static DEFAULT_COMMIT: &str = "unknown";
static mut VERSION: OnceCell<String> = OnceCell::new();

pub const TAG: &str = "v0.0.0";
pub const DEFAULT_ZK_VERSION: &str = "000000-000000";

fn init_version() -> String {
    let commit = option_env!("GIT_REV").unwrap_or(DEFAULT_COMMIT);
    let tag = option_env!("GO_TAG").unwrap_or(TAG);
    let zk_version = option_env!("ZK_VERSION").unwrap_or(DEFAULT_ZK_VERSION);
    format!("{tag}-{commit}-{zk_version}")
}

pub fn get_version() -> String {
    unsafe { VERSION.get_or_init(init_version).clone() }
}
@@ -2,16 +2,16 @@ mod common;
mod darwin;
mod darwin_v2;

use super::geth_client::GethClient;
use crate::{
    config::{AssetsDirEnvConfig, Config},
    types::{ProverType, Task, TaskType},
    utils::get_task_types,
    config::AssetsDirEnvConfig, prover::LocalProverConfig, types::ProverType,
    utils::get_circuit_types,
};
use anyhow::{bail, Result};
use async_trait::async_trait;
use darwin::DarwinHandler;
use darwin_v2::DarwinV2Handler;
use std::{cell::RefCell, collections::HashMap, rc::Rc};
use scroll_proving_sdk::prover::{proving_service::ProveRequest, CircuitType};
use std::{collections::HashMap, sync::Arc};

type HardForkName = String;

@@ -21,40 +21,36 @@ pub mod utils {
    }
}

pub trait CircuitsHandler {
    fn get_vk(&self, task_type: TaskType) -> Option<Vec<u8>>;
#[async_trait]
pub trait CircuitsHandler: Send + Sync {
    async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>>;

    fn get_proof_data(&self, task_type: TaskType, task: &Task) -> Result<String>;
    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String>;
}
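With async_trait plus the Send + Sync bound, a boxed CircuitsHandler can now be awaited from tokio tasks and shared behind an Arc. A minimal mock under those assumptions (ProveRequest and CircuitType are stubbed locally; tokio, async-trait and anyhow are assumed to be available):

use std::sync::Arc;
use anyhow::Result;
use async_trait::async_trait;

// Local stubs standing in for the scroll_proving_sdk types, only to keep the sketch self-contained.
#[derive(Debug, Clone, Copy)]
enum CircuitType { Chunk, Batch, Bundle }

struct ProveRequest {
    circuit_type: CircuitType,
    input: String,
}

#[async_trait]
trait CircuitsHandler: Send + Sync {
    async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>>;
    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String>;
}

struct NoopHandler;

#[async_trait]
impl CircuitsHandler for NoopHandler {
    async fn get_vk(&self, _task_type: CircuitType) -> Option<Vec<u8>> {
        None // a real handler would read the vk out of its prover instance
    }

    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
        // a real handler would dispatch on circuit_type and run the corresponding prover
        Ok(format!(
            "proved {} input bytes for {:?}",
            prove_request.input.len(),
            prove_request.circuit_type
        ))
    }
}

#[tokio::main]
async fn main() -> Result<()> {
    // Send + Sync lets the boxed handler be shared across tasks behind an Arc.
    let handler: Arc<Box<dyn CircuitsHandler>> = Arc::new(Box::new(NoopHandler));
    let h = handler.clone();
    let out = tokio::spawn(async move {
        h.get_proof_data(ProveRequest {
            circuit_type: CircuitType::Chunk,
            input: "{}".into(),
        })
        .await
    })
    .await??;
    println!("{out}");
    Ok(())
}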
type CircuitsHandlerBuilder = fn(
    prover_type: ProverType,
    config: &Config,
    geth_client: Option<Rc<RefCell<GethClient>>>,
    prover_types: Vec<ProverType>,
    config: &LocalProverConfig,
) -> Result<Box<dyn CircuitsHandler>>;

pub struct CircuitsHandlerProvider<'a> {
    prover_type: ProverType,
    config: &'a Config,
    geth_client: Option<Rc<RefCell<GethClient>>>,
pub struct CircuitsHandlerProvider {
    config: LocalProverConfig,
    circuits_handler_builder_map: HashMap<HardForkName, CircuitsHandlerBuilder>,

    current_fork_name: Option<HardForkName>,
    current_circuit: Option<Rc<Box<dyn CircuitsHandler>>>,
    current_circuit: Option<Arc<Box<dyn CircuitsHandler>>>,
}

impl<'a> CircuitsHandlerProvider<'a> {
    pub fn new(
        prover_type: ProverType,
        config: &'a Config,
        geth_client: Option<Rc<RefCell<GethClient>>>,
    ) -> Result<Self> {
impl CircuitsHandlerProvider {
    pub fn new(config: LocalProverConfig) -> Result<Self> {
        let mut m: HashMap<HardForkName, CircuitsHandlerBuilder> = HashMap::new();

        if let Err(e) = AssetsDirEnvConfig::init() {
            panic!("AssetsDirEnvConfig init failed: {:#}", e);
        }

        fn handler_builder(
            prover_type: ProverType,
            config: &Config,
            geth_client: Option<Rc<RefCell<GethClient>>>,
            prover_types: Vec<ProverType>,
            config: &LocalProverConfig,
        ) -> Result<Box<dyn CircuitsHandler>> {
            log::info!(
                "now init zk circuits handler, hard_fork_name: {}",
@@ -62,10 +58,9 @@ impl<'a> CircuitsHandlerProvider<'a> {
            );
            AssetsDirEnvConfig::enable_first();
            DarwinHandler::new(
                prover_type,
                prover_types,
                &config.low_version_circuit.params_path,
                &config.low_version_circuit.assets_path,
                geth_client,
            )
            .map(|handler| Box::new(handler) as Box<dyn CircuitsHandler>)
        }
@@ -75,9 +70,8 @@ impl<'a> CircuitsHandlerProvider<'a> {
        );

        fn next_handler_builder(
            prover_type: ProverType,
            config: &Config,
            geth_client: Option<Rc<RefCell<GethClient>>>,
            prover_types: Vec<ProverType>,
            config: &LocalProverConfig,
        ) -> Result<Box<dyn CircuitsHandler>> {
            log::info!(
                "now init zk circuits handler, hard_fork_name: {}",
@@ -85,10 +79,9 @@ impl<'a> CircuitsHandlerProvider<'a> {
            );
            AssetsDirEnvConfig::enable_second();
            DarwinV2Handler::new(
                prover_type,
                prover_types,
                &config.high_version_circuit.params_path,
                &config.high_version_circuit.assets_path,
                geth_client,
            )
            .map(|handler| Box::new(handler) as Box<dyn CircuitsHandler>)
        }
@@ -99,9 +92,7 @@ impl<'a> CircuitsHandlerProvider<'a> {
        );

        let provider = CircuitsHandlerProvider {
            prover_type,
            config,
            geth_client,
            circuits_handler_builder_map: m,
            current_fork_name: None,
            current_circuit: None,
@@ -113,7 +104,8 @@ impl<'a> CircuitsHandlerProvider<'a> {
    pub fn get_circuits_handler(
        &mut self,
        hard_fork_name: &String,
    ) -> Result<Rc<Box<dyn CircuitsHandler>>> {
        prover_types: Vec<ProverType>,
    ) -> Result<Arc<Box<dyn CircuitsHandler>>> {
        match &self.current_fork_name {
            Some(fork_name) if fork_name == hard_fork_name => {
                log::info!("get circuits handler from cache");
@@ -129,12 +121,12 @@ impl<'a> CircuitsHandlerProvider<'a> {
                );
                if let Some(builder) = self.circuits_handler_builder_map.get(hard_fork_name) {
                    log::info!("building circuits handler for {hard_fork_name}");
                    let handler = builder(self.prover_type, self.config, self.geth_client.clone())
                    let handler = builder(prover_types, &self.config)
                        .expect("failed to build circuits handler");
                    self.current_fork_name = Some(hard_fork_name.clone());
                    let rc_handler = Rc::new(handler);
                    self.current_circuit = Some(rc_handler.clone());
                    Ok(rc_handler)
                    let arc_handler = Arc::new(handler);
                    self.current_circuit = Some(arc_handler.clone());
                    Ok(arc_handler)
                } else {
                    bail!("missing builder, there must be something wrong.")
                }
@@ -142,33 +134,32 @@ impl<'a> CircuitsHandlerProvider<'a> {
        }
    }

    pub fn init_vks(
    pub async fn init_vks(
        &self,
        prover_type: ProverType,
        config: &'a Config,
        geth_client: Option<Rc<RefCell<GethClient>>>,
        config: &LocalProverConfig,
        prover_types: Vec<ProverType>,
    ) -> Vec<String> {
        self.circuits_handler_builder_map
            .iter()
            .flat_map(|(hard_fork_name, build)| {
                let handler = build(prover_type, config, geth_client.clone())
                    .expect("failed to build circuits handler");
        let mut vks = Vec::new();
        for (hard_fork_name, build) in self.circuits_handler_builder_map.iter() {
            let handler =
                build(prover_types.clone(), config).expect("failed to build circuits handler");

                get_task_types(prover_type)
                    .into_iter()
                    .map(|task_type| {
                        let vk = handler
                            .get_vk(task_type)
                            .map_or("".to_string(), utils::encode_vk);
                        log::info!(
                            "vk for {hard_fork_name}, is {vk}, task_type: {:?}",
                            task_type
                        );
                        vk
                    })
                    .filter(|vk| !vk.is_empty())
                    .collect::<Vec<String>>()
            })
            .collect::<Vec<String>>()
            for prover_type in prover_types.iter() {
                for task_type in get_circuit_types(*prover_type).into_iter() {
                    let vk = handler
                        .get_vk(task_type)
                        .await
                        .map_or("".to_string(), utils::encode_vk);
                    log::info!(
                        "vk for {hard_fork_name}, is {vk}, task_type: {:?}",
                        task_type
                    );
                    if !vk.is_empty() {
                        vks.push(vk)
                    }
                }
            }
        }
        vks
    }
}
@@ -1,14 +1,14 @@
use super::{common::*, CircuitsHandler};
use crate::{
    geth_client::GethClient,
    types::{ProverType, TaskType},
};
use crate::types::ProverType;
use anyhow::{bail, Context, Ok, Result};
use async_trait::async_trait;
use once_cell::sync::Lazy;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, CircuitType};
use serde::Deserialize;
use tokio::sync::RwLock;

use crate::types::{CommonHash, Task};
use std::{cell::RefCell, cmp::Ordering, env, rc::Rc};
use crate::types::CommonHash;
use std::env;

use prover_darwin::{
    aggregator::Prover as BatchProver,
@@ -37,16 +37,10 @@ pub struct ChunkTaskDetail {
    pub block_hashes: Vec<CommonHash>,
}

fn get_block_number(block_trace: &BlockTrace) -> Option<u64> {
    block_trace.header.number.map(|n| n.as_u64())
}

#[derive(Default)]
pub struct DarwinHandler {
    chunk_prover: Option<RefCell<ChunkProver<'static>>>,
    batch_prover: Option<RefCell<BatchProver<'static>>>,

    geth_client: Option<Rc<RefCell<GethClient>>>,
    chunk_prover: Option<RwLock<ChunkProver<'static>>>,
    batch_prover: Option<RwLock<BatchProver<'static>>>,
}

impl DarwinHandler {
@@ -54,7 +48,6 @@ impl DarwinHandler {
        prover_types: Vec<ProverType>,
        params_dir: &str,
        assets_dir: &str,
        geth_client: Option<Rc<RefCell<GethClient>>>,
    ) -> Result<Self> {
        let class_name = std::intrinsics::type_name::<Self>();
        let prover_types_set = prover_types
@@ -63,7 +56,6 @@ impl DarwinHandler {
        let mut handler = Self {
            batch_prover: None,
            chunk_prover: None,
            geth_client,
        };
        let degrees: Vec<u32> = get_degrees(&prover_types_set, |prover_type| match prover_type {
            ProverType::Chunk => ZKEVM_DEGREES.clone(),
@@ -81,12 +73,12 @@ impl DarwinHandler {
        for prover_type in prover_types_set {
            match prover_type {
                ProverType::Chunk => {
                    handler.chunk_prover = Some(RefCell::new(ChunkProver::from_params_and_assets(
                    handler.chunk_prover = Some(RwLock::new(ChunkProver::from_params_and_assets(
                        params_map, assets_dir,
                    )));
                }
                ProverType::Batch => {
                    handler.batch_prover = Some(RefCell::new(BatchProver::from_params_and_assets(
                    handler.batch_prover = Some(RwLock::new(BatchProver::from_params_and_assets(
                        params_map, assets_dir,
                    )))
                }
@@ -95,22 +87,18 @@ impl DarwinHandler {
        Ok(handler)
    }

    pub fn new(
        prover_type: ProverType,
        params_dir: &str,
        assets_dir: &str,
        geth_client: Option<Rc<RefCell<GethClient>>>,
    ) -> Result<Self> {
        Self::new_multi(vec![prover_type], params_dir, assets_dir, geth_client)
    pub fn new(prover_types: Vec<ProverType>, params_dir: &str, assets_dir: &str) -> Result<Self> {
        Self::new_multi(prover_types, params_dir, assets_dir)
    }
    fn gen_chunk_proof_raw(&self, chunk_trace: Vec<BlockTrace>) -> Result<ChunkProof> {
    async fn gen_chunk_proof_raw(&self, chunk_trace: Vec<BlockTrace>) -> Result<ChunkProof> {
        if let Some(prover) = self.chunk_prover.as_ref() {
            let chunk = ChunkProvingTask::from(chunk_trace);

            let chunk_proof =
                prover
                    .borrow_mut()
                    .write()
                    .await
                    .gen_chunk_proof(chunk, None, None, self.get_output_dir())?;

            return Ok(chunk_proof);
@@ -118,13 +106,13 @@ impl DarwinHandler {
        unreachable!("please check errors in proof_type logic")
    }

    fn gen_chunk_proof(&self, task: &crate::types::Task) -> Result<String> {
        let chunk_trace = self.gen_chunk_traces(task)?;
        let chunk_proof = self.gen_chunk_proof_raw(chunk_trace)?;
    async fn gen_chunk_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let chunk_traces: Vec<BlockTrace> = serde_json::from_str(&prove_request.input)?;
        let chunk_proof = self.gen_chunk_proof_raw(chunk_traces).await?;
        Ok(serde_json::to_string(&chunk_proof)?)
    }

    fn gen_batch_proof_raw(&self, batch_task_detail: BatchTaskDetail) -> Result<BatchProof> {
    async fn gen_batch_proof_raw(&self, batch_task_detail: BatchTaskDetail) -> Result<BatchProof> {
        if let Some(prover) = self.batch_prover.as_ref() {
            let chunk_hashes_proofs: Vec<(ChunkInfo, ChunkProof)> = batch_task_detail
                .chunk_infos
@@ -136,13 +124,13 @@ impl DarwinHandler {
            let chunk_proofs: Vec<ChunkProof> =
                chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();

            let is_valid = prover.borrow_mut().check_protocol_of_chunks(&chunk_proofs);
            let is_valid = prover.read().await.check_protocol_of_chunks(&chunk_proofs);

            if !is_valid {
                bail!("non-match chunk protocol")
            }
            check_chunk_hashes("", &chunk_hashes_proofs).context("failed to check chunk info")?;
            let batch_proof = prover.borrow_mut().gen_batch_proof(
            let batch_proof = prover.write().await.gen_batch_proof(
                batch_task_detail.batch_proving_task,
                None,
                self.get_output_dir(),
@@ -153,17 +141,18 @@ impl DarwinHandler {
        unreachable!("please check errors in proof_type logic")
    }

    fn gen_batch_proof(&self, task: &crate::types::Task) -> Result<String> {
        log::info!("[circuit] gen_batch_proof for task {}", task.id);

        let batch_task_detail: BatchTaskDetail = serde_json::from_str(&task.task_data)?;
        let batch_proof = self.gen_batch_proof_raw(batch_task_detail)?;
    async fn gen_batch_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let batch_task_detail: BatchTaskDetail = serde_json::from_str(&prove_request.input)?;
        let batch_proof = self.gen_batch_proof_raw(batch_task_detail).await?;
        Ok(serde_json::to_string(&batch_proof)?)
    }

    fn gen_bundle_proof_raw(&self, bundle_task_detail: BundleTaskDetail) -> Result<BundleProof> {
    async fn gen_bundle_proof_raw(
        &self,
        bundle_task_detail: BundleTaskDetail,
    ) -> Result<BundleProof> {
        if let Some(prover) = self.batch_prover.as_ref() {
            let bundle_proof = prover.borrow_mut().gen_bundle_proof(
            let bundle_proof = prover.write().await.gen_bundle_proof(
                bundle_task_detail,
                None,
                self.get_output_dir(),
@@ -174,100 +163,45 @@ impl DarwinHandler {
        unreachable!("please check errors in proof_type logic")
    }

    fn gen_bundle_proof(&self, task: &crate::types::Task) -> Result<String> {
        log::info!("[circuit] gen_bundle_proof for task {}", task.id);
        let bundle_task_detail: BundleTaskDetail = serde_json::from_str(&task.task_data)?;
        let bundle_proof = self.gen_bundle_proof_raw(bundle_task_detail)?;
    async fn gen_bundle_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let bundle_task_detail: BundleTaskDetail = serde_json::from_str(&prove_request.input)?;
        let bundle_proof = self.gen_bundle_proof_raw(bundle_task_detail).await?;
        Ok(serde_json::to_string(&bundle_proof)?)
    }

    fn get_output_dir(&self) -> Option<&str> {
        OUTPUT_DIR.as_deref()
    }

    fn gen_chunk_traces(&self, task: &Task) -> Result<Vec<BlockTrace>> {
        let chunk_task_detail: ChunkTaskDetail = serde_json::from_str(&task.task_data)?;
        self.get_sorted_traces_by_hashes(&chunk_task_detail.block_hashes)
    }

    fn get_sorted_traces_by_hashes(&self, block_hashes: &[CommonHash]) -> Result<Vec<BlockTrace>> {
        if block_hashes.is_empty() {
            log::error!("[prover] failed to get sorted traces: block_hashes are empty");
            bail!("block_hashes are empty")
        }

        let mut block_traces = Vec::new();
        for hash in block_hashes.iter() {
            let trace = self
                .geth_client
                .as_ref()
                .unwrap()
                .borrow_mut()
                .get_block_trace_by_hash(hash)?;
            block_traces.push(trace);
        }

        block_traces.sort_by(|a, b| {
            if get_block_number(a).is_none() {
                Ordering::Less
            } else if get_block_number(b).is_none() {
                Ordering::Greater
            } else {
                get_block_number(a)
                    .unwrap()
                    .cmp(&get_block_number(b).unwrap())
            }
        });

        let block_numbers: Vec<u64> = block_traces
            .iter()
            .map(|trace| get_block_number(trace).unwrap_or(0))
            .collect();
        let mut i = 0;
        while i < block_numbers.len() - 1 {
            if block_numbers[i] + 1 != block_numbers[i + 1] {
                log::error!(
                    "[prover] block numbers are not continuous, got {} and {}",
                    block_numbers[i],
                    block_numbers[i + 1]
                );
                bail!(
                    "block numbers are not continuous, got {} and {}",
                    block_numbers[i],
                    block_numbers[i + 1]
                )
            }
            i += 1;
        }

        Ok(block_traces)
    }
}
#[async_trait]
impl CircuitsHandler for DarwinHandler {
    fn get_vk(&self, task_type: TaskType) -> Option<Vec<u8>> {
    async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>> {
        match task_type {
            TaskType::Chunk => self
                .chunk_prover
                .as_ref()
                .and_then(|prover| prover.borrow().get_vk()),
            TaskType::Batch => self
            CircuitType::Chunk => self.chunk_prover.as_ref().unwrap().read().await.get_vk(),
            CircuitType::Batch => self
                .batch_prover
                .as_ref()
                .and_then(|prover| prover.borrow().get_batch_vk()),
            TaskType::Bundle => self
                .unwrap()
                .read()
                .await
                .get_batch_vk(),
            CircuitType::Bundle => self
                .batch_prover
                .as_ref()
                .and_then(|prover| prover.borrow().get_bundle_vk()),
                .unwrap()
                .read()
                .await
                .get_bundle_vk(),
            _ => unreachable!(),
        }
    }

    fn get_proof_data(&self, task_type: TaskType, task: &crate::types::Task) -> Result<String> {
        match task_type {
            TaskType::Chunk => self.gen_chunk_proof(task),
            TaskType::Batch => self.gen_batch_proof(task),
            TaskType::Bundle => self.gen_bundle_proof(task),
    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
        match prove_request.circuit_type {
            CircuitType::Chunk => self.gen_chunk_proof(prove_request).await,
            CircuitType::Batch => self.gen_batch_proof(prove_request).await,
            CircuitType::Bundle => self.gen_bundle_proof(prove_request).await,
            _ => unreachable!(),
        }
    }
@@ -280,11 +214,12 @@ mod tests {
    use super::*;
    use crate::zk_circuits_handler::utils::encode_vk;
    use prover_darwin::utils::chunk_trace_to_witness_block;
    use scroll_proving_sdk::utils::init_tracing;
    use std::{path::PathBuf, sync::LazyLock};

    #[ctor::ctor]
    fn init() {
        crate::utils::log_init(None);
        init_tracing();
        log::info!("logger initialized");
    }

@@ -312,19 +247,18 @@ mod tests {
        assert!(result);
    }

    #[test]
    fn test_circuits() -> Result<()> {
    #[tokio::test]
    async fn test_circuits() -> Result<()> {
        let bi_handler = DarwinHandler::new_multi(
            vec![ProverType::Chunk, ProverType::Batch],
            &PARAMS_PATH,
            &ASSETS_PATH,
            None,
        )?;

        let chunk_handler = bi_handler;
        let chunk_vk = chunk_handler.get_vk(TaskType::Chunk).unwrap();
        let chunk_vk = chunk_handler.get_vk(CircuitType::Chunk).await.unwrap();

        check_vk(TaskType::Chunk, chunk_vk, "chunk vk must be available");
        check_vk(CircuitType::Chunk, chunk_vk, "chunk vk must be available");
        let chunk_dir_paths = get_chunk_dir_paths()?;
        log::info!("chunk_dir_paths, {:?}", chunk_dir_paths);
        let mut chunk_infos = vec![];
@@ -338,18 +272,18 @@ mod tests {
            chunk_infos.push(chunk_info);

            log::info!("start to prove {chunk_id}");
            let chunk_proof = chunk_handler.gen_chunk_proof_raw(chunk_trace)?;
            let chunk_proof = chunk_handler.gen_chunk_proof_raw(chunk_trace).await?;
            let proof_data = serde_json::to_string(&chunk_proof)?;
            dump_proof(chunk_id, proof_data)?;
            chunk_proofs.push(chunk_proof);
        }

        let batch_handler = chunk_handler;
        let batch_vk = batch_handler.get_vk(TaskType::Batch).unwrap();
        check_vk(TaskType::Batch, batch_vk, "batch vk must be available");
        let batch_vk = batch_handler.get_vk(CircuitType::Batch).await.unwrap();
        check_vk(CircuitType::Batch, batch_vk, "batch vk must be available");
        let batch_task_detail = make_batch_task_detail(chunk_infos, chunk_proofs);
        log::info!("start to prove batch");
        let batch_proof = batch_handler.gen_batch_proof_raw(batch_task_detail)?;
        let batch_proof = batch_handler.gen_batch_proof_raw(batch_task_detail).await?;
        let proof_data = serde_json::to_string(&batch_proof)?;
        dump_proof("batch_proof".to_string(), proof_data)?;

@@ -369,19 +303,19 @@ mod tests {
        // }
    }

    fn check_vk(proof_type: TaskType, vk: Vec<u8>, info: &str) {
    fn check_vk(proof_type: CircuitType, vk: Vec<u8>, info: &str) {
        log::info!("check_vk, {:?}", proof_type);
        let vk_from_file = read_vk(proof_type).unwrap();
        assert_eq!(vk_from_file, encode_vk(vk), "{info}")
    }

    fn read_vk(proof_type: TaskType) -> Result<String> {
    fn read_vk(proof_type: CircuitType) -> Result<String> {
        log::info!("read_vk, {:?}", proof_type);
        let vk_file = match proof_type {
            TaskType::Chunk => CHUNK_VK_PATH.clone(),
            TaskType::Batch => BATCH_VK_PATH.clone(),
            TaskType::Bundle => todo!(),
            TaskType::Undefined => unreachable!(),
            CircuitType::Chunk => CHUNK_VK_PATH.clone(),
            CircuitType::Batch => BATCH_VK_PATH.clone(),
            CircuitType::Bundle => todo!(),
            CircuitType::Undefined => unreachable!(),
        };

        let data = std::fs::read(vk_file)?;
@@ -1,14 +1,14 @@
use super::{common::*, CircuitsHandler};
use crate::{
    geth_client::GethClient,
    types::{ProverType, TaskType},
};
use crate::types::ProverType;
use anyhow::{bail, Context, Ok, Result};
use async_trait::async_trait;
use once_cell::sync::Lazy;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, CircuitType};
use serde::Deserialize;
use tokio::sync::RwLock;

use crate::types::{CommonHash, Task};
use std::{cell::RefCell, cmp::Ordering, env, rc::Rc};
use crate::types::CommonHash;
use std::env;

use prover_darwin_v2::{
    aggregator::Prover as BatchProver,
@@ -37,16 +37,10 @@ pub struct ChunkTaskDetail {
    pub block_hashes: Vec<CommonHash>,
}

fn get_block_number(block_trace: &BlockTrace) -> Option<u64> {
    block_trace.header.number.map(|n| n.as_u64())
}

#[derive(Default)]
pub struct DarwinV2Handler {
    chunk_prover: Option<RefCell<ChunkProver<'static>>>,
    batch_prover: Option<RefCell<BatchProver<'static>>>,

    geth_client: Option<Rc<RefCell<GethClient>>>,
    chunk_prover: Option<RwLock<ChunkProver<'static>>>,
    batch_prover: Option<RwLock<BatchProver<'static>>>,
}

impl DarwinV2Handler {
@@ -54,7 +48,6 @@ impl DarwinV2Handler {
        prover_types: Vec<ProverType>,
        params_dir: &str,
        assets_dir: &str,
        geth_client: Option<Rc<RefCell<GethClient>>>,
    ) -> Result<Self> {
        let class_name = std::intrinsics::type_name::<Self>();
        let prover_types_set = prover_types
@@ -63,7 +56,6 @@ impl DarwinV2Handler {
        let mut handler = Self {
            batch_prover: None,
            chunk_prover: None,
            geth_client,
        };
        let degrees: Vec<u32> = get_degrees(&prover_types_set, |prover_type| match prover_type {
            ProverType::Chunk => ZKEVM_DEGREES.clone(),
@@ -81,12 +73,12 @@ impl DarwinV2Handler {
        for prover_type in prover_types_set {
            match prover_type {
                ProverType::Chunk => {
                    handler.chunk_prover = Some(RefCell::new(ChunkProver::from_params_and_assets(
                    handler.chunk_prover = Some(RwLock::new(ChunkProver::from_params_and_assets(
                        params_map, assets_dir,
                    )));
                }
                ProverType::Batch => {
                    handler.batch_prover = Some(RefCell::new(BatchProver::from_params_and_assets(
                    handler.batch_prover = Some(RwLock::new(BatchProver::from_params_and_assets(
                        params_map, assets_dir,
                    )))
                }
@@ -95,22 +87,18 @@ impl DarwinV2Handler {
        Ok(handler)
    }

    pub fn new(
        prover_type: ProverType,
        params_dir: &str,
        assets_dir: &str,
        geth_client: Option<Rc<RefCell<GethClient>>>,
    ) -> Result<Self> {
        Self::new_multi(vec![prover_type], params_dir, assets_dir, geth_client)
    pub fn new(prover_types: Vec<ProverType>, params_dir: &str, assets_dir: &str) -> Result<Self> {
        Self::new_multi(prover_types, params_dir, assets_dir)
    }
    fn gen_chunk_proof_raw(&self, chunk_trace: Vec<BlockTrace>) -> Result<ChunkProof> {
    async fn gen_chunk_proof_raw(&self, chunk_trace: Vec<BlockTrace>) -> Result<ChunkProof> {
        if let Some(prover) = self.chunk_prover.as_ref() {
            let chunk = ChunkProvingTask::from(chunk_trace);

            let chunk_proof =
                prover
                    .borrow_mut()
                    .write()
                    .await
                    .gen_chunk_proof(chunk, None, None, self.get_output_dir())?;

            return Ok(chunk_proof);
@@ -118,13 +106,13 @@ impl DarwinV2Handler {
        unreachable!("please check errors in proof_type logic")
    }

    fn gen_chunk_proof(&self, task: &crate::types::Task) -> Result<String> {
        let chunk_trace = self.gen_chunk_traces(task)?;
        let chunk_proof = self.gen_chunk_proof_raw(chunk_trace)?;
    async fn gen_chunk_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let chunk_traces: Vec<BlockTrace> = serde_json::from_str(&prove_request.input)?;
        let chunk_proof = self.gen_chunk_proof_raw(chunk_traces).await?;
        Ok(serde_json::to_string(&chunk_proof)?)
    }

    fn gen_batch_proof_raw(&self, batch_task_detail: BatchTaskDetail) -> Result<BatchProof> {
    async fn gen_batch_proof_raw(&self, batch_task_detail: BatchTaskDetail) -> Result<BatchProof> {
        if let Some(prover) = self.batch_prover.as_ref() {
            let chunk_hashes_proofs: Vec<(ChunkInfo, ChunkProof)> = batch_task_detail
                .chunk_infos
@@ -136,13 +124,13 @@ impl DarwinV2Handler {
            let chunk_proofs: Vec<ChunkProof> =
                chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();

            let is_valid = prover.borrow_mut().check_protocol_of_chunks(&chunk_proofs);
            let is_valid = prover.read().await.check_protocol_of_chunks(&chunk_proofs);

            if !is_valid {
                bail!("non-match chunk protocol")
            }
            check_chunk_hashes("", &chunk_hashes_proofs).context("failed to check chunk info")?;
            let batch_proof = prover.borrow_mut().gen_batch_proof(
            let batch_proof = prover.write().await.gen_batch_proof(
                batch_task_detail.batch_proving_task,
                None,
                self.get_output_dir(),
@@ -153,17 +141,18 @@ impl DarwinV2Handler {
        unreachable!("please check errors in proof_type logic")
    }

    fn gen_batch_proof(&self, task: &crate::types::Task) -> Result<String> {
        log::info!("[circuit] gen_batch_proof for task {}", task.id);

        let batch_task_detail: BatchTaskDetail = serde_json::from_str(&task.task_data)?;
        let batch_proof = self.gen_batch_proof_raw(batch_task_detail)?;
    async fn gen_batch_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let batch_task_detail: BatchTaskDetail = serde_json::from_str(&prove_request.input)?;
        let batch_proof = self.gen_batch_proof_raw(batch_task_detail).await?;
        Ok(serde_json::to_string(&batch_proof)?)
    }

    fn gen_bundle_proof_raw(&self, bundle_task_detail: BundleTaskDetail) -> Result<BundleProof> {
    async fn gen_bundle_proof_raw(
        &self,
        bundle_task_detail: BundleTaskDetail,
    ) -> Result<BundleProof> {
        if let Some(prover) = self.batch_prover.as_ref() {
            let bundle_proof = prover.borrow_mut().gen_bundle_proof(
            let bundle_proof = prover.write().await.gen_bundle_proof(
                bundle_task_detail,
                None,
                self.get_output_dir(),
@@ -174,100 +163,45 @@ impl DarwinV2Handler {
        unreachable!("please check errors in proof_type logic")
    }

    fn gen_bundle_proof(&self, task: &crate::types::Task) -> Result<String> {
        log::info!("[circuit] gen_bundle_proof for task {}", task.id);
        let bundle_task_detail: BundleTaskDetail = serde_json::from_str(&task.task_data)?;
        let bundle_proof = self.gen_bundle_proof_raw(bundle_task_detail)?;
    async fn gen_bundle_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let bundle_task_detail: BundleTaskDetail = serde_json::from_str(&prove_request.input)?;
        let bundle_proof = self.gen_bundle_proof_raw(bundle_task_detail).await?;
        Ok(serde_json::to_string(&bundle_proof)?)
    }

    fn get_output_dir(&self) -> Option<&str> {
        OUTPUT_DIR.as_deref()
    }

    fn gen_chunk_traces(&self, task: &Task) -> Result<Vec<BlockTrace>> {
        let chunk_task_detail: ChunkTaskDetail = serde_json::from_str(&task.task_data)?;
        self.get_sorted_traces_by_hashes(&chunk_task_detail.block_hashes)
    }

    fn get_sorted_traces_by_hashes(&self, block_hashes: &[CommonHash]) -> Result<Vec<BlockTrace>> {
        if block_hashes.is_empty() {
            log::error!("[prover] failed to get sorted traces: block_hashes are empty");
            bail!("block_hashes are empty")
        }

        let mut block_traces = Vec::new();
        for hash in block_hashes.iter() {
            let trace = self
                .geth_client
                .as_ref()
                .unwrap()
                .borrow_mut()
                .get_block_trace_by_hash(hash)?;
            block_traces.push(trace);
        }

        block_traces.sort_by(|a, b| {
            if get_block_number(a).is_none() {
                Ordering::Less
            } else if get_block_number(b).is_none() {
                Ordering::Greater
            } else {
                get_block_number(a)
                    .unwrap()
                    .cmp(&get_block_number(b).unwrap())
            }
        });

        let block_numbers: Vec<u64> = block_traces
            .iter()
            .map(|trace| get_block_number(trace).unwrap_or(0))
            .collect();
        let mut i = 0;
        while i < block_numbers.len() - 1 {
            if block_numbers[i] + 1 != block_numbers[i + 1] {
                log::error!(
                    "[prover] block numbers are not continuous, got {} and {}",
                    block_numbers[i],
                    block_numbers[i + 1]
                );
                bail!(
                    "block numbers are not continuous, got {} and {}",
                    block_numbers[i],
                    block_numbers[i + 1]
                )
            }
            i += 1;
        }

        Ok(block_traces)
    }
}
#[async_trait]
impl CircuitsHandler for DarwinV2Handler {
    fn get_vk(&self, task_type: TaskType) -> Option<Vec<u8>> {
    async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>> {
        match task_type {
            TaskType::Chunk => self
                .chunk_prover
                .as_ref()
                .and_then(|prover| prover.borrow().get_vk()),
            TaskType::Batch => self
            CircuitType::Chunk => self.chunk_prover.as_ref().unwrap().read().await.get_vk(),
            CircuitType::Batch => self
                .batch_prover
                .as_ref()
                .and_then(|prover| prover.borrow().get_batch_vk()),
            TaskType::Bundle => self
                .unwrap()
                .read()
                .await
                .get_batch_vk(),
            CircuitType::Bundle => self
                .batch_prover
                .as_ref()
                .and_then(|prover| prover.borrow().get_bundle_vk()),
                .unwrap()
                .read()
                .await
                .get_bundle_vk(),
            _ => unreachable!(),
        }
    }

    fn get_proof_data(&self, task_type: TaskType, task: &crate::types::Task) -> Result<String> {
        match task_type {
            TaskType::Chunk => self.gen_chunk_proof(task),
            TaskType::Batch => self.gen_batch_proof(task),
            TaskType::Bundle => self.gen_bundle_proof(task),
    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
        match prove_request.circuit_type {
            CircuitType::Chunk => self.gen_chunk_proof(prove_request).await,
            CircuitType::Batch => self.gen_batch_proof(prove_request).await,
            CircuitType::Bundle => self.gen_bundle_proof(prove_request).await,
            _ => unreachable!(),
        }
    }
@@ -284,11 +218,12 @@ mod tests {
        aggregator::eip4844, utils::chunk_trace_to_witness_block, BatchData, BatchHeader,
        MAX_AGG_SNARKS,
    };
    use scroll_proving_sdk::utils::init_tracing;
    use std::{path::PathBuf, sync::LazyLock};

    #[ctor::ctor]
    fn init() {
        crate::utils::log_init(None);
        init_tracing();
        log::info!("logger initialized");
    }

@@ -316,19 +251,18 @@ mod tests {
        assert!(result);
    }

    #[test]
    fn test_circuits() -> Result<()> {
    #[tokio::test]
    async fn test_circuits() -> Result<()> {
        let bi_handler = DarwinV2Handler::new_multi(
            vec![ProverType::Chunk, ProverType::Batch],
            &PARAMS_PATH,
            &ASSETS_PATH,
            None,
        )?;

        let chunk_handler = bi_handler;
        let chunk_vk = chunk_handler.get_vk(TaskType::Chunk).unwrap();
        let chunk_vk = chunk_handler.get_vk(CircuitType::Chunk).await.unwrap();

        check_vk(TaskType::Chunk, chunk_vk, "chunk vk must be available");
        check_vk(CircuitType::Chunk, chunk_vk, "chunk vk must be available");
        let chunk_dir_paths = get_chunk_dir_paths()?;
        log::info!("chunk_dir_paths, {:?}", chunk_dir_paths);
        let mut chunk_traces = vec![];
@@ -343,18 +277,18 @@ mod tests {
            chunk_infos.push(chunk_info);

            log::info!("start to prove {chunk_id}");
            let chunk_proof = chunk_handler.gen_chunk_proof_raw(chunk_trace)?;
            let chunk_proof = chunk_handler.gen_chunk_proof_raw(chunk_trace).await?;
            let proof_data = serde_json::to_string(&chunk_proof)?;
            dump_proof(chunk_id, proof_data)?;
            chunk_proofs.push(chunk_proof);
        }

        let batch_handler = chunk_handler;
        let batch_vk = batch_handler.get_vk(TaskType::Batch).unwrap();
        check_vk(TaskType::Batch, batch_vk, "batch vk must be available");
        let batch_vk = batch_handler.get_vk(CircuitType::Batch).await.unwrap();
        check_vk(CircuitType::Batch, batch_vk, "batch vk must be available");
        let batch_task_detail = make_batch_task_detail(chunk_traces, chunk_proofs, None);
        log::info!("start to prove batch");
        let batch_proof = batch_handler.gen_batch_proof_raw(batch_task_detail)?;
        let batch_proof = batch_handler.gen_batch_proof_raw(batch_task_detail).await?;
        let proof_data = serde_json::to_string(&batch_proof)?;
        dump_proof("batch_proof".to_string(), proof_data)?;

@@ -427,19 +361,19 @@ mod tests {
        }
    }

    fn check_vk(proof_type: TaskType, vk: Vec<u8>, info: &str) {
    fn check_vk(proof_type: CircuitType, vk: Vec<u8>, info: &str) {
        log::info!("check_vk, {:?}", proof_type);
        let vk_from_file = read_vk(proof_type).unwrap();
        assert_eq!(vk_from_file, encode_vk(vk), "{info}")
    }

    fn read_vk(proof_type: TaskType) -> Result<String> {
    fn read_vk(proof_type: CircuitType) -> Result<String> {
        log::info!("read_vk, {:?}", proof_type);
        let vk_file = match proof_type {
            TaskType::Chunk => CHUNK_VK_PATH.clone(),
            TaskType::Batch => BATCH_VK_PATH.clone(),
            TaskType::Bundle => todo!(),
            TaskType::Undefined => unreachable!(),
            CircuitType::Chunk => CHUNK_VK_PATH.clone(),
            CircuitType::Batch => BATCH_VK_PATH.clone(),
            CircuitType::Bundle => todo!(),
            CircuitType::Undefined => unreachable!(),
        };

        let data = std::fs::read(vk_file)?;
@@ -34,5 +34,5 @@ var L2GasPriceOracleMetaData = &bind.MetaData{

// L1GasPriceOracleMetaData contains all meta data concerning the L1GasPriceOracle contract.
var L1GasPriceOracleMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"BlobScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"CommitScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BlobBaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BlobBaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"overhead\",\"type\":\"uint256\"}],\"name\":\"OverheadUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"ScalarUpdated\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"blobScalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"commitScalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"getL1Fee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"getL1GasUsed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BlobBaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_l1BlobBaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFeeAndBlobBaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"BlobScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"CommitScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BlobBaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BlobBaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"overhead\",\"type\":\"uint256\"}],\"name\":\"OverheadUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"ScalarUpdated\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"blobScalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"commitScalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"getL1Fee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"getL1GasUsed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BlobBaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_l1BlobBaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFeeAndBlobBaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
}

@@ -52,14 +52,15 @@ func TestPackImportGenesisBatch(t *testing.T) {
    assert.NoError(err)
}

func TestPackSetL1BaseFee(t *testing.T) {
func TestPackSetL1BaseFeeAndBlobBaseFee(t *testing.T) {
    assert := assert.New(t)

    l1GasOracleABI, err := L1GasPriceOracleMetaData.GetAbi()
    assert.NoError(err)

    baseFee := big.NewInt(2333)
    _, err = l1GasOracleABI.Pack("setL1BaseFee", baseFee)
    blobBaseFee := big.NewInt(1)
    _, err = l1GasOracleABI.Pack("setL1BaseFeeAndBlobBaseFee", baseFee, blobBaseFee)
    assert.NoError(err)
}
@@ -22,7 +22,7 @@ import (
    "scroll-tech/rollup/internal/config"
    "scroll-tech/rollup/internal/controller/relayer"
    "scroll-tech/rollup/internal/controller/watcher"
    butils "scroll-tech/rollup/internal/utils"
    rutils "scroll-tech/rollup/internal/utils"
)

var app *cli.App
@@ -78,15 +78,9 @@ func action(ctx *cli.Context) error {
        log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
    }

    genesisPath := ctx.String(utils.Genesis.Name)
    genesis, err := utils.ReadGenesis(genesisPath)
    if err != nil {
        log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
    }

    l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, db, registry)

    l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, genesis.Config, relayer.ServiceTypeL1GasOracle, registry)
    l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, relayer.ServiceTypeL1GasOracle, registry)
    if err != nil {
        log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
    }
@@ -98,7 +92,7 @@ func action(ctx *cli.Context) error {
    go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
        // Fetch the latest block number to decrease the delay when fetching gas prices
        // Use latest block number - 1 to prevent frequent reorg
        number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l1client, rpc.LatestBlockNumber)
        number, loopErr := rutils.GetLatestConfirmedBlockNumber(ctx, l1client, rpc.LatestBlockNumber)
        if loopErr != nil {
            log.Error("failed to get block number", "err", loopErr)
            return

@@ -8,6 +8,7 @@ import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/scroll-tech/da-codec/encoding"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/urfave/cli/v2"
@@ -20,7 +21,7 @@ import (
    "scroll-tech/rollup/internal/config"
    "scroll-tech/rollup/internal/controller/relayer"
    "scroll-tech/rollup/internal/controller/watcher"
    butils "scroll-tech/rollup/internal/utils"
    rutils "scroll-tech/rollup/internal/utils"
)

var app *cli.App
@@ -84,15 +85,16 @@ func action(ctx *cli.Context) error {
        log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
    }

    chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, genesis.Config, db, registry)
    batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, genesis.Config, db, registry)
    bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, genesis.Config, db, registry)
    minCodecVersion := encoding.CodecVersion(ctx.Uint(utils.MinCodecVersionFlag.Name))
    chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, genesis.Config, db, registry)
    batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, registry)
    bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, minCodecVersion, genesis.Config, db, registry)

    l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
    l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry)

    // Watcher loop to fetch missing blocks
    go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
        number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
        number, loopErr := rutils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
        if loopErr != nil {
            log.Error("failed to get block number", "err", loopErr)
            return
@@ -108,8 +110,6 @@ func action(ctx *cli.Context) error {

    go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)

    go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)

    go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessPendingBundles)

    // Finish start all rollup relayer functions.
@@ -18,11 +18,10 @@
    "gas_oracle_config": {
        "min_gas_price": 0,
        "gas_price_diff": 50000,
        "l1_base_fee_weight": 0.132,
        "l1_blob_base_fee_weight": 0.145,
        "check_committed_batches_window_minutes": 5,
        "l1_base_fee_default": 15000000000,
        "l1_blob_base_fee_default": 1
        "l1_blob_base_fee_default": 1,
        "l1_blob_base_fee_threshold": 0
    },
    "gas_oracle_sender_signer_config": {
        "signer_type": "PrivateKey",
@@ -3,7 +3,7 @@ module scroll-tech/rollup
go 1.21

require (
    github.com/agiledragon/gomonkey/v2 v2.11.0
    github.com/agiledragon/gomonkey/v2 v2.12.0
    github.com/consensys/gnark-crypto v0.12.1
    github.com/crate-crypto/go-kzg-4844 v1.0.0
    github.com/gin-gonic/gin v1.9.1
@@ -11,8 +11,8 @@ require (
    github.com/holiman/uint256 v1.2.4
    github.com/mitchellh/mapstructure v1.5.0
    github.com/prometheus/client_golang v1.16.0
    github.com/scroll-tech/da-codec v0.1.1-0.20240819100936-c6af3bbe7068
    github.com/scroll-tech/go-ethereum v1.10.14-0.20240821075135-bdd1b005d40f
    github.com/scroll-tech/da-codec v0.1.2
    github.com/scroll-tech/go-ethereum v1.10.14-0.20241023093931-91c2f9c27f4d
    github.com/smartystreets/goconvey v1.8.0
    github.com/spf13/viper v1.19.0
    github.com/stretchr/testify v1.9.0
@@ -21,41 +21,51 @@ require (
)

require (
    github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
    github.com/DataDog/zstd v1.4.5 // indirect
    github.com/Microsoft/go-winio v0.6.1 // indirect
    github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/bits-and-blooms/bitset v1.13.0 // indirect
    github.com/btcsuite/btcd v0.20.1-beta // indirect
    github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
    github.com/bytedance/sonic v1.10.1 // indirect
    github.com/cespare/xxhash/v2 v2.2.0 // indirect
    github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
    github.com/chenzhuoyu/iasm v0.9.0 // indirect
    github.com/cockroachdb/errors v1.11.1 // indirect
    github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
    github.com/cockroachdb/pebble v1.1.0 // indirect
    github.com/cockroachdb/redact v1.1.5 // indirect
    github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
    github.com/consensys/bavard v0.1.13 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
    github.com/deckarep/golang-set v1.8.0 // indirect
    github.com/edsrzf/mmap-go v1.0.0 // indirect
    github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
    github.com/deckarep/golang-set/v2 v2.1.0 // indirect
    github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
    github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
    github.com/fjl/memsize v0.0.2 // indirect
    github.com/fsnotify/fsnotify v1.7.0 // indirect
    github.com/gabriel-vasile/mimetype v1.4.2 // indirect
    github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
    github.com/getsentry/sentry-go v0.18.0 // indirect
    github.com/gin-contrib/sse v0.1.0 // indirect
    github.com/go-kit/kit v0.9.0 // indirect
    github.com/go-ole/go-ole v1.3.0 // indirect
    github.com/go-playground/locales v0.14.1 // indirect
    github.com/go-playground/universal-translator v0.18.1 // indirect
    github.com/go-playground/validator/v10 v10.15.5 // indirect
    github.com/go-stack/stack v1.8.1 // indirect
    github.com/goccy/go-json v0.10.2 // indirect
    github.com/gofrs/flock v0.8.1 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
    github.com/golang/protobuf v1.5.3 // indirect
    github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
    github.com/google/go-cmp v0.6.0 // indirect
    github.com/google/uuid v1.6.0 // indirect
    github.com/gopherjs/gopherjs v1.17.2 // indirect
    github.com/gorilla/websocket v1.5.0 // indirect
    github.com/hashicorp/go-bexpr v0.1.10 // indirect
    github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
    github.com/hashicorp/hcl v1.0.0 // indirect
    github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect
    github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
    github.com/huin/goupnp v1.3.0 // indirect
    github.com/iden3/go-iden3-crypto v0.0.16 // indirect
@@ -64,7 +74,10 @@ require (
    github.com/jinzhu/now v1.1.5 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
    github.com/jtolds/gls v4.20.0+incompatible // indirect
    github.com/klauspost/compress v1.17.9 // indirect
    github.com/klauspost/cpuid/v2 v2.2.5 // indirect
    github.com/kr/pretty v0.3.1 // indirect
    github.com/kr/text v0.2.0 // indirect
    github.com/leodido/go-urn v1.2.4 // indirect
    github.com/magiconair/properties v1.8.7 // indirect
    github.com/mattn/go-colorable v0.1.13 // indirect
@@ -84,9 +97,8 @@ require (
    github.com/prometheus/client_model v0.4.0 // indirect
    github.com/prometheus/common v0.42.0 // indirect
    github.com/prometheus/procfs v0.12.0 // indirect
    github.com/prometheus/tsdb v0.7.1 // indirect
    github.com/rivo/uniseg v0.4.4 // indirect
    github.com/rjeczalik/notify v0.9.1 // indirect
    github.com/rogpeppe/go-internal v1.10.0 // indirect
    github.com/rs/cors v1.7.0 // indirect
    github.com/russross/blackfriday/v2 v2.1.0 // indirect
    github.com/sagikazarmark/locafero v0.4.0 // indirect
@@ -114,15 +126,16 @@ require (
    golang.org/x/arch v0.5.0 // indirect
    golang.org/x/crypto v0.24.0 // indirect
    golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
    golang.org/x/net v0.23.0 // indirect
    golang.org/x/mod v0.17.0 // indirect
    golang.org/x/net v0.25.0 // indirect
    golang.org/x/sync v0.7.0 // indirect
    golang.org/x/sys v0.21.0 // indirect
    golang.org/x/text v0.16.0 // indirect
    golang.org/x/time v0.5.0 // indirect
    golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
    google.golang.org/protobuf v1.33.0 // indirect
    gopkg.in/ini.v1 v1.67.0 // indirect
    gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
    gopkg.in/urfave/cli.v1 v1.20.0 // indirect
    gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
    rsc.io/tmplfunc v0.0.3 // indirect
)
127 rollup/go.sum
@@ -1,20 +1,24 @@
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.11.0 h1:5oxSgA+tC1xuGsrIorR+sYiziYltmJyEZ9qA25b6l5U=
github.com/agiledragon/gomonkey/v2 v2.11.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
@@ -28,7 +32,6 @@ github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiays
github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
|
||||
@@ -37,6 +40,18 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
|
||||
github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
|
||||
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
|
||||
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4=
|
||||
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
|
||||
github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=
|
||||
github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
|
||||
github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4=
|
||||
github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E=
|
||||
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
|
||||
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
|
||||
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
|
||||
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
|
||||
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
|
||||
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
|
||||
@@ -45,18 +60,20 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0q
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
|
||||
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI=
|
||||
github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
|
||||
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
@@ -69,16 +86,14 @@ github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q
|
||||
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
|
||||
github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
|
||||
github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
|
||||
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
|
||||
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
@@ -92,13 +107,17 @@ github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7N
|
||||
github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
|
||||
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
|
||||
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
|
||||
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
@@ -129,10 +148,10 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
|
||||
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw=
|
||||
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||
@@ -154,16 +173,21 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
||||
@@ -178,7 +202,6 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
@@ -197,7 +220,6 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
@@ -213,31 +235,26 @@ github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754=
|
||||
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
|
||||
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
|
||||
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
|
||||
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
|
||||
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
@@ -248,10 +265,10 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
|
||||
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
|
||||
github.com/scroll-tech/da-codec v0.1.1-0.20240819100936-c6af3bbe7068 h1:KyTp4aedcpjr/rbntrmlhUxjrDYu1Q02QDLaF5vqpxs=
|
||||
github.com/scroll-tech/da-codec v0.1.1-0.20240819100936-c6af3bbe7068/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821075135-bdd1b005d40f h1:0XhY20/Sh2UCroZqD4orK7eDElQD2XK4GLrTbPmUBpw=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821075135-bdd1b005d40f/go.mod h1:jLTGZ5iL5T7g1BEWrQXVIR+wutJFDTVs/mCfjAlrhrA=
|
||||
github.com/scroll-tech/da-codec v0.1.2 h1:QyJ+dQ4zWVVJwuqxNt4MiKyrymVc6rHe4YPtURkjiRc=
|
||||
github.com/scroll-tech/da-codec v0.1.2/go.mod h1:odz1ck3umvYccCG03osaQBISAYGinZktZYbpk94fYRE=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20241023093931-91c2f9c27f4d h1:vuv7fGKEDtoeetI6RkKt8RAByJsYZBWk9Vo6gShv65c=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20241023093931-91c2f9c27f4d/go.mod h1:PWEOTg6LeWlJAlFJauO0msSLXWnpHmE+mVh5txtfeRM=
|
||||
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
|
||||
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
|
||||
@@ -264,7 +281,6 @@ github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL
|
||||
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
|
||||
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
|
||||
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
|
||||
@@ -279,7 +295,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
@@ -310,6 +325,7 @@ github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
|
||||
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
@@ -328,27 +344,30 @@ golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
|
||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
|
||||
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -366,6 +385,7 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
@@ -380,7 +400,11 @@ golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -395,19 +419,16 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
||||
@@ -68,22 +68,30 @@ type RelayerConfig struct {
    FinalizeBundleWithoutProofTimeoutSec uint64 `json:"finalize_bundle_without_proof_timeout_sec"`
}

// AlternativeGasTokenConfig The configuration for handling token exchange rates when updating the gas price oracle.
type AlternativeGasTokenConfig struct {
    Enabled           bool    `json:"enabled"`
    Mode              string  `json:"mode"`
    FixedExchangeRate float64 `json:"fixed_exchange_rate"` // fixed exchange rate of L2 gas token / L1 gas token
    TokenSymbolPair   string  `json:"token_symbol_pair"`   // The pair should be L2 gas token symbol + L1 gas token symbol
}

// GasOracleConfig The config for updating gas price oracle.
type GasOracleConfig struct {
    // MinGasPrice store the minimum gas price to set.
    MinGasPrice uint64 `json:"min_gas_price"`
    // GasPriceDiff is the minimum percentage of gas price difference to update gas oracle.
    GasPriceDiff uint64 `json:"gas_price_diff"`
    // AlternativeGasTokenConfig The configuration for handling token exchange rates when updating the gas price oracle.
    AlternativeGasTokenConfig *AlternativeGasTokenConfig `json:"alternative_gas_token_config"`

    // The following configs are only for updating L1 gas price, used for sender in L2.
    // The weight for L1 base fee.
    L1BaseFeeWeight float64 `json:"l1_base_fee_weight"`
    // The weight for L1 blob base fee.
    L1BlobBaseFeeWeight float64 `json:"l1_blob_base_fee_weight"`
    // CheckCommittedBatchesWindowMinutes the time frame to check if we committed batches to decide to update gas oracle or not in minutes
    CheckCommittedBatchesWindowMinutes int    `json:"check_committed_batches_window_minutes"`
    L1BaseFeeDefault                   uint64 `json:"l1_base_fee_default"`
    L1BlobBaseFeeDefault               uint64 `json:"l1_blob_base_fee_default"`

    // L1BlobBaseFeeThreshold the threshold of L1 blob base fee to enter the default gas price mode
    L1BlobBaseFeeThreshold uint64 `json:"l1_blob_base_fee_threshold"`
}

// SignerConfig - config of signer, contains type and config corresponding to type

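For reference, a minimal standalone sketch of what a configuration using these new fields could look like. The struct definitions below are local copies that mirror the fields shown in the diff, kept here only so the example compiles on its own, and every value in the JSON literal is made up for illustration; the real types live in the rollup config package and the real values come from a deployment's config file.

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the structs in the diff above; illustrative only.
type AlternativeGasTokenConfig struct {
	Enabled           bool    `json:"enabled"`
	Mode              string  `json:"mode"`
	FixedExchangeRate float64 `json:"fixed_exchange_rate"`
	TokenSymbolPair   string  `json:"token_symbol_pair"`
}

type GasOracleConfig struct {
	MinGasPrice                        uint64                     `json:"min_gas_price"`
	GasPriceDiff                       uint64                     `json:"gas_price_diff"`
	AlternativeGasTokenConfig          *AlternativeGasTokenConfig `json:"alternative_gas_token_config"`
	L1BaseFeeWeight                    float64                    `json:"l1_base_fee_weight"`
	L1BlobBaseFeeWeight                float64                    `json:"l1_blob_base_fee_weight"`
	CheckCommittedBatchesWindowMinutes int                        `json:"check_committed_batches_window_minutes"`
	L1BaseFeeDefault                   uint64                     `json:"l1_base_fee_default"`
	L1BlobBaseFeeDefault               uint64                     `json:"l1_blob_base_fee_default"`
	L1BlobBaseFeeThreshold             uint64                     `json:"l1_blob_base_fee_threshold"`
}

func main() {
	// Hypothetical values, chosen only to show the shape of the JSON.
	raw := `{
		"min_gas_price": 0,
		"gas_price_diff": 50000,
		"alternative_gas_token_config": {
			"enabled": true,
			"mode": "Fixed",
			"fixed_exchange_rate": 0.001,
			"token_symbol_pair": "UNIETH"
		},
		"check_committed_batches_window_minutes": 60,
		"l1_base_fee_default": 15000000000,
		"l1_blob_base_fee_default": 1,
		"l1_blob_base_fee_threshold": 0
	}`

	var cfg GasOracleConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}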
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
@@ -21,6 +20,7 @@ import (
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/controller/sender"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
rutils "scroll-tech/rollup/internal/utils"
|
||||
)
|
||||
|
||||
// Layer1Relayer is responsible for updating L1 gas price oracle contract on L2.
|
||||
@@ -29,18 +29,15 @@ import (
|
||||
type Layer1Relayer struct {
|
||||
ctx context.Context
|
||||
|
||||
cfg *config.RelayerConfig
|
||||
chainCfg *params.ChainConfig
|
||||
cfg *config.RelayerConfig
|
||||
|
||||
gasOracleSender *sender.Sender
|
||||
l1GasOracleABI *abi.ABI
|
||||
|
||||
lastBaseFee uint64
|
||||
lastBlobBaseFee uint64
|
||||
minGasPrice uint64
|
||||
gasPriceDiff uint64
|
||||
l1BaseFeeWeight float64
|
||||
l1BlobBaseFeeWeight float64
|
||||
lastBaseFee uint64
|
||||
lastBlobBaseFee uint64
|
||||
minGasPrice uint64
|
||||
gasPriceDiff uint64
|
||||
|
||||
l1BlockOrm *orm.L1Block
|
||||
l2BlockOrm *orm.L2Block
|
||||
@@ -50,7 +47,7 @@ type Layer1Relayer struct {
|
||||
}
|
||||
|
||||
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
|
||||
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, serviceType ServiceType, reg prometheus.Registerer) (*Layer1Relayer, error) {
|
||||
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, serviceType ServiceType, reg prometheus.Registerer) (*Layer1Relayer, error) {
|
||||
var gasOracleSender *sender.Sender
|
||||
var err error
|
||||
|
||||
@@ -81,7 +78,6 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
|
||||
|
||||
l1Relayer := &Layer1Relayer{
|
||||
cfg: cfg,
|
||||
chainCfg: chainCfg,
|
||||
ctx: ctx,
|
||||
l1BlockOrm: orm.NewL1Block(db),
|
||||
l2BlockOrm: orm.NewL2Block(db),
|
||||
@@ -90,10 +86,8 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
|
||||
gasOracleSender: gasOracleSender,
|
||||
l1GasOracleABI: bridgeAbi.L1GasPriceOracleABI,
|
||||
|
||||
minGasPrice: minGasPrice,
|
||||
gasPriceDiff: gasPriceDiff,
|
||||
l1BaseFeeWeight: cfg.GasOracleConfig.L1BaseFeeWeight,
|
||||
l1BlobBaseFeeWeight: cfg.GasOracleConfig.L1BlobBaseFeeWeight,
|
||||
minGasPrice: minGasPrice,
|
||||
gasPriceDiff: gasPriceDiff,
|
||||
}
|
||||
|
||||
l1Relayer.metrics = initL1RelayerMetrics(reg)
|
||||
@@ -131,32 +125,45 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
block := blocks[0]

if types.GasOracleStatus(block.GasOracleStatus) == types.GasOraclePending {
latestL2Height, err := r.l2BlockOrm.GetL2BlocksLatestHeight(r.ctx)
if err != nil {
log.Warn("Failed to fetch latest L2 block height from db", "err", err)
if block.BaseFee == 0 || block.BlobBaseFee == 0 {
log.Error("Invalid base fee or blob base fee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", block.BaseFee, "block.BlobBaseFee", block.BlobBaseFee)
return
}

var isBernoulli = block.BlobBaseFee > 0 && r.chainCfg.IsBernoulli(new(big.Int).SetUint64(latestL2Height))
var isCurie = block.BlobBaseFee > 0 && r.chainCfg.IsCurie(new(big.Int).SetUint64(latestL2Height))
baseFee := block.BaseFee
blobBaseFee := block.BlobBaseFee

var baseFee uint64
var blobBaseFee uint64
if isCurie {
baseFee = block.BaseFee
blobBaseFee = block.BlobBaseFee
} else if isBernoulli {
baseFee = uint64(math.Ceil(r.l1BaseFeeWeight*float64(block.BaseFee) + r.l1BlobBaseFeeWeight*float64(block.BlobBaseFee)))
} else {
baseFee = block.BaseFee
// include the token exchange rate in the fee data if alternative gas token enabled
if r.cfg.GasOracleConfig.AlternativeGasTokenConfig != nil && r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Enabled {
// The exchange rate represents the number of native tokens on L1 required to exchange for 1 native token on L2.
var exchangeRate float64
switch r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Mode {
case "Fixed":
exchangeRate = r.cfg.GasOracleConfig.AlternativeGasTokenConfig.FixedExchangeRate
case "BinanceApi":
exchangeRate, err = rutils.GetExchangeRateFromBinanceApi(r.cfg.GasOracleConfig.AlternativeGasTokenConfig.TokenSymbolPair, 5)
if err != nil {
log.Error("Failed to get gas token exchange rate from Binance api", "tokenSymbolPair", r.cfg.GasOracleConfig.AlternativeGasTokenConfig.TokenSymbolPair, "err", err)
return
}
default:
log.Error("Invalid alternative gas token mode", "mode", r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Mode)
return
}
if exchangeRate == 0 {
log.Error("Invalid exchange rate", "exchangeRate", exchangeRate)
return
}
baseFee = uint64(math.Ceil(float64(baseFee) / exchangeRate))
blobBaseFee = uint64(math.Ceil(float64(blobBaseFee) / exchangeRate))
}

if r.shouldUpdateGasOracle(baseFee, blobBaseFee, isCurie) {
if r.shouldUpdateGasOracle(baseFee, blobBaseFee) {
// It indicates the committing batch has been stuck for a long time, it's likely that the L1 gas fee spiked.
// If we are not committing batches due to high fees then we shouldn't update fees to prevent users from paying high l1_data_fee
// Also, set fees to some default value, because we have already updated fees to some high values, probably
var reachTimeout bool
if reachTimeout, err = r.commitBatchReachTimeout(); reachTimeout && err == nil {
if reachTimeout, err = r.commitBatchReachTimeout(); reachTimeout && block.BlobBaseFee > r.cfg.GasOracleConfig.L1BlobBaseFeeThreshold && err == nil {
if r.lastBaseFee == r.cfg.GasOracleConfig.L1BaseFeeDefault && r.lastBlobBaseFee == r.cfg.GasOracleConfig.L1BlobBaseFeeDefault {
return
}
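The hunk above folds an optional gas-token conversion into the L1 fee path: when the alternative gas token is enabled, the base fee and blob base fee are divided by the exchange rate (L1 native tokens per one L2 native token) and rounded up before the oracle update is sent. A minimal sketch of just that arithmetic, under the assumption that the fees are plain uint64 wei values as in the diff; this is an illustrative re-implementation, not the relayer code itself.

package main

import (
	"fmt"
	"math"
)

// adjustL1FeesForGasToken reproduces only the arithmetic from the hunk above:
// both fees are divided by the exchange rate and rounded up, and a zero rate
// is rejected, matching the guard in the relayer.
func adjustL1FeesForGasToken(baseFee, blobBaseFee uint64, exchangeRate float64) (uint64, uint64, error) {
	if exchangeRate == 0 {
		return 0, 0, fmt.Errorf("invalid exchange rate: %f", exchangeRate)
	}
	adjBase := uint64(math.Ceil(float64(baseFee) / exchangeRate))
	adjBlob := uint64(math.Ceil(float64(blobBaseFee) / exchangeRate))
	return adjBase, adjBlob, nil
}

func main() {
	// Hypothetical numbers: 20 gwei base fee, 1 gwei blob base fee,
	// and an exchange rate of 0.001 L1 tokens per L2 token.
	base, blob, err := adjustL1FeesForGasToken(20_000_000_000, 1_000_000_000, 0.001)
	if err != nil {
		panic(err)
	}
	fmt.Println(base, blob) // prints the converted fees (about 2e13 and 1e12 wei)
}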
@@ -165,24 +172,15 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
} else if err != nil {
return
}
var data []byte
if isCurie {
data, err = r.l1GasOracleABI.Pack("setL1BaseFeeAndBlobBaseFee", new(big.Int).SetUint64(baseFee), new(big.Int).SetUint64(blobBaseFee))
if err != nil {
log.Error("Failed to pack setL1BaseFeeAndBlobBaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie, "err", err)
return
}
} else {
data, err = r.l1GasOracleABI.Pack("setL1BaseFee", new(big.Int).SetUint64(baseFee))
if err != nil {
log.Error("Failed to pack setL1BaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie, "err", err)
return
}
data, err := r.l1GasOracleABI.Pack("setL1BaseFeeAndBlobBaseFee", new(big.Int).SetUint64(baseFee), new(big.Int).SetUint64(blobBaseFee))
if err != nil {
log.Error("Failed to pack setL1BaseFeeAndBlobBaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "err", err)
return
}

hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil, 0)
if err != nil {
log.Error("Failed to send gas oracle update tx to layer2", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie, "err", err)
log.Error("Failed to send gas oracle update tx to layer2", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "err", err)
return
}

@@ -196,7 +194,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
r.lastBlobBaseFee = blobBaseFee
r.metrics.rollupL1RelayerLatestBaseFee.Set(float64(r.lastBaseFee))
r.metrics.rollupL1RelayerLatestBlobBaseFee.Set(float64(r.lastBlobBaseFee))
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee, "blobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie)
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee, "blobBaseFee", blobBaseFee)
}
}
}
@@ -245,31 +243,22 @@ func (r *Layer1Relayer) StopSenders() {
}
}

func (r *Layer1Relayer) shouldUpdateGasOracle(baseFee uint64, blobBaseFee uint64, isCurie bool) bool {
func (r *Layer1Relayer) shouldUpdateGasOracle(baseFee uint64, blobBaseFee uint64) bool {
// Right after restarting.
if r.lastBaseFee == 0 {
log.Info("First time to update gas oracle after restarting", "baseFee", baseFee, "blobBaseFee", blobBaseFee)
return true
}

expectedBaseFeeDelta := r.lastBaseFee*r.gasPriceDiff/gasPriceDiffPrecision + 1
if baseFee >= r.minGasPrice && (baseFee >= r.lastBaseFee+expectedBaseFeeDelta || baseFee+expectedBaseFeeDelta <= r.lastBaseFee) {
return true
}

// Omitting blob base fee checks before Curie.
if !isCurie {
return false
}

// Right after enabling Curie.
if r.lastBlobBaseFee == 0 {
if baseFee >= r.minGasPrice && math.Abs(float64(baseFee)-float64(r.lastBaseFee)) >= float64(expectedBaseFeeDelta) {
return true
}

expectedBlobBaseFeeDelta := r.lastBlobBaseFee * r.gasPriceDiff / gasPriceDiffPrecision
// Plus a minimum of 0.01 gwei, since the blob base fee is usually low, preventing short-time fluctuation.
expectedBlobBaseFeeDelta += 10000000
if blobBaseFee >= r.minGasPrice && (blobBaseFee >= r.lastBlobBaseFee+expectedBlobBaseFeeDelta || blobBaseFee+expectedBlobBaseFeeDelta <= r.lastBlobBaseFee) {
if blobBaseFee >= r.minGasPrice && math.Abs(float64(blobBaseFee)-float64(r.lastBlobBaseFee)) >= float64(expectedBlobBaseFeeDelta) {
return true
}

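The reworked shouldUpdateGasOracle above keeps the same threshold idea but drops the Curie special-casing: an update is sent when the new fee has moved away from the last published one by at least last*gasPriceDiff/gasPriceDiffPrecision, plus a small fixed floor for the blob fee. A standalone sketch of that check follows; gasPriceDiffPrecision is not shown in this diff, so the constant below is an assumption made only so the example runs.

package main

import (
	"fmt"
	"math"
)

// gasPriceDiffPrecision is not visible in this diff; 1e6 is assumed here purely
// for illustration (gasPriceDiff would then be expressed in parts per million).
const gasPriceDiffPrecision = 1_000_000

// shouldUpdate mirrors the shape of the threshold check above: an update fires
// when the new fee differs from the last published fee by at least
// last*diff/precision plus a fixed floor, provided it is above minGasPrice.
// Illustrative only; the real method tracks separate base and blob fee state.
func shouldUpdate(last, next, minGasPrice, gasPriceDiff, floor uint64) bool {
	if last == 0 {
		return true // first update after a restart
	}
	delta := last*gasPriceDiff/gasPriceDiffPrecision + floor
	return next >= minGasPrice && math.Abs(float64(next)-float64(last)) >= float64(delta)
}

func main() {
	// Hypothetical: last blob base fee 1 gwei, 5% diff, 0.01 gwei floor.
	fmt.Println(shouldUpdate(1_000_000_000, 1_049_000_000, 0, 50_000, 10_000_000)) // below threshold: false
	fmt.Println(shouldUpdate(1_000_000_000, 1_100_000_000, 0, 50_000, 10_000_000)) // above threshold: true
}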
@@ -8,7 +8,6 @@ import (
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/smartystreets/goconvey/convey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
@@ -36,7 +35,7 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
|
||||
func testCreateNewL1Relayer(t *testing.T) {
|
||||
db := setupL1RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig, ¶ms.ChainConfig{}, ServiceTypeL1GasOracle, nil)
|
||||
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig, ServiceTypeL1GasOracle, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
defer relayer.StopSenders()
|
||||
@@ -58,7 +57,7 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
|
||||
l1Cfg := cfg.L1Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, ¶ms.ChainConfig{}, ServiceTypeL1GasOracle, nil)
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, ServiceTypeL1GasOracle, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l1Relayer.StopSenders()
|
||||
|
||||
@@ -91,7 +90,7 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
|
||||
l1Cfg := cfg.L1Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, ¶ms.ChainConfig{}, ServiceTypeL1GasOracle, nil)
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, ServiceTypeL1GasOracle, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, l1Relayer)
|
||||
defer l1Relayer.StopSenders()
|
||||
@@ -141,14 +140,6 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
|
||||
return tmpInfo, nil
|
||||
})
|
||||
|
||||
convey.Convey("setL1BaseFee failure", t, func() {
|
||||
targetErr := errors.New("pack setL1BaseFee error")
|
||||
patchGuard.ApplyMethodFunc(l1Relayer.l1GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
|
||||
return nil, targetErr
|
||||
})
|
||||
l1Relayer.ProcessGasPriceOracle()
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(l1Relayer.l1GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
|
||||
return []byte("for test"), nil
|
||||
})
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"sort"
|
||||
"strings"
|
||||
@@ -12,11 +13,6 @@ import (
|
||||
"github.com/go-resty/resty/v2"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv0"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv1"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv2"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv3"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv4"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
@@ -75,28 +71,9 @@ type Layer2Relayer struct {
|
||||
|
||||
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
|
||||
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, initGenesis bool, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
|
||||
|
||||
var gasOracleSender, commitSender, finalizeSender *sender.Sender
|
||||
var err error
|
||||
|
||||
// check that all 3 signer addresses are different, because there will be a problem in managing nonce for different senders
|
||||
gasOracleSenderAddr, err := addrFromSignerConfig(cfg.GasOracleSenderSignerConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse addr from gas oracle signer config, err: %v", err)
|
||||
}
|
||||
commitSenderAddr, err := addrFromSignerConfig(cfg.CommitSenderSignerConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse addr from commit sender config, err: %v", err)
|
||||
}
|
||||
finalizeSenderAddr, err := addrFromSignerConfig(cfg.FinalizeSenderSignerConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse addr from finalize sender config, err: %v", err)
|
||||
}
|
||||
if gasOracleSenderAddr == commitSenderAddr || gasOracleSenderAddr == finalizeSenderAddr || commitSenderAddr == finalizeSenderAddr {
|
||||
return nil, fmt.Errorf("gas oracle, commit, and finalize sender addresses must be different. Got: Gas Oracle=%s, Commit=%s, Finalize=%s",
|
||||
gasOracleSenderAddr.Hex(), commitSenderAddr.Hex(), finalizeSenderAddr.Hex())
|
||||
}
|
||||
|
||||
switch serviceType {
|
||||
case ServiceTypeL2GasOracle:
|
||||
gasOracleSender, err = sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderSignerConfig, "l2_relayer", "gas_oracle_sender", types.SenderTypeL2GasOracle, db, reg)
|
||||
@@ -110,6 +87,18 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
}
|
||||
|
||||
case ServiceTypeL2RollupRelayer:
|
||||
commitSenderAddr, err := addrFromSignerConfig(cfg.CommitSenderSignerConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse addr from commit sender config, err: %v", err)
|
||||
}
|
||||
finalizeSenderAddr, err := addrFromSignerConfig(cfg.FinalizeSenderSignerConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse addr from finalize sender config, err: %v", err)
|
||||
}
|
||||
if commitSenderAddr == finalizeSenderAddr {
|
||||
return nil, fmt.Errorf("commit and finalize sender addresses must be different. Got: Commit=%s, Finalize=%s", commitSenderAddr.Hex(), finalizeSenderAddr.Hex())
|
||||
}
|
||||
|
||||
commitSender, err = sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderSignerConfig, "l2_relayer", "commit_sender", types.SenderTypeCommitBatch, db, reg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("new commit sender failed, err: %w", err)
|
||||
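The hunk above moves the sender-address checks so that only the senders actually constructed for a given service type are compared: for the rollup relayer, the commit and finalize senders must not share an address, since two senders on one account would race on the same nonce. A stripped-down sketch of that guard; it takes hex addresses directly instead of deriving them from signer configs as the relayer's addrFromSignerConfig helper does.

package main

import (
	"fmt"
	"strings"
)

// ensureDistinctSenders shows the shape of the guard above: senders that share
// an address would compete for the same account nonce, so construction should
// fail early. Addresses are plain hex strings here for illustration.
func ensureDistinctSenders(commitAddr, finalizeAddr string) error {
	if strings.EqualFold(commitAddr, finalizeAddr) {
		return fmt.Errorf("commit and finalize sender addresses must be different. Got: Commit=%s, Finalize=%s", commitAddr, finalizeAddr)
	}
	return nil
}

func main() {
	fmt.Println(ensureDistinctSenders("0x0000000000000000000000000000000000000001", "0x0000000000000000000000000000000000000002")) // <nil>
	fmt.Println(ensureDistinctSenders("0xAbC0000000000000000000000000000000000003", "0xabc0000000000000000000000000000000000003")) // error
}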
@@ -165,7 +154,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
}
|
||||
|
||||
// chain_monitor client
|
||||
if cfg.ChainMonitor.Enabled {
|
||||
if serviceType == ServiceTypeL2RollupRelayer && cfg.ChainMonitor.Enabled {
|
||||
layer2Relayer.chainMonitorClient = resty.New()
|
||||
layer2Relayer.chainMonitorClient.SetRetryCount(cfg.ChainMonitor.TryTimes)
|
||||
layer2Relayer.chainMonitorClient.SetTimeout(time.Duration(cfg.ChainMonitor.TimeOut) * time.Second)
|
||||
@@ -217,7 +206,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
|
||||
|
||||
err = r.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
var dbChunk *orm.Chunk
|
||||
dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.ChunkMetrics{}, dbTX)
|
||||
dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, encoding.CodecV0, rutils.ChunkMetrics{}, dbTX)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert chunk: %v", err)
|
||||
}
|
||||
@@ -234,7 +223,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
|
||||
}
|
||||
|
||||
var dbBatch *orm.Batch
|
||||
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.BatchMetrics{}, dbTX)
|
||||
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, rutils.BatchMetrics{}, dbTX)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert batch: %v", err)
|
||||
}
|
||||
@@ -324,13 +313,40 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
return
}
suggestGasPriceUint64 := uint64(suggestGasPrice.Int64())

// include the token exchange rate in the fee data if alternative gas token enabled
if r.cfg.GasOracleConfig.AlternativeGasTokenConfig != nil && r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Enabled {
// The exchange rate represents the number of native tokens on L1 required to exchange for 1 native token on L2.
var exchangeRate float64
switch r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Mode {
case "Fixed":
exchangeRate = r.cfg.GasOracleConfig.AlternativeGasTokenConfig.FixedExchangeRate
case "BinanceApi":
exchangeRate, err = rutils.GetExchangeRateFromBinanceApi(r.cfg.GasOracleConfig.AlternativeGasTokenConfig.TokenSymbolPair, 5)
if err != nil {
log.Error("Failed to get gas token exchange rate from Binance api", "tokenSymbolPair", r.cfg.GasOracleConfig.AlternativeGasTokenConfig.TokenSymbolPair, "err", err)
return
}
default:
log.Error("Invalid alternative gas token mode", "mode", r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Mode)
return
}
if exchangeRate == 0 {
log.Error("Invalid exchange rate", "exchangeRate", exchangeRate)
return
}
suggestGasPriceUint64 = uint64(math.Ceil(float64(suggestGasPriceUint64) * exchangeRate))
suggestGasPrice = new(big.Int).SetUint64(suggestGasPriceUint64)
}

expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
if r.lastGasPrice > 0 && expectedDelta == 0 {
expectedDelta = 1
}

// last is undefine or (suggestGasPriceUint64 >= minGasPrice && exceed diff)
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice && (suggestGasPriceUint64 >= r.lastGasPrice+expectedDelta || suggestGasPriceUint64+expectedDelta <= r.lastGasPrice)) {
// last is undefined or (suggestGasPriceUint64 >= minGasPrice && exceed diff)
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice &&
(math.Abs(float64(suggestGasPriceUint64)-float64(r.lastGasPrice)) >= float64(expectedDelta))) {
data, err := r.l2GasOracleABI.Pack("setL2BaseFee", suggestGasPrice)
if err != nil {
log.Error("Failed to pack setL2BaseFee", "batch.Hash", batch.Hash, "GasPrice", suggestGasPrice.Uint64(), "err", err)
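On the L2 relayer side the same exchange-rate handling is applied to the suggested gas price, but in the opposite direction: the value is multiplied by the exchange rate before being packed into setL2BaseFee, rather than divided as on the L1 oracle path. A small sketch of that conversion, assuming the suggested price fits in a uint64 exactly as the code above does; it is illustrative, not the relayer implementation.

package main

import (
	"fmt"
	"math"
	"math/big"
)

// convertSuggestedGasPrice reproduces the conversion in the hunk above: the L2
// suggested gas price is multiplied by the exchange rate (L1 tokens per one L2
// token) and rounded up, the opposite of the division used on the L1 side.
func convertSuggestedGasPrice(suggested *big.Int, exchangeRate float64) (*big.Int, error) {
	if exchangeRate == 0 {
		return nil, fmt.Errorf("invalid exchange rate: %f", exchangeRate)
	}
	scaled := uint64(math.Ceil(float64(suggested.Uint64()) * exchangeRate))
	return new(big.Int).SetUint64(scaled), nil
}

func main() {
	// Hypothetical: a 0.1 gwei L2 suggestion and a 0.001 exchange rate.
	out, err := convertSuggestedGasPrice(big.NewInt(100_000_000), 0.001)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // about 100000 wei
}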
@@ -372,6 +388,14 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}

// check codec version
for _, dbChunk := range dbChunks {
if dbBatch.CodecVersion != dbChunk.CodecVersion {
log.Error("batch codec version is different from chunk codec version", "batch index", dbBatch.Index, "chunk index", dbChunk.Index, "batch codec version", dbBatch.CodecVersion, "chunk codec version", dbChunk.CodecVersion)
return
}
}

chunks := make([]*encoding.Chunk, len(dbChunks))
for i, c := range dbChunks {
blocks, getErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
@@ -393,38 +417,24 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}

if dbParentBatch.CodecVersion > dbBatch.CodecVersion {
log.Error("parent batch codec version is greater than current batch codec version", "index", dbBatch.Index, "hash", dbBatch.Hash, "parent codec version", dbParentBatch.CodecVersion, "current codec version", dbBatch.CodecVersion)
return
}

var calldata []byte
var blob *kzg4844.Blob
if encoding.CodecVersion(dbBatch.CodecVersion) == encoding.CodecV0 {
calldata, err = r.constructCommitBatchPayloadCodecV0(dbBatch, dbParentBatch, dbChunks, chunks)
if err != nil {
log.Error("failed to construct commitBatch payload codecv0", "index", dbBatch.Index, "err", err)
return
}
} else if encoding.CodecVersion(dbBatch.CodecVersion) == encoding.CodecV1 {
calldata, blob, err = r.constructCommitBatchPayloadCodecV1(dbBatch, dbParentBatch, dbChunks, chunks)
if err != nil {
log.Error("failed to construct commitBatch payload codecv1", "index", dbBatch.Index, "err", err)
return
}
} else if encoding.CodecVersion(dbBatch.CodecVersion) == encoding.CodecV2 {
calldata, blob, err = r.constructCommitBatchPayloadCodecV2(dbBatch, dbParentBatch, dbChunks, chunks)
if err != nil {
log.Error("failed to construct commitBatch payload codecv2", "index", dbBatch.Index, "err", err)
return
}
} else if encoding.CodecVersion(dbBatch.CodecVersion) == encoding.CodecV3 {
calldata, blob, err = r.constructCommitBatchPayloadCodecV3(dbBatch, dbParentBatch, dbChunks, chunks)
if err != nil {
log.Error("failed to construct commitBatchWithBlobProof payload codecv3", "index", dbBatch.Index, "err", err)
return
}
} else if encoding.CodecVersion(dbBatch.CodecVersion) == encoding.CodecV4 {
codecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
switch codecVersion {
case encoding.CodecV4:
calldata, blob, err = r.constructCommitBatchPayloadCodecV4(dbBatch, dbParentBatch, dbChunks, chunks)
if err != nil {
log.Error("failed to construct commitBatchWithBlobProof payload codecv4", "index", dbBatch.Index, "err", err)
log.Error("failed to construct commitBatchWithBlobProof payload for V4", "codecVersion", codecVersion, "index", dbBatch.Index, "err", err)
return
}
default:
log.Error("unsupported codec version", "codecVersion", codecVersion)
return
}

// fallbackGasLimit is non-zero only in sending non-blob transactions.
@@ -479,69 +489,6 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
}
}

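The commit path above now rejects everything but CodecV4 and adds two consistency checks (chunk-vs-batch and parent-vs-batch codec versions). The following standalone sketch restates that gating logic in isolation; it uses simplified placeholder types rather than the da-codec definitions.

package main

import (
	"errors"
	"fmt"
)

// CodecVersion is a placeholder for the da-codec encoding.CodecVersion enum.
type CodecVersion int

const (
	CodecV0 CodecVersion = iota
	CodecV1
	CodecV2
	CodecV3
	CodecV4
)

// validateBatchCodec applies the checks the relayer now performs before
// committing: every chunk must use the batch's codec version, the parent
// batch's version must not be newer, and only CodecV4 is accepted.
func validateBatchCodec(batch, parent CodecVersion, chunks []CodecVersion) error {
	for i, c := range chunks {
		if c != batch {
			return fmt.Errorf("chunk %d codec %d differs from batch codec %d", i, c, batch)
		}
	}
	if parent > batch {
		return fmt.Errorf("parent codec %d is newer than batch codec %d", parent, batch)
	}
	switch batch {
	case CodecV4:
		return nil // only V4 batches are committed after this change
	default:
		return errors.New("unsupported codec version")
	}
}

func main() {
	fmt.Println(validateBatchCodec(CodecV4, CodecV4, []CodecVersion{CodecV4, CodecV4})) // <nil>
	fmt.Println(validateBatchCodec(CodecV4, CodecV4, []CodecVersion{CodecV4, CodecV3})) // chunk codec mismatch
}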
// ProcessCommittedBatches submit proof to layer 1 rollup contract
|
||||
func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
// retrieves the earliest batch whose rollup status is 'committed'
|
||||
fields := map[string]interface{}{
|
||||
"rollup_status": types.RollupCommitted,
|
||||
}
|
||||
orderByList := []string{"index ASC"}
|
||||
limit := 1
|
||||
batches, err := r.batchOrm.GetBatches(r.ctx, fields, orderByList, limit)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch committed L2 batches", "err", err)
|
||||
return
|
||||
}
|
||||
if len(batches) != 1 {
|
||||
log.Warn("Unexpected result for GetBlockBatches", "number of batches", len(batches))
|
||||
return
|
||||
}
|
||||
|
||||
r.metrics.rollupL2RelayerProcessCommittedBatchesTotal.Inc()
|
||||
|
||||
batch := batches[0]
|
||||
status := types.ProvingStatus(batch.ProvingStatus)
|
||||
switch status {
|
||||
case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
|
||||
if batch.CommittedAt == nil {
|
||||
log.Error("batch.CommittedAt is nil", "index", batch.Index, "hash", batch.Hash)
|
||||
return
|
||||
}
|
||||
|
||||
if r.cfg.EnableTestEnvBypassFeatures && utils.NowUTC().Sub(*batch.CommittedAt) > time.Duration(r.cfg.FinalizeBatchWithoutProofTimeoutSec)*time.Second {
|
||||
if err := r.finalizeBatch(batch, false); err != nil {
|
||||
log.Error("Failed to finalize timeout batch without proof", "index", batch.Index, "hash", batch.Hash, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
case types.ProvingTaskVerified:
|
||||
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
|
||||
if err := r.finalizeBatch(batch, true); err != nil {
|
||||
log.Error("Failed to finalize batch with proof", "index", batch.Index, "hash", batch.Hash, "err", err)
|
||||
}
|
||||
|
||||
case types.ProvingTaskFailed:
|
||||
// We were unable to prove this batch. There are two possibilities:
|
||||
// (a) Prover bug. In this case, we should fix and redeploy the prover.
|
||||
// In the meantime, we continue to commit batches to L1 as well as
|
||||
// proposing and proving chunks and batches.
|
||||
// (b) Unprovable batch, e.g. proof overflow. In this case we need to
|
||||
// stop the ledger, fix the limit, revert all the violating blocks,
|
||||
// chunks and batches and all subsequent ones, and resume, i.e. this
|
||||
// case requires manual resolution.
|
||||
log.Error(
|
||||
"batch proving failed",
|
||||
"Index", batch.Index,
|
||||
"Hash", batch.Hash,
|
||||
"ProvedAt", batch.ProvedAt,
|
||||
"ProofTimeSec", batch.ProofTimeSec,
|
||||
)
|
||||
|
||||
default:
|
||||
log.Error("encounter unreachable case in ProcessCommittedBatches", "proving status", status)
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessPendingBundles submits proof to layer 1 rollup contract
|
||||
func (r *Layer2Relayer) ProcessPendingBundles() {
|
||||
r.metrics.rollupL2RelayerProcessPendingBundlesTotal.Inc()
|
||||
@@ -559,16 +506,35 @@ func (r *Layer2Relayer) ProcessPendingBundles() {
|
||||
switch status {
|
||||
case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
|
||||
if r.cfg.EnableTestEnvBypassFeatures && utils.NowUTC().Sub(bundle.CreatedAt) > time.Duration(r.cfg.FinalizeBundleWithoutProofTimeoutSec)*time.Second {
|
||||
// check if last batch is finalized, because in fake finalize bundle mode, the contract does not verify if the previous bundle or batch is finalized.
|
||||
if bundle.StartBatchIndex == 0 {
|
||||
log.Error("invalid args: start batch index of bundle is 0", "bundle index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex)
|
||||
return
|
||||
}
|
||||
|
||||
lastBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, bundle.StartBatchIndex-1)
|
||||
if err != nil {
|
||||
log.Error("failed to get last batch", "batch index", bundle.StartBatchIndex-1, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
if types.RollupStatus(lastBatch.RollupStatus) != types.RollupFinalized {
|
||||
log.Error("previous bundle or batch is not finalized", "batch index", lastBatch.Index, "batch hash", lastBatch.Hash, "rollup status", types.RollupStatus(lastBatch.RollupStatus))
|
||||
return
|
||||
}
|
||||
|
||||
if err := r.finalizeBundle(bundle, false); err != nil {
|
||||
log.Error("Failed to finalize timeout bundle without proof", "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err)
|
||||
log.Error("failed to finalize timeout bundle without proof", "bundle index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
case types.ProvingTaskVerified:
|
||||
log.Info("Start to roll up zk proof", "bundle hash", bundle.Hash)
|
||||
log.Info("Start to roll up zk proof", "index", bundle.Index, "bundle hash", bundle.Hash)
|
||||
r.metrics.rollupL2RelayerProcessPendingBundlesFinalizedTotal.Inc()
|
||||
if err := r.finalizeBundle(bundle, true); err != nil {
|
||||
log.Error("Failed to finalize bundle with proof", "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err)
|
||||
log.Error("failed to finalize bundle with proof", "bundle index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
case types.ProvingTaskFailed:
|
||||
@@ -580,153 +546,49 @@ func (r *Layer2Relayer) ProcessPendingBundles() {
|
||||
// stop the ledger, fix the limit, revert all the violating blocks,
|
||||
// chunks, batches, bundles and all subsequent ones, and resume,
|
||||
// i.e. this case requires manual resolution.
|
||||
log.Error("bundle proving failed", "index", bundle.Index, "hash", bundle.Hash, "proved at", bundle.ProvedAt, "proof time sec", bundle.ProofTimeSec)
|
||||
log.Error("bundle proving failed", "bundle index", bundle.Index, "bundle hash", bundle.Hash, "proved at", bundle.ProvedAt, "proof time sec", bundle.ProofTimeSec)
|
||||
|
||||
default:
|
||||
log.Error("encounter unreachable case in ProcessPendingBundles", "proving status", status)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error {
|
||||
// Check batch status before sending `finalizeBatch` tx.
|
||||
if r.cfg.ChainMonitor.Enabled {
|
||||
var batchStatus bool
|
||||
batchStatus, err := r.getBatchStatusByIndex(dbBatch)
|
||||
func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error {
|
||||
// Check if current bundle codec version is not less than the preceding one
|
||||
if bundle.StartBatchIndex > 0 {
|
||||
prevBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, bundle.StartBatchIndex-1)
|
||||
if err != nil {
|
||||
r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
|
||||
log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", dbBatch.Index, "err", err)
|
||||
log.Error("failed to get previous batch",
|
||||
"current bundle index", bundle.Index,
|
||||
"start batch index", bundle.StartBatchIndex,
|
||||
"error", err)
|
||||
return err
|
||||
}
|
||||
if !batchStatus {
|
||||
r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc()
|
||||
log.Error("the batch status is false, stop finalize batch and check the reason", "batch_index", dbBatch.Index)
|
||||
return errors.New("the batch status is false")
|
||||
if bundle.CodecVersion < prevBatch.CodecVersion {
|
||||
log.Error("current bundle codec version is less than the preceding batch",
|
||||
"current bundle index", bundle.Index,
|
||||
"current codec version", bundle.CodecVersion,
|
||||
"prev batch index", prevBatch.Index,
|
||||
"prev codec version", prevBatch.CodecVersion)
|
||||
return errors.New("current bundle codec version cannot be less than the preceding batch")
|
||||
}
|
||||
}
|
||||
|
||||
if dbBatch.Index == 0 {
|
||||
return errors.New("invalid args: batch index is 0, should only happen in finalizing genesis batch")
|
||||
}
|
||||
|
||||
dbParentBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, dbBatch.Index-1)
|
||||
if getErr != nil {
|
||||
return fmt.Errorf("failed to get batch, index: %d, err: %w", dbBatch.Index-1, getErr)
|
||||
}
|
||||
|
||||
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch chunks: %w", err)
|
||||
}
|
||||
|
||||
var aggProof *message.BatchProof
|
||||
if withProof {
|
||||
aggProof, getErr = r.batchOrm.GetVerifiedProofByHash(r.ctx, dbBatch.Hash)
|
||||
if getErr != nil {
|
||||
return fmt.Errorf("failed to get verified proof by hash, index: %d, err: %w", dbBatch.Index, getErr)
|
||||
}
|
||||
|
||||
if err = aggProof.SanityCheck(); err != nil {
|
||||
return fmt.Errorf("failed to check agg_proof sanity, index: %d, err: %w", dbBatch.Index, err)
|
||||
}
|
||||
}
|
||||
|
||||
var calldata []byte
|
||||
if !r.chainCfg.IsBernoulli(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) { // codecv0
|
||||
log.Info("Start to roll up zk proof", "batch hash", dbBatch.Hash)
|
||||
|
||||
calldata, err = r.constructFinalizeBatchPayloadCodecV0(dbBatch, dbParentBatch, aggProof)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to construct finalizeBatch payload codecv0, index: %v, err: %w", dbBatch.Index, err)
|
||||
}
|
||||
} else if !r.chainCfg.IsCurie(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) { // codecv1
|
||||
log.Info("Start to roll up zk proof", "batch hash", dbBatch.Hash)
|
||||
|
||||
chunks := make([]*encoding.Chunk, len(dbChunks))
|
||||
for i, c := range dbChunks {
|
||||
blocks, dbErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
|
||||
if dbErr != nil {
|
||||
return fmt.Errorf("failed to fetch blocks: %w", dbErr)
|
||||
}
|
||||
chunks[i] = &encoding.Chunk{Blocks: blocks}
|
||||
}
|
||||
|
||||
calldata, err = r.constructFinalizeBatchPayloadCodecV1(dbBatch, dbParentBatch, dbChunks, chunks, aggProof)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to construct finalizeBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err)
|
||||
}
|
||||
} else if !r.chainCfg.IsDarwin(dbChunks[0].StartBlockTime) { // codecv2
|
||||
log.Info("Start to roll up zk proof", "batch hash", dbBatch.Hash)
|
||||
|
||||
chunks := make([]*encoding.Chunk, len(dbChunks))
|
||||
for i, c := range dbChunks {
|
||||
blocks, dbErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
|
||||
if dbErr != nil {
|
||||
return fmt.Errorf("failed to fetch blocks: %w", dbErr)
|
||||
}
|
||||
chunks[i] = &encoding.Chunk{Blocks: blocks}
|
||||
}
|
||||
|
||||
calldata, err = r.constructFinalizeBatchPayloadCodecV2(dbBatch, dbParentBatch, dbChunks, chunks, aggProof)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to construct finalizeBatch payload codecv2, index: %v, err: %w", dbBatch.Index, err)
|
||||
}
|
||||
} else { // codecv3
|
||||
log.Debug("encoding is codecv3, using finalizeBundle instead", "index", dbBatch.Index)
|
||||
return nil
|
||||
}
|
||||
|
||||
txHash, err := r.finalizeSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, nil, 0)
|
||||
if err != nil {
|
||||
log.Error(
|
||||
"finalizeBatch in layer1 failed",
|
||||
"with proof", withProof,
|
||||
"index", dbBatch.Index,
|
||||
"hash", dbBatch.Hash,
|
||||
"RollupContractAddress", r.cfg.RollupContractAddress,
|
||||
"err", err,
|
||||
"calldata", common.Bytes2Hex(calldata),
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info("finalizeBatch in layer1", "with proof", withProof, "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String())
|
||||
|
||||
// Updating rollup status in database.
|
||||
if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupFinalizing); err != nil {
|
||||
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String(), "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Updating the proving status when finalizing without proof, thus the coordinator could omit this task.
|
||||
// it isn't a necessary step, so don't put in a transaction with UpdateFinalizeTxHashAndRollupStatus
|
||||
if !withProof {
|
||||
txErr := r.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
if updateErr := r.batchOrm.UpdateProvingStatus(r.ctx, dbBatch.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil {
|
||||
return updateErr
|
||||
}
|
||||
if updateErr := r.chunkOrm.UpdateProvingStatusByBatchHash(r.ctx, dbBatch.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil {
|
||||
return updateErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if txErr != nil {
|
||||
log.Error("Updating chunk and batch proving status when finalizing without proof failure", "batchHash", dbBatch.Hash, "err", txErr)
|
||||
}
|
||||
}
|
||||
|
||||
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error {
|
||||
// Check batch status before sending `finalizeBundle` tx.
|
||||
if r.cfg.ChainMonitor.Enabled {
|
||||
for batchIndex := bundle.StartBatchIndex; batchIndex <= bundle.EndBatchIndex; batchIndex++ {
|
||||
tmpBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, batchIndex)
|
||||
if getErr != nil {
|
||||
log.Error("failed to get batch by index", "batch index", batchIndex, "error", getErr)
|
||||
return getErr
|
||||
}
|
||||
for batchIndex := bundle.StartBatchIndex; batchIndex <= bundle.EndBatchIndex; batchIndex++ {
|
||||
tmpBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, batchIndex)
|
||||
if getErr != nil {
|
||||
log.Error("failed to get batch by index", "batch index", batchIndex, "error", getErr)
|
||||
return getErr
|
||||
}
|
||||
|
||||
// check codec version
|
||||
if tmpBatch.CodecVersion != bundle.CodecVersion {
|
||||
log.Error("bundle codec version is different from batch codec version", "bundle index", bundle.Index, "batch index", tmpBatch.Index, "bundle codec version", bundle.CodecVersion, "batch codec version", tmpBatch.CodecVersion)
|
||||
return errors.New("bundle codec version is different from batch codec version")
|
||||
}
|
||||
|
||||
if r.cfg.ChainMonitor.Enabled {
|
||||
batchStatus, getErr := r.getBatchStatusByIndex(tmpBatch)
|
||||
if getErr != nil {
|
||||
r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
|
||||
@@ -759,7 +621,7 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
|
||||
}
|
||||
}
|
||||
|
||||
calldata, err := r.constructFinalizeBundlePayloadCodecV3AndV4(dbBatch, aggProof)
|
||||
calldata, err := r.constructFinalizeBundlePayloadCodecV4(dbBatch, aggProof)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to construct finalizeBundle payload codecv3, index: %v, err: %w", dbBatch.Index, err)
|
||||
}
|
||||
@@ -775,8 +637,22 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
log.Info("finalizeBundle in layer1", "with proof", withProof, "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "tx hash", txHash.String())

// Updating rollup status in database.
if err := r.bundleOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, bundle.Hash, txHash.String(), types.RollupFinalizing); err != nil {
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", bundle.Index, "bundle hash", bundle.Hash, "tx hash", txHash.String(), "err", err)
err = r.db.Transaction(func(dbTX *gorm.DB) error {
if err = r.batchOrm.UpdateFinalizeTxHashAndRollupStatusByBundleHash(r.ctx, bundle.Hash, txHash.String(), types.RollupFinalizing, dbTX); err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatusByBundleHash failed", "index", bundle.Index, "bundle hash", bundle.Hash, "tx hash", txHash.String(), "err", err)
return err
}

if err = r.bundleOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, bundle.Hash, txHash.String(), types.RollupFinalizing, dbTX); err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "index", bundle.Index, "bundle hash", bundle.Hash, "tx hash", txHash.String(), "err", err)
return err
}

return nil
})

if err != nil {
log.Warn("failed to update rollup status of bundle and batches", "err", err)
return err
}

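The hunk above replaces a single bundle-status update with one gorm transaction that updates both the batches and the bundle, so neither write can land without the other. A minimal sketch of that pattern, assuming an in-memory SQLite database and illustrative table definitions rather than the project's ORM:

package main

import (
	"fmt"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

// Batch and Bundle are simplified stand-ins for the rollup ORM models.
type Batch struct {
	ID           uint
	BundleHash   string
	RollupStatus int
}

type Bundle struct {
	ID           uint
	Hash         string
	RollupStatus int
}

// markFinalizing updates every batch in the bundle and the bundle itself in a
// single transaction; an error in either update rolls back both writes.
func markFinalizing(db *gorm.DB, bundleHash string, status int) error {
	return db.Transaction(func(tx *gorm.DB) error {
		if err := tx.Model(&Batch{}).Where("bundle_hash = ?", bundleHash).
			Update("rollup_status", status).Error; err != nil {
			return err // rolls back the whole transaction
		}
		return tx.Model(&Bundle{}).Where("hash = ?", bundleHash).
			Update("rollup_status", status).Error
	})
}

func main() {
	db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	_ = db.AutoMigrate(&Batch{}, &Bundle{})
	fmt.Println(markFinalizing(db, "0xabc", 5))
}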
@@ -887,12 +763,12 @@ func (r *Layer2Relayer) handleConfirmation(cfm *sender.Confirmation) {
|
||||
}
|
||||
|
||||
err := r.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatusByBundleHash(r.ctx, bundleHash, cfm.TxHash.String(), status); err != nil {
|
||||
if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatusByBundleHash(r.ctx, bundleHash, cfm.TxHash.String(), status, dbTX); err != nil {
|
||||
log.Warn("UpdateFinalizeTxHashAndRollupStatusByBundleHash failed", "confirmation", cfm, "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := r.bundleOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, bundleHash, cfm.TxHash.String(), status); err != nil {
|
||||
if err := r.bundleOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, bundleHash, cfm.TxHash.String(), status, dbTX); err != nil {
|
||||
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "confirmation", cfm, "err", err)
|
||||
return err
|
||||
}
|
||||
@@ -965,129 +841,6 @@ func (r *Layer2Relayer) handleL2RollupRelayerConfirmLoop(ctx context.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV0(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, error) {
|
||||
daBatch, err := codecv0.NewDABatchFromBytes(dbBatch.BatchHeader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create DA batch from bytes: %w", err)
|
||||
}
|
||||
|
||||
encodedChunks := make([][]byte, len(dbChunks))
|
||||
for i, c := range dbChunks {
|
||||
daChunk, createErr := codecv0.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
|
||||
if createErr != nil {
|
||||
return nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
|
||||
}
|
||||
daChunkBytes, encodeErr := daChunk.Encode()
|
||||
if encodeErr != nil {
|
||||
return nil, fmt.Errorf("failed to encode DA chunk: %w", encodeErr)
|
||||
}
|
||||
encodedChunks[i] = daChunkBytes
|
||||
}
|
||||
|
||||
calldata, packErr := r.l1RollupABI.Pack("commitBatch", daBatch.Version, dbParentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap)
|
||||
if packErr != nil {
|
||||
return nil, fmt.Errorf("failed to pack commitBatch: %w", packErr)
|
||||
}
|
||||
return calldata, nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV1(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, *kzg4844.Blob, error) {
|
||||
batch := &encoding.Batch{
|
||||
Index: dbBatch.Index,
|
||||
TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
|
||||
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
|
||||
Chunks: chunks,
|
||||
}
|
||||
|
||||
daBatch, createErr := codecv1.NewDABatch(batch)
|
||||
if createErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create DA batch: %w", createErr)
|
||||
}
|
||||
|
||||
encodedChunks := make([][]byte, len(dbChunks))
|
||||
for i, c := range dbChunks {
|
||||
daChunk, createErr := codecv1.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
|
||||
if createErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
|
||||
}
|
||||
encodedChunks[i] = daChunk.Encode()
|
||||
}
|
||||
|
||||
calldata, packErr := r.l1RollupABI.Pack("commitBatch", daBatch.Version, dbParentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap)
|
||||
if packErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to pack commitBatch: %w", packErr)
|
||||
}
|
||||
return calldata, daBatch.Blob(), nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV2(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, *kzg4844.Blob, error) {
|
||||
batch := &encoding.Batch{
|
||||
Index: dbBatch.Index,
|
||||
TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
|
||||
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
|
||||
Chunks: chunks,
|
||||
}
|
||||
|
||||
daBatch, createErr := codecv2.NewDABatch(batch)
|
||||
if createErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create DA batch: %w", createErr)
|
||||
}
|
||||
|
||||
encodedChunks := make([][]byte, len(dbChunks))
|
||||
for i, c := range dbChunks {
|
||||
daChunk, createErr := codecv2.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
|
||||
if createErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
|
||||
}
|
||||
encodedChunks[i] = daChunk.Encode()
|
||||
}
|
||||
|
||||
calldata, packErr := r.l1RollupABI.Pack("commitBatch", daBatch.Version, dbParentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap)
|
||||
if packErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to pack commitBatch: %w", packErr)
|
||||
}
|
||||
return calldata, daBatch.Blob(), nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV3(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, *kzg4844.Blob, error) {
|
||||
batch := &encoding.Batch{
|
||||
Index: dbBatch.Index,
|
||||
TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
|
||||
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
|
||||
Chunks: chunks,
|
||||
}
|
||||
|
||||
daBatch, createErr := codecv3.NewDABatch(batch)
|
||||
if createErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create DA batch: %w", createErr)
|
||||
}
|
||||
|
||||
encodedChunks := make([][]byte, len(dbChunks))
|
||||
for i, c := range dbChunks {
|
||||
daChunk, createErr := codecv3.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
|
||||
if createErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
|
||||
}
|
||||
encodedChunks[i] = daChunk.Encode()
|
||||
}
|
||||
|
||||
blobDataProof, err := daBatch.BlobDataProofForPointEvaluation()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get blob data proof for point evaluation: %w", err)
|
||||
}
|
||||
|
||||
skippedL1MessageBitmap, _, err := encoding.ConstructSkippedBitmap(batch.Index, batch.Chunks, batch.TotalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to construct skipped L1 message bitmap: %w", err)
|
||||
}
|
||||
|
||||
calldata, packErr := r.l1RollupABI.Pack("commitBatchWithBlobProof", daBatch.Version, dbParentBatch.BatchHeader, encodedChunks, skippedL1MessageBitmap, blobDataProof)
|
||||
if packErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to pack commitBatchWithBlobProof: %w", packErr)
|
||||
}
|
||||
return calldata, daBatch.Blob(), nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV4(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, *kzg4844.Blob, error) {
|
||||
batch := &encoding.Batch{
|
||||
Index: dbBatch.Index,
|
||||
@@ -1096,18 +849,26 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV4(dbBatch *orm.Batch, d
|
||||
Chunks: chunks,
|
||||
}
|
||||
|
||||
daBatch, createErr := codecv4.NewDABatch(batch, dbBatch.EnableCompress)
|
||||
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
|
||||
}
|
||||
|
||||
daBatch, createErr := codec.NewDABatch(batch)
|
||||
if createErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create DA batch: %w", createErr)
|
||||
}
|
||||
|
||||
encodedChunks := make([][]byte, len(dbChunks))
|
||||
for i, c := range dbChunks {
|
||||
daChunk, createErr := codecv4.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
|
||||
daChunk, createErr := codec.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
|
||||
if createErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
|
||||
}
|
||||
encodedChunks[i] = daChunk.Encode()
|
||||
encodedChunks[i], err = daChunk.Encode()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to encode DA chunk: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
blobDataProof, err := daBatch.BlobDataProofForPointEvaluation()
|
||||
@@ -1115,147 +876,14 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV4(dbBatch *orm.Batch, d
|
||||
return nil, nil, fmt.Errorf("failed to get blob data proof for point evaluation: %w", err)
|
||||
}
|
||||
|
||||
skippedL1MessageBitmap, _, err := encoding.ConstructSkippedBitmap(batch.Index, batch.Chunks, batch.TotalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to construct skipped L1 message bitmap: %w", err)
|
||||
}
|
||||
|
||||
calldata, packErr := r.l1RollupABI.Pack("commitBatchWithBlobProof", daBatch.Version, dbParentBatch.BatchHeader, encodedChunks, skippedL1MessageBitmap, blobDataProof)
|
||||
calldata, packErr := r.l1RollupABI.Pack("commitBatchWithBlobProof", daBatch.Version(), dbParentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap(), blobDataProof)
|
||||
if packErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to pack commitBatchWithBlobProof: %w", packErr)
|
||||
}
|
||||
return calldata, daBatch.Blob(), nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV0(dbBatch *orm.Batch, dbParentBatch *orm.Batch, aggProof *message.BatchProof) ([]byte, error) {
|
||||
if aggProof != nil { // finalizeBatch with proof.
|
||||
calldata, packErr := r.l1RollupABI.Pack(
|
||||
"finalizeBatchWithProof",
|
||||
dbBatch.BatchHeader,
|
||||
common.HexToHash(dbParentBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.WithdrawRoot),
|
||||
aggProof.Proof,
|
||||
)
|
||||
if packErr != nil {
|
||||
return nil, fmt.Errorf("failed to pack finalizeBatchWithProof: %w", packErr)
|
||||
}
|
||||
return calldata, nil
|
||||
}
|
||||
|
||||
// finalizeBatch without proof.
|
||||
calldata, packErr := r.l1RollupABI.Pack(
|
||||
"finalizeBatch",
|
||||
dbBatch.BatchHeader,
|
||||
common.HexToHash(dbParentBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.WithdrawRoot),
|
||||
)
|
||||
if packErr != nil {
|
||||
return nil, fmt.Errorf("failed to pack finalizeBatch: %w", packErr)
|
||||
}
|
||||
return calldata, nil
|
||||
}
|
||||
|
||||
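All of these payload builders funnel into the same go-ethereum ABI call: parse the rollup contract ABI once, then Pack a method name with its arguments to obtain calldata (four-byte selector plus encoded arguments). A small self-contained sketch follows; the one-method ABI is a simplified placeholder, not the actual rollup contract interface, and the upstream go-ethereum packages are used (the repo's scroll-tech fork exposes the same API).

package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

// rollupABI is a minimal, illustrative ABI with a single finalizeBatch method.
const rollupABI = `[{"name":"finalizeBatch","type":"function","inputs":[
	{"name":"batchHeader","type":"bytes"},
	{"name":"prevStateRoot","type":"bytes32"},
	{"name":"postStateRoot","type":"bytes32"},
	{"name":"withdrawRoot","type":"bytes32"}]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(rollupABI))
	if err != nil {
		panic(err)
	}
	// Pack encodes the method selector followed by the ABI-encoded arguments,
	// the same way the relayer builds finalizeBatch / commitBatch calldata.
	calldata, err := parsed.Pack(
		"finalizeBatch",
		[]byte{0x01, 0x02},
		common.HexToHash("0x11"),
		common.HexToHash("0x22"),
		common.HexToHash("0x33"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("selector: %x, total calldata: %d bytes\n", calldata[:4], len(calldata))
}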
func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV1(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk, aggProof *message.BatchProof) ([]byte, error) {
|
||||
batch := &encoding.Batch{
|
||||
Index: dbBatch.Index,
|
||||
TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
|
||||
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
|
||||
Chunks: chunks,
|
||||
}
|
||||
|
||||
daBatch, createErr := codecv1.NewDABatch(batch)
|
||||
if createErr != nil {
|
||||
return nil, fmt.Errorf("failed to create DA batch: %w", createErr)
|
||||
}
|
||||
|
||||
blobDataProof, getErr := daBatch.BlobDataProof()
|
||||
if getErr != nil {
|
||||
return nil, fmt.Errorf("failed to get blob data proof: %w", getErr)
|
||||
}
|
||||
|
||||
if aggProof != nil { // finalizeBatch4844 with proof.
|
||||
calldata, packErr := r.l1RollupABI.Pack(
|
||||
"finalizeBatchWithProof4844",
|
||||
dbBatch.BatchHeader,
|
||||
common.HexToHash(dbParentBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.WithdrawRoot),
|
||||
blobDataProof,
|
||||
aggProof.Proof,
|
||||
)
|
||||
if packErr != nil {
|
||||
return nil, fmt.Errorf("failed to pack finalizeBatchWithProof4844: %w", packErr)
|
||||
}
|
||||
return calldata, nil
|
||||
}
|
||||
|
||||
// finalizeBatch4844 without proof.
|
||||
calldata, packErr := r.l1RollupABI.Pack(
|
||||
"finalizeBatch4844",
|
||||
dbBatch.BatchHeader,
|
||||
common.HexToHash(dbParentBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.WithdrawRoot),
|
||||
blobDataProof,
|
||||
)
|
||||
if packErr != nil {
|
||||
return nil, fmt.Errorf("failed to pack finalizeBatch4844: %w", packErr)
|
||||
}
|
||||
return calldata, nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV2(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk, aggProof *message.BatchProof) ([]byte, error) {
|
||||
batch := &encoding.Batch{
|
||||
Index: dbBatch.Index,
|
||||
TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
|
||||
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
|
||||
Chunks: chunks,
|
||||
}
|
||||
|
||||
daBatch, createErr := codecv2.NewDABatch(batch)
|
||||
if createErr != nil {
|
||||
return nil, fmt.Errorf("failed to create DA batch: %w", createErr)
|
||||
}
|
||||
|
||||
blobDataProof, getErr := daBatch.BlobDataProof()
|
||||
if getErr != nil {
|
||||
return nil, fmt.Errorf("failed to get blob data proof: %w", getErr)
|
||||
}
|
||||
|
||||
if aggProof != nil { // finalizeBatch4844 with proof.
|
||||
calldata, packErr := r.l1RollupABI.Pack(
|
||||
"finalizeBatchWithProof4844",
|
||||
dbBatch.BatchHeader,
|
||||
common.HexToHash(dbParentBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.WithdrawRoot),
|
||||
blobDataProof,
|
||||
aggProof.Proof,
|
||||
)
|
||||
if packErr != nil {
|
||||
return nil, fmt.Errorf("failed to pack finalizeBatchWithProof4844: %w", packErr)
|
||||
}
|
||||
return calldata, nil
|
||||
}
|
||||
|
||||
// finalizeBatch4844 without proof.
|
||||
calldata, packErr := r.l1RollupABI.Pack(
|
||||
"finalizeBatch4844",
|
||||
dbBatch.BatchHeader,
|
||||
common.HexToHash(dbParentBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.WithdrawRoot),
|
||||
blobDataProof,
|
||||
)
|
||||
if packErr != nil {
|
||||
return nil, fmt.Errorf("failed to pack finalizeBatch4844: %w", packErr)
|
||||
}
|
||||
return calldata, nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV3AndV4(dbBatch *orm.Batch, aggProof *message.BundleProof) ([]byte, error) {
|
||||
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV4(dbBatch *orm.Batch, aggProof *message.BundleProof) ([]byte, error) {
|
||||
if aggProof != nil { // finalizeBundle with proof.
|
||||
calldata, packErr := r.l1RollupABI.Pack(
|
||||
"finalizeBundleWithProof",
|
||||
|
||||
@@ -51,21 +51,17 @@ func testCreateNewRelayer(t *testing.T) {
|
||||
}
|
||||
|
||||
func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
chainConfig = ¶ms.ChainConfig{}
|
||||
} else if codecVersion == encoding.CodecV1 {
|
||||
chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0)}
|
||||
} else if codecVersion == encoding.CodecV2 {
|
||||
chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
} else {
|
||||
chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
|
||||
assert.Fail(t, "unsupported codec version, expected CodecV4")
|
||||
}
|
||||
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
|
||||
@@ -79,9 +75,9 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, rutils.CodecConfig{Version: codecVersion}, rutils.ChunkMetrics{})
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, rutils.CodecConfig{Version: codecVersion}, rutils.ChunkMetrics{})
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
@@ -92,7 +88,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: codecVersion}, rutils.BatchMetrics{})
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessPendingBatches()
|
||||
@@ -106,85 +102,16 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func testL2RelayerProcessCommittedBatches(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
chainConfig = ¶ms.ChainConfig{}
|
||||
} else if codecVersion == encoding.CodecV1 {
|
||||
chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0)}
|
||||
} else {
|
||||
chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
|
||||
}
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, rutils.CodecConfig{Version: codecVersion}, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, rutils.CodecConfig{Version: codecVersion}, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: codecVersion}, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateProvingStatus(context.Background(), dbBatch.Hash, types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessCommittedBatches()
|
||||
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
// no valid proof, rollup status remains the same
|
||||
assert.Equal(t, types.RollupCommitted, statuses[0])
|
||||
|
||||
proof := &message.BatchProof{
|
||||
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
}
|
||||
err = batchOrm.UpdateProofByHash(context.Background(), dbBatch.Hash, proof, 100)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessCommittedBatches()
|
||||
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupFinalizing, statuses[0])
|
||||
relayer.StopSenders()
|
||||
}
|
||||
}
|
||||
|
||||
func testL2RelayerProcessPendingBundles(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV3}
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV3 {
|
||||
chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
}
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
@@ -197,7 +124,7 @@ func testL2RelayerProcessPendingBundles(t *testing.T) {
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: codecVersion}, rutils.BatchMetrics{})
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
bundleOrm := orm.NewBundle(db)
|
||||
@@ -235,79 +162,8 @@ func testL2RelayerProcessPendingBundles(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
|
||||
l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
chainConfig = ¶ms.ChainConfig{}
|
||||
} else if codecVersion == encoding.CodecV1 {
|
||||
chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0)}
|
||||
} else {
|
||||
chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
|
||||
}
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, rutils.CodecConfig{Version: codecVersion}, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, rutils.CodecConfig{Version: codecVersion}, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: codecVersion}, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
relayer.ProcessCommittedBatches()
|
||||
|
||||
batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0)
|
||||
if batchErr != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
batchStatus := len(batchInDB) == 1 && types.RollupStatus(batchInDB[0].RollupStatus) == types.RollupFinalizing &&
|
||||
types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified
|
||||
|
||||
chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
|
||||
if chunkErr != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified &&
|
||||
types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified
|
||||
|
||||
return batchStatus && chunkStatus
|
||||
}, 5*time.Second, 100*time.Millisecond, "Batch or Chunk status did not update as expected")
|
||||
relayer.StopSenders()
|
||||
}
|
||||
}
|
||||
|
||||
func testL2RelayerFinalizeTimeoutBundles(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV3}
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
@@ -316,8 +172,8 @@ func testL2RelayerFinalizeTimeoutBundles(t *testing.T) {
|
||||
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
|
||||
l2Cfg.RelayerConfig.FinalizeBundleWithoutProofTimeoutSec = 0
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV3 {
|
||||
chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
}
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
@@ -326,9 +182,9 @@ func testL2RelayerFinalizeTimeoutBundles(t *testing.T) {
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, rutils.CodecConfig{Version: codecVersion}, rutils.ChunkMetrics{})
|
||||
chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, rutils.CodecConfig{Version: codecVersion}, rutils.ChunkMetrics{})
|
||||
chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
@@ -339,7 +195,7 @@ func testL2RelayerFinalizeTimeoutBundles(t *testing.T) {
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: codecVersion}, rutils.BatchMetrics{})
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
|
||||
@@ -411,7 +267,7 @@ func testL2RelayerCommitConfirm(t *testing.T) {
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.BatchMetrics{})
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batchHashes[i] = dbBatch.Hash
|
||||
}
|
||||
@@ -443,62 +299,6 @@ func testL2RelayerCommitConfirm(t *testing.T) {
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func testL2RelayerFinalizeBatchConfirm(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
// Create and set up the Layer2 Relayer.
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l2Relayer.StopSenders()
|
||||
|
||||
// Simulate message confirmations.
|
||||
isSuccessful := []bool{true, false}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
batchHashes := make([]string, len(isSuccessful))
|
||||
for i := range batchHashes {
|
||||
batch := &encoding.Batch{
|
||||
Index: uint64(i + 1),
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batchHashes[i] = dbBatch.Hash
|
||||
}
|
||||
|
||||
for i, batchHash := range batchHashes {
|
||||
l2Relayer.finalizeSender.SendConfirmation(&sender.Confirmation{
|
||||
ContextID: batchHash,
|
||||
IsSuccessful: isSuccessful[i],
|
||||
TxHash: common.HexToHash("0x123456789abcdef"),
|
||||
SenderType: types.SenderTypeFinalizeBatch,
|
||||
})
|
||||
}
|
||||
|
||||
// Check the database for the updated status using TryTimes.
|
||||
ok := utils.TryTimes(5, func() bool {
|
||||
expectedStatuses := []types.RollupStatus{
|
||||
types.RollupFinalized,
|
||||
types.RollupFinalizeFailed,
|
||||
}
|
||||
|
||||
for i, batchHash := range batchHashes {
|
||||
batchInDB, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": batchHash}, nil, 0)
|
||||
if err != nil || len(batchInDB) != 1 || types.RollupStatus(batchInDB[0].RollupStatus) != expectedStatuses[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func testL2RelayerFinalizeBundleConfirm(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
@@ -525,11 +325,11 @@ func testL2RelayerFinalizeBundleConfirm(t *testing.T) {
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.BatchMetrics{})
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batchHashes[i] = dbBatch.Hash
|
||||
|
||||
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV3)
|
||||
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV4)
|
||||
assert.NoError(t, err)
|
||||
bundleHashes[i] = bundle.Hash
|
||||
|
||||
@@ -580,7 +380,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.BatchMetrics{})
|
||||
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV0, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch2 := &encoding.Batch{
|
||||
@@ -590,7 +390,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
|
||||
Chunks: []*encoding.Chunk{chunk2},
|
||||
}
|
||||
|
||||
dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.BatchMetrics{})
|
||||
dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV0, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create and set up the Layer2 Relayer.
|
||||
@@ -742,9 +542,9 @@ func testGetBatchStatusByIndex(t *testing.T) {
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.ChunkMetrics{})
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, encoding.CodecV0, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.ChunkMetrics{})
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, encoding.CodecV0, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
@@ -755,7 +555,7 @@ func testGetBatchStatusByIndex(t *testing.T) {
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.BatchMetrics{})
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
status, err := relayer.getBatchStatusByIndex(dbBatch)
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv0"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
@@ -83,7 +82,9 @@ func setupEnv(t *testing.T) {
|
||||
err = json.Unmarshal(templateBlockTrace1, block1)
|
||||
assert.NoError(t, err)
|
||||
chunk1 = &encoding.Chunk{Blocks: []*encoding.Block{block1}}
|
||||
daChunk1, err := codecv0.NewDAChunk(chunk1, 0)
|
||||
codec, err := encoding.CodecFromVersion(encoding.CodecV0)
|
||||
assert.NoError(t, err)
|
||||
daChunk1, err := codec.NewDAChunk(chunk1, 0)
|
||||
assert.NoError(t, err)
|
||||
chunkHash1, err = daChunk1.Hash()
|
||||
assert.NoError(t, err)
|
||||
@@ -94,7 +95,7 @@ func setupEnv(t *testing.T) {
|
||||
err = json.Unmarshal(templateBlockTrace2, block2)
|
||||
assert.NoError(t, err)
|
||||
chunk2 = &encoding.Chunk{Blocks: []*encoding.Block{block2}}
|
||||
daChunk2, err := codecv0.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
|
||||
daChunk2, err := codec.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
|
||||
assert.NoError(t, err)
|
||||
chunkHash2, err = daChunk2.Hash()
|
||||
assert.NoError(t, err)
|
||||
@@ -123,12 +124,9 @@ func TestFunctions(t *testing.T) {
|
||||
// Run l2 relayer test cases.
|
||||
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
|
||||
t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
|
||||
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
|
||||
t.Run("TestL2RelayerProcessPendingBundles", testL2RelayerProcessPendingBundles)
|
||||
t.Run("TestL2RelayerFinalizeTimeoutBatches", testL2RelayerFinalizeTimeoutBatches)
|
||||
t.Run("TestL2RelayerFinalizeTimeoutBundles", testL2RelayerFinalizeTimeoutBundles)
|
||||
t.Run("TestL2RelayerCommitConfirm", testL2RelayerCommitConfirm)
|
||||
t.Run("TestL2RelayerFinalizeBatchConfirm", testL2RelayerFinalizeBatchConfirm)
|
||||
t.Run("TestL2RelayerFinalizeBundleConfirm", testL2RelayerFinalizeBundleConfirm)
|
||||
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
|
||||
t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)
|
||||
|
||||
@@ -38,7 +38,7 @@ const (
)

var (
// ErrTooManyPendingBlobTxs
// ErrTooManyPendingBlobTxs error for too many pending blob txs
ErrTooManyPendingBlobTxs = errors.New("the limit of pending blob-carrying transactions has been exceeded")
)

@@ -175,7 +175,6 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()
var (
feeData *FeeData
tx *gethTypes.Transaction
sidecar *gethTypes.BlobTxSidecar
err error
)

@@ -217,29 +216,44 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
}

if tx, err = s.createAndSendTx(feeData, target, data, sidecar, nil); err != nil {
signedTx, err := s.createTx(feeData, target, data, sidecar, s.transactionSigner.GetNonce())
if err != nil {
s.metrics.sendTransactionFailureSendTx.WithLabelValues(s.service, s.name).Inc()
log.Error("failed to create and send tx (non-resubmit case)", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err)
return common.Hash{}, fmt.Errorf("failed to create and send transaction, err: %w", err)
log.Error("failed to create signed tx (non-resubmit case)", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err)
return common.Hash{}, fmt.Errorf("failed to create signed transaction, err: %w", err)
}

if err = s.pendingTransactionOrm.InsertPendingTransaction(s.ctx, contextID, s.getSenderMeta(), tx, blockNumber); err != nil {
// Insert the transaction into the pending transaction table.
// A corner case is that the transaction is inserted into the table but not sent to the chain, because the server is stopped in the middle.
// This case will be handled by the checkPendingTransaction function.
if err = s.pendingTransactionOrm.InsertPendingTransaction(s.ctx, contextID, s.getSenderMeta(), signedTx, blockNumber); err != nil {
log.Error("failed to insert transaction", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err)
return common.Hash{}, fmt.Errorf("failed to insert transaction, err: %w", err)
}
return tx.Hash(), nil

if err := s.client.SendTransaction(s.ctx, signedTx); err != nil {
// Delete the transaction from the pending transaction table if it fails to send.
if updateErr := s.pendingTransactionOrm.DeleteTransactionByTxHash(s.ctx, signedTx.Hash()); updateErr != nil {
log.Error("failed to delete transaction", "tx hash", signedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", signedTx.Nonce(), "err", updateErr)
return common.Hash{}, fmt.Errorf("failed to delete transaction, err: %w", updateErr)
}

log.Error("failed to send tx", "tx hash", signedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", signedTx.Nonce(), "err", err)
// Check if contain nonce, and reset nonce
// only reset nonce when it is not from resubmit
if strings.Contains(err.Error(), "nonce too low") {
s.resetNonce(context.Background())
}
return common.Hash{}, fmt.Errorf("failed to send transaction, err: %w", err)
}

s.transactionSigner.SetNonce(signedTx.Nonce() + 1)

return signedTx.Hash(), nil
}
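The hunk above replaces the old create-and-send helper with an explicit three-step flow: create and sign the transaction, persist it as pending, then broadcast it, deleting the row again if the broadcast fails. Below is a minimal sketch of that ordering, using hypothetical `txStore` and `txBroadcaster` interfaces rather than the real pending-transaction ORM and Ethereum client; it models the control flow only, not the actual implementation.

```go
package sendflow

import (
	"errors"
	"fmt"
)

// txStore and txBroadcaster are hypothetical stand-ins for the pending-transaction
// table and the Ethereum client used by the sender.
type txStore interface {
	InsertPending(hash string) error
	Delete(hash string) error
}

type txBroadcaster interface {
	Send(hash string) error
}

// sendWithPersistFirst mirrors the new SendTransaction ordering:
// 1) persist as pending, 2) broadcast, 3) undo the insert if broadcasting fails.
// Persisting first means a crash between the two steps leaves a tracked pending row
// that a later check can resubmit, instead of a sent-but-untracked transaction.
func sendWithPersistFirst(store txStore, client txBroadcaster, txHash string) error {
	if err := store.InsertPending(txHash); err != nil {
		return fmt.Errorf("insert pending: %w", err)
	}
	if err := client.Send(txHash); err != nil {
		if delErr := store.Delete(txHash); delErr != nil {
			return errors.Join(err, delErr)
		}
		return fmt.Errorf("send: %w", err)
	}
	return nil
}
```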
func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, overrideNonce *uint64) (*gethTypes.Transaction, error) {
var (
nonce = s.transactionSigner.GetNonce()
txData gethTypes.TxData
)

// this is a resubmit call, override the nonce
if overrideNonce != nil {
nonce = *overrideNonce
}
func (s *Sender) createTx(feeData *FeeData, target *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, nonce uint64) (*gethTypes.Transaction, error) {
var txData gethTypes.TxData

switch s.config.TxType {
case LegacyTxType:

@@ -292,16 +306,6 @@ func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, data
return nil, err
}

if err = s.client.SendTransaction(s.ctx, signedTx); err != nil {
log.Error("failed to send tx", "tx hash", signedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", signedTx.Nonce(), "err", err)
// Check if contain nonce, and reset nonce
// only reset nonce when it is not from resubmit
if strings.Contains(err.Error(), "nonce too low") && overrideNonce == nil {
s.resetNonce(context.Background())
}
return nil, err
}

if feeData.gasTipCap != nil {
s.metrics.currentGasTipCap.WithLabelValues(s.service, s.name).Set(float64(feeData.gasTipCap.Uint64()))
}

@@ -320,10 +324,6 @@ func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, data

s.metrics.currentGasLimit.WithLabelValues(s.service, s.name).Set(float64(feeData.gasLimit))

// update nonce when it is not from resubmit
if overrideNonce == nil {
s.transactionSigner.SetNonce(nonce + 1)
}
return signedTx, nil
}
@@ -337,7 +337,7 @@ func (s *Sender) resetNonce(ctx context.Context) {
s.transactionSigner.SetNonce(nonce)
}

func (s *Sender) resubmitTransaction(tx *gethTypes.Transaction, baseFee, blobBaseFee uint64) (*gethTypes.Transaction, error) {
func (s *Sender) createReplacingTransaction(tx *gethTypes.Transaction, baseFee, blobBaseFee uint64) (*gethTypes.Transaction, error) {
escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)

@@ -357,6 +357,10 @@ func (s *Sender) resubmitTransaction(tx *gethTypes.Transaction, baseFee, blobBas
originalGasPrice := tx.GasPrice()
gasPrice := new(big.Int).Mul(originalGasPrice, escalateMultipleNum)
gasPrice = new(big.Int).Div(gasPrice, escalateMultipleDen)
baseFeeInt := new(big.Int).SetUint64(baseFee)
if gasPrice.Cmp(baseFeeInt) < 0 {
gasPrice = baseFeeInt
}
if gasPrice.Cmp(maxGasPrice) > 0 {
gasPrice = maxGasPrice
}

@@ -449,6 +453,15 @@ func (s *Sender) resubmitTransaction(tx *gethTypes.Transaction, baseFee, blobBas
blobGasFeeCap = maxBlobGasPrice
}

// Check if any fee cap is less than double
doubledTipCap := new(big.Int).Mul(originalGasTipCap, big.NewInt(2))
doubledFeeCap := new(big.Int).Mul(originalGasFeeCap, big.NewInt(2))
doubledBlobFeeCap := new(big.Int).Mul(originalBlobGasFeeCap, big.NewInt(2))
if gasTipCap.Cmp(doubledTipCap) < 0 || gasFeeCap.Cmp(doubledFeeCap) < 0 || blobGasFeeCap.Cmp(doubledBlobFeeCap) < 0 {
log.Error("gas fees must be at least double", "originalTipCap", originalGasTipCap, "currentTipCap", gasTipCap, "requiredTipCap", doubledTipCap, "originalFeeCap", originalGasFeeCap, "currentFeeCap", gasFeeCap, "requiredFeeCap", doubledFeeCap, "originalBlobFeeCap", originalBlobGasFeeCap, "currentBlobFeeCap", blobGasFeeCap, "requiredBlobFeeCap", doubledBlobFeeCap)
return nil, errors.New("gas fees must be at least double")
}

feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
feeData.blobGasFeeCap = blobGasFeeCap
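createReplacingTransaction keeps the existing escalation arithmetic (multiply by EscalateMultipleNum/EscalateMultipleDen, floor at the current base fee, cap at MaxGasPrice) and adds the at-least-double guard shown above, since nodes generally reject a blob-transaction replacement whose caps have not fully doubled. A standalone sketch of that arithmetic with illustrative names (not the sender's actual fields):

```go
package feebump

import (
	"errors"
	"math/big"
)

// escalate bumps an original fee cap by num/den, then clamps it to the
// [floor, ceiling] range, mirroring the arithmetic in createReplacingTransaction.
// All values are in wei; parameter names are illustrative.
func escalate(original *big.Int, num, den, floor, ceiling uint64) *big.Int {
	bumped := new(big.Int).Mul(original, new(big.Int).SetUint64(num))
	bumped.Div(bumped, new(big.Int).SetUint64(den))
	if f := new(big.Int).SetUint64(floor); bumped.Cmp(f) < 0 {
		bumped.Set(f)
	}
	if c := new(big.Int).SetUint64(ceiling); bumped.Cmp(c) > 0 {
		bumped.Set(c)
	}
	return bumped
}

// requireAtLeastDouble mirrors the "gas fees must be at least double" guard:
// the replacement is only worth broadcasting if the cap has at least doubled,
// otherwise it would be rejected as an underpriced replacement.
func requireAtLeastDouble(original, replacement *big.Int) error {
	doubled := new(big.Int).Lsh(original, 1)
	if replacement.Cmp(doubled) < 0 {
		return errors.New("replacement fee cap is less than double the original")
	}
	return nil
}
```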
@@ -468,12 +481,12 @@ func (s *Sender) resubmitTransaction(tx *gethTypes.Transaction, baseFee, blobBas

nonce := tx.Nonce()
s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()
tx, err := s.createAndSendTx(&feeData, tx.To(), tx.Data(), tx.BlobTxSidecar(), &nonce)
signedTx, err := s.createTx(&feeData, tx.To(), tx.Data(), tx.BlobTxSidecar(), nonce)
if err != nil {
log.Error("failed to create and send tx (resubmit case)", "from", s.transactionSigner.GetAddr().String(), "nonce", nonce, "err", err)
log.Error("failed to create signed tx (resubmit case)", "from", s.transactionSigner.GetAddr().String(), "nonce", nonce, "err", err)
return nil, err
}
return tx, nil
return signedTx, nil
}
// checkPendingTransaction checks the confirmation status of pending transactions against the latest confirmed block number.

@@ -500,30 +513,29 @@ func (s *Sender) checkPendingTransaction() {
}

for _, txnToCheck := range transactionsToCheck {
tx := new(gethTypes.Transaction)
if err := tx.DecodeRLP(rlp.NewStream(bytes.NewReader(txnToCheck.RLPEncoding), 0)); err != nil {
originalTx := new(gethTypes.Transaction)
if err := originalTx.DecodeRLP(rlp.NewStream(bytes.NewReader(txnToCheck.RLPEncoding), 0)); err != nil {
log.Error("failed to decode RLP", "context ID", txnToCheck.ContextID, "sender meta", s.getSenderMeta(), "err", err)
continue
}

receipt, err := s.client.TransactionReceipt(s.ctx, tx.Hash())
receipt, err := s.client.TransactionReceipt(s.ctx, originalTx.Hash())
if err == nil { // tx confirmed.
if receipt.BlockNumber.Uint64() <= confirmed {
err := s.db.Transaction(func(dbTX *gorm.DB) error {
if dbTxErr := s.db.Transaction(func(dbTX *gorm.DB) error {
// Update the status of the transaction to TxStatusConfirmed.
if err := s.pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(s.ctx, tx.Hash(), types.TxStatusConfirmed, dbTX); err != nil {
log.Error("failed to update transaction status by tx hash", "hash", tx.Hash().String(), "sender meta", s.getSenderMeta(), "from", s.transactionSigner.GetAddr().String(), "nonce", tx.Nonce(), "err", err)
return err
if updateErr := s.pendingTransactionOrm.UpdateTransactionStatusByTxHash(s.ctx, originalTx.Hash(), types.TxStatusConfirmed, dbTX); updateErr != nil {
log.Error("failed to update transaction status by tx hash", "hash", originalTx.Hash().String(), "sender meta", s.getSenderMeta(), "from", s.transactionSigner.GetAddr().String(), "nonce", originalTx.Nonce(), "err", updateErr)
return updateErr
}
// Update other transactions with the same nonce and sender address as failed.
if err := s.pendingTransactionOrm.UpdateOtherTransactionsAsFailedByNonce(s.ctx, txnToCheck.SenderAddress, tx.Nonce(), tx.Hash(), dbTX); err != nil {
log.Error("failed to update other transactions as failed by nonce", "senderAddress", txnToCheck.SenderAddress, "nonce", tx.Nonce(), "excludedTxHash", tx.Hash(), "err", err)
return err
if updateErr := s.pendingTransactionOrm.UpdateOtherTransactionsAsFailedByNonce(s.ctx, txnToCheck.SenderAddress, originalTx.Nonce(), originalTx.Hash(), dbTX); updateErr != nil {
log.Error("failed to update other transactions as failed by nonce", "senderAddress", txnToCheck.SenderAddress, "nonce", originalTx.Nonce(), "excludedTxHash", originalTx.Hash(), "err", updateErr)
return updateErr
}
return nil
})
if err != nil {
log.Error("db transaction failed after receiving confirmation", "err", err)
}); dbTxErr != nil {
log.Error("db transaction failed after receiving confirmation", "err", dbTxErr)
return
}

@@ -531,7 +543,7 @@ func (s *Sender) checkPendingTransaction() {
s.confirmCh <- &Confirmation{
ContextID: txnToCheck.ContextID,
IsSuccessful: receipt.Status == gethTypes.ReceiptStatusSuccessful,
TxHash: tx.Hash(),
TxHash: originalTx.Hash(),
SenderType: s.senderType,
}
}
@@ -548,52 +560,77 @@ func (s *Sender) checkPendingTransaction() {

// early return if the previous transaction has not been confirmed yet.
// currentNonce is already the confirmed nonce + 1.
if tx.Nonce() > currentNonce {
log.Debug("previous transaction not yet confirmed, skip bumping gas price", "address", txnToCheck.SenderAddress, "currentNonce", currentNonce, "txNonce", tx.Nonce())
if originalTx.Nonce() > currentNonce {
log.Debug("previous transaction not yet confirmed, skip bumping gas price", "address", txnToCheck.SenderAddress, "currentNonce", currentNonce, "txNonce", originalTx.Nonce())
continue
}

// It's possible that the pending transaction was marked as failed earlier in this loop (e.g., if one of its replacements has already been confirmed).
// Therefore, we fetch the current transaction status again for accuracy before proceeding.
status, err := s.pendingTransactionOrm.GetTxStatusByTxHash(s.ctx, tx.Hash())
status, err := s.pendingTransactionOrm.GetTxStatusByTxHash(s.ctx, originalTx.Hash())
if err != nil {
log.Error("failed to get transaction status by tx hash", "hash", tx.Hash().String(), "err", err)
log.Error("failed to get transaction status by tx hash", "hash", originalTx.Hash().String(), "err", err)
return
}
if status == types.TxStatusConfirmedFailed {
log.Warn("transaction already marked as failed, skipping resubmission", "hash", tx.Hash().String())
log.Warn("transaction already marked as failed, skipping resubmission", "hash", originalTx.Hash().String())
continue
}

log.Info("resubmit transaction",
"service", s.service,
"name", s.name,
"hash", tx.Hash().String(),
"hash", originalTx.Hash().String(),
"from", s.transactionSigner.GetAddr().String(),
"nonce", tx.Nonce(),
"nonce", originalTx.Nonce(),
"submitBlockNumber", txnToCheck.SubmitBlockNumber,
"currentBlockNumber", blockNumber,
"escalateBlocks", s.config.EscalateBlocks)

if newTx, err := s.resubmitTransaction(tx, baseFee, blobBaseFee); err != nil {
newSignedTx, err := s.createReplacingTransaction(originalTx, baseFee, blobBaseFee)
if err != nil {
s.metrics.resubmitTransactionFailedTotal.WithLabelValues(s.service, s.name).Inc()
log.Error("failed to resubmit transaction", "context ID", txnToCheck.ContextID, "sender meta", s.getSenderMeta(), "from", s.transactionSigner.GetAddr().String(), "nonce", tx.Nonce(), "err", err)
} else {
err := s.db.Transaction(func(dbTX *gorm.DB) error {
// Update the status of the original transaction as replaced, while still checking its confirmation status.
if err := s.pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(s.ctx, tx.Hash(), types.TxStatusReplaced, dbTX); err != nil {
return fmt.Errorf("failed to update status of transaction with hash %s to TxStatusReplaced, err: %w", tx.Hash().String(), err)
log.Error("failed to resubmit transaction", "context ID", txnToCheck.ContextID, "sender meta", s.getSenderMeta(), "from", s.transactionSigner.GetAddr().String(), "nonce", originalTx.Nonce(), "err", err)
return
}

// Update the status of the original transaction as replaced, while still checking its confirmation status.
// Insert the new transaction that has replaced the original one, and set the status as pending.
// A corner case is that the transaction is inserted into the table but not sent to the chain, because the server is stopped in the middle.
// This case will be handled by the checkPendingTransaction function.
if dbTxErr := s.db.Transaction(func(dbTX *gorm.DB) error {
if updateErr := s.pendingTransactionOrm.UpdateTransactionStatusByTxHash(s.ctx, originalTx.Hash(), types.TxStatusReplaced, dbTX); updateErr != nil {
return fmt.Errorf("failed to update status of transaction with hash %s to TxStatusReplaced, err: %w", newSignedTx.Hash().String(), updateErr)
}
if updateErr := s.pendingTransactionOrm.InsertPendingTransaction(s.ctx, txnToCheck.ContextID, s.getSenderMeta(), newSignedTx, blockNumber, dbTX); updateErr != nil {
return fmt.Errorf("failed to insert new pending transaction with context ID: %s, nonce: %d, hash: %v, previous block number: %v, current block number: %v, err: %w", txnToCheck.ContextID, newSignedTx.Nonce(), newSignedTx.Hash().String(), txnToCheck.SubmitBlockNumber, blockNumber, updateErr)
}
return nil
}); dbTxErr != nil {
log.Error("db transaction failed after resubmitting", "err", dbTxErr)
return
}

if err := s.client.SendTransaction(s.ctx, newSignedTx); err != nil {
// SendTransaction failed, need to rollback the previous database changes
if rollbackErr := s.db.Transaction(func(tx *gorm.DB) error {
// Restore original transaction status back to pending
if updateErr := s.pendingTransactionOrm.UpdateTransactionStatusByTxHash(s.ctx, originalTx.Hash(), types.TxStatusPending, tx); updateErr != nil {
return fmt.Errorf("failed to rollback status of original transaction, err: %w", updateErr)
}
// Record the new transaction that has replaced the original one.
if err := s.pendingTransactionOrm.InsertPendingTransaction(s.ctx, txnToCheck.ContextID, s.getSenderMeta(), newTx, blockNumber, dbTX); err != nil {
return fmt.Errorf("failed to insert new pending transaction with context ID: %s, nonce: %d, hash: %v, previous block number: %v, current block number: %v, err: %w", txnToCheck.ContextID, newTx.Nonce(), newTx.Hash().String(), txnToCheck.SubmitBlockNumber, blockNumber, err)
// Delete the new transaction that was inserted
if updateErr := s.pendingTransactionOrm.DeleteTransactionByTxHash(s.ctx, newSignedTx.Hash(), tx); updateErr != nil {
return fmt.Errorf("failed to delete new transaction, err: %w", updateErr)
}
return nil
})
if err != nil {
log.Error("db transaction failed after resubmitting", "err", err)
}); rollbackErr != nil {
// Both SendTransaction and rollback failed
log.Error("failed to rollback database after SendTransaction failed", "tx hash", newSignedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", newSignedTx.Nonce(), "sendTxErr", err, "rollbackErr", rollbackErr)
return
}

log.Error("failed to send replacing tx", "tx hash", newSignedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", newSignedTx.Nonce(), "err", err)
return
}
}
}
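The resubmission path now records the replacement before broadcasting it and explicitly undoes the database changes when the broadcast fails. The sketch below models just that control flow with a hypothetical `pendingStore` interface; in sender.go the updates run inside gorm transactions, and the rollback restores the original row to pending and deletes the replacement row.

```go
package resubmit

import "fmt"

// pendingStore is a hypothetical stand-in for the pending-transaction table;
// only the calls needed to show the rollback pattern are modeled here.
type pendingStore interface {
	MarkReplaced(oldHash string) error
	RestorePending(oldHash string) error
	InsertPending(newHash string) error
	Delete(newHash string) error
}

type broadcaster interface {
	Send(hash string) error
}

// replaceAndSend mirrors the flow above: record the replacement in the database
// first, then broadcast, and undo both rows if broadcasting fails so the original
// transaction is still tracked as pending on the next check.
func replaceAndSend(db pendingStore, client broadcaster, oldHash, newHash string) error {
	if err := db.MarkReplaced(oldHash); err != nil {
		return fmt.Errorf("mark replaced: %w", err)
	}
	if err := db.InsertPending(newHash); err != nil {
		return fmt.Errorf("insert replacement: %w", err)
	}
	if err := client.Send(newHash); err != nil {
		// Best-effort rollback; the real code wraps this in a single DB transaction.
		if rbErr := db.RestorePending(oldHash); rbErr != nil {
			return fmt.Errorf("send failed (%v) and rollback failed: %w", err, rbErr)
		}
		if rbErr := db.Delete(newHash); rbErr != nil {
			return fmt.Errorf("send failed (%v) and cleanup failed: %w", err, rbErr)
		}
		return fmt.Errorf("send replacement: %w", err)
	}
	return nil
}
```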
@@ -626,7 +663,7 @@ func (s *Sender) getSenderMeta() *orm.SenderMeta {
}

func (s *Sender) getBlockNumberAndBaseFeeAndBlobFee(ctx context.Context) (uint64, uint64, uint64, error) {
header, err := s.client.HeaderByNumber(ctx, nil)
header, err := s.client.HeaderByNumber(ctx, big.NewInt(rpc.PendingBlockNumber.Int64()))
if err != nil {
return 0, 0, 0, fmt.Errorf("failed to get header by number, err: %w", err)
}

@@ -637,11 +674,11 @@ func (s *Sender) getBlockNumberAndBaseFeeAndBlobFee(ctx context.Context) (uint64
}

var blobBaseFee uint64
if header.ExcessBlobGas != nil && header.BlobGasUsed != nil {
parentExcessBlobGas := misc.CalcExcessBlobGas(*header.ExcessBlobGas, *header.BlobGasUsed)
blobBaseFee = misc.CalcBlobFee(parentExcessBlobGas).Uint64()
if excess := header.ExcessBlobGas; excess != nil {
blobBaseFee = misc.CalcBlobFee(*excess).Uint64()
}
return header.Number.Uint64(), baseFee, blobBaseFee, nil
// header.Number.Uint64() returns the pendingBlockNumber, so we minus 1 to get the latestBlockNumber.
return header.Number.Uint64() - 1, baseFee, blobBaseFee, nil
}
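Two things change here: the header is now fetched for the pending block (so `header.Number - 1` is the latest block number), and the blob base fee is derived directly from that header's own ExcessBlobGas rather than recomputing the next block's excess. For reference, the blob base fee is the EIP-4844 `fake_exponential` function applied to the excess blob gas; a self-contained sketch using the EIP-4844 constants (not values taken from this repository):

```go
package blobfee

import "math/big"

// Constants from EIP-4844; go-ethereum's misc.CalcBlobFee evaluates the same formula.
const (
	minBlobBaseFee            = 1
	blobBaseFeeUpdateFraction = 3338477
)

// fakeExponential is the integer approximation of factor * e**(numerator/denominator)
// defined in EIP-4844, evaluated term by term via its Taylor expansion.
func fakeExponential(factor, numerator, denominator *big.Int) *big.Int {
	output := new(big.Int)
	accum := new(big.Int).Mul(factor, denominator)
	for i := int64(1); accum.Sign() > 0; i++ {
		output.Add(output, accum)
		accum.Mul(accum, numerator)
		accum.Div(accum, new(big.Int).Mul(denominator, big.NewInt(i)))
	}
	return output.Div(output, denominator)
}

// blobBaseFee computes the blob base fee for a header's excess blob gas, in wei.
func blobBaseFee(excessBlobGas uint64) *big.Int {
	return fakeExponential(
		big.NewInt(minBlobBaseFee),
		new(big.Int).SetUint64(excessBlobGas),
		big.NewInt(blobBaseFeeUpdateFraction),
	)
}
```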
func makeSidecar(blob *kzg4844.Blob) (*gethTypes.BlobTxSidecar, error) {

@@ -282,13 +282,17 @@ func testResubmitZeroGasPriceTransaction(t *testing.T) {
gasFeeCap: big.NewInt(0),
gasLimit: 50000,
}
tx, err := s.createAndSendTx(feeData, &common.Address{}, nil, nil, nil)
tx, err := s.createTx(feeData, &common.Address{}, nil, nil, s.transactionSigner.GetNonce())
assert.NoError(t, err)
assert.NotNil(t, tx)
err = s.client.SendTransaction(s.ctx, tx)
assert.NoError(t, err)
// Increase at least 1 wei in gas price, gas tip cap and gas fee cap.
// Bumping the fees enough times to let the transaction be included in a block.
for i := 0; i < 30; i++ {
tx, err = s.resubmitTransaction(tx, 0, 0)
tx, err = s.createReplacingTransaction(tx, 0, 0)
assert.NoError(t, err)
err = s.client.SendTransaction(s.ctx, tx)
assert.NoError(t, err)
}

@@ -369,10 +373,14 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
sidecar, err = makeSidecar(txBlob[i])
assert.NoError(t, err)
}
tx, err := s.createAndSendTx(feeData, &common.Address{}, nil, sidecar, nil)
tx, err := s.createTx(feeData, &common.Address{}, nil, sidecar, s.transactionSigner.GetNonce())
assert.NoError(t, err)
assert.NotNil(t, tx)
resubmittedTx, err := s.resubmitTransaction(tx, 0, 0)
err = s.client.SendTransaction(s.ctx, tx)
assert.NoError(t, err)
resubmittedTx, err := s.createReplacingTransaction(tx, 0, 0)
assert.NoError(t, err)
err = s.client.SendTransaction(s.ctx, resubmittedTx)
assert.NoError(t, err)

assert.Eventually(t, func() bool {

@@ -412,10 +420,14 @@ func testResubmitUnderpricedTransaction(t *testing.T) {
gasFeeCap: big.NewInt(1000000000),
gasLimit: 50000,
}
tx, err := s.createAndSendTx(feeData, &common.Address{}, nil, nil, nil)
tx, err := s.createTx(feeData, &common.Address{}, nil, nil, s.transactionSigner.GetNonce())
assert.NoError(t, err)
assert.NotNil(t, tx)
_, err = s.resubmitTransaction(tx, 0, 0)
err = s.client.SendTransaction(s.ctx, tx)
assert.NoError(t, err)
resubmittedTx, err := s.createReplacingTransaction(tx, 0, 0)
assert.NoError(t, err)
err = s.client.SendTransaction(s.ctx, resubmittedTx)
assert.Error(t, err, "replacement transaction underpriced")

assert.Eventually(t, func() bool {

@@ -462,7 +474,9 @@ func testResubmitDynamicFeeTransactionWithRisingBaseFee(t *testing.T) {
// bump the basefee by 10x
baseFeePerGas *= 10
// resubmit and check that the gas fee has been adjusted accordingly
newTx, err := s.resubmitTransaction(tx, baseFeePerGas, 0)
resubmittedTx, err := s.createReplacingTransaction(tx, baseFeePerGas, 0)
assert.NoError(t, err)
err = s.client.SendTransaction(s.ctx, resubmittedTx)
assert.NoError(t, err)

maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)

@@ -471,7 +485,7 @@ func testResubmitDynamicFeeTransactionWithRisingBaseFee(t *testing.T) {
expectedGasFeeCap = maxGasPrice
}

assert.Equal(t, expectedGasFeeCap.Uint64(), newTx.GasFeeCap().Uint64())
assert.Equal(t, expectedGasFeeCap.Uint64(), resubmittedTx.GasFeeCap().Uint64())
s.Stop()
}

@@ -511,7 +525,9 @@ func testResubmitBlobTransactionWithRisingBaseFeeAndBlobBaseFee(t *testing.T) {
baseFeePerGas *= 10
blobBaseFeePerGas *= 10
// resubmit and check that the gas fee has been adjusted accordingly
newTx, err := s.resubmitTransaction(tx, baseFeePerGas, blobBaseFeePerGas)
resubmittedTx, err := s.createReplacingTransaction(tx, baseFeePerGas, blobBaseFeePerGas)
assert.NoError(t, err)
err = s.client.SendTransaction(s.ctx, resubmittedTx)
assert.NoError(t, err)

maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)

@@ -526,8 +542,8 @@ func testResubmitBlobTransactionWithRisingBaseFeeAndBlobBaseFee(t *testing.T) {
expectedBlobGasFeeCap = maxBlobGasPrice
}

assert.Equal(t, expectedGasFeeCap.Uint64(), newTx.GasFeeCap().Uint64())
assert.Equal(t, expectedBlobGasFeeCap.Uint64(), newTx.BlobGasFeeCap().Uint64())
assert.Equal(t, expectedGasFeeCap.Uint64(), resubmittedTx.GasFeeCap().Uint64())
assert.Equal(t, expectedBlobGasFeeCap.Uint64(), resubmittedTx.BlobGasFeeCap().Uint64())
s.Stop()
}
@@ -17,10 +17,10 @@ import (
)

const (
// PrivateKeySignerType
// PrivateKeySignerType is the type of signer that uses a private key to sign transactions
PrivateKeySignerType = "PrivateKey"

// RemoteSignerType
// RemoteSignerType is the type of signer that uses a remote signer to sign transactions
RemoteSignerType = "RemoteSigner"
)

@@ -3,6 +3,7 @@ package watcher
import (
"context"
"fmt"
"math/big"
"time"

"github.com/prometheus/client_golang/prometheus"

@@ -13,8 +14,6 @@ import (
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"

"scroll-tech/common/forks"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
@@ -35,7 +34,8 @@ type BatchProposer struct {
gasCostIncreaseMultiplier float64
maxUncompressedBatchBytesSize uint64

chainCfg *params.ChainConfig
minCodecVersion encoding.CodecVersion
chainCfg *params.ChainConfig

batchProposerCircleTotal prometheus.Counter
proposeBatchFailureTotal prometheus.Counter

@@ -59,7 +59,7 @@ type BatchProposer struct {
}

// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
log.Info("new batch proposer",
"maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch,
"maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,

@@ -79,6 +79,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
batchTimeoutSec: cfg.BatchTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,

batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
@@ -162,13 +163,9 @@ func (p *BatchProposer) TryProposeBatch() {

func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion encoding.CodecVersion, metrics *utils.BatchMetrics) error {
compatibilityBreachOccurred := false
codecConfig := utils.CodecConfig{
Version: codecVersion,
EnableCompress: true, // codecv4 is the only version that supports conditional compression, default to enable compression
}

for {
compatible, err := utils.CheckBatchCompressedDataCompatibility(batch, codecVersion)
compatible, err := encoding.CheckBatchCompressedDataCompatibility(batch, codecVersion)
if err != nil {
log.Error("Failed to check batch compressed data compatibility", "batch index", batch.Index, "codecVersion", codecVersion, "err", err)
return err

@@ -183,7 +180,6 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
if len(batch.Chunks) == 1 {
log.Warn("Disable compression: cannot truncate batch with only 1 chunk for compatibility", "start block number", batch.Chunks[0].Blocks[0].Header.Number.Uint64(),
"end block number", batch.Chunks[0].Blocks[len(batch.Chunks[0].Blocks)-1].Header.Number.Uint64())
codecConfig.EnableCompress = false
break
}

@@ -197,7 +193,7 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en

// recalculate batch metrics after truncation
var calcErr error
metrics, calcErr = utils.CalculateBatchMetrics(batch, codecConfig)
metrics, calcErr = utils.CalculateBatchMetrics(batch, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics, batch index: %v, error: %w", batch.Index, calcErr)
}
@@ -214,15 +210,15 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en

var totalGasUsed uint64
for _, chunk := range batch.Chunks {
totalGasUsed += chunk.L2GasUsed()
totalGasUsed += chunk.TotalGasUsed()
}
p.batchProposeThroughput.Add(float64(totalGasUsed))

p.proposeBatchUpdateInfoTotal.Inc()
err := p.db.Transaction(func(dbTX *gorm.DB) error {
dbBatch, dbErr := p.batchOrm.InsertBatch(p.ctx, batch, codecConfig, *metrics, dbTX)
dbBatch, dbErr := p.batchOrm.InsertBatch(p.ctx, batch, codecVersion, *metrics, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure", "index", batch.Index, "parent hash", batch.ParentBatchHash.Hex(), "codec version", codecVersion, "enable compress", codecConfig.EnableCompress, "error", dbErr)
log.Warn("BatchProposer.updateDBBatchInfo insert batch failure", "index", batch.Index, "parent hash", batch.ParentBatchHash.Hex(), "codec version", codecVersion, "error", dbErr)
return dbErr
}
if dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex, dbBatch.Hash, dbTX); dbErr != nil {
@@ -249,10 +245,19 @@ func (p *BatchProposer) proposeBatch() error {
return err
}

maxChunksThisBatch := forks.GetMaxChunksPerBatch(p.chainCfg, firstUnbatchedChunk.StartBlockNumber, firstUnbatchedChunk.StartBlockTime)
codec := encoding.CodecFromConfig(p.chainCfg, new(big.Int).SetUint64(firstUnbatchedChunk.StartBlockNumber), firstUnbatchedChunk.StartBlockTime)
if codec == nil {
return fmt.Errorf("failed to retrieve codec for block number %v and time %v", firstUnbatchedChunk.StartBlockNumber, firstUnbatchedChunk.StartBlockTime)
}

if codec.Version() < p.minCodecVersion {
return fmt.Errorf("unsupported codec version: %v, expected at least %v", codec.Version(), p.minCodecVersion)
}

maxChunksThisBatch := codec.MaxNumChunksPerBatch()

// select at most maxChunkNumPerBatch chunks
dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, firstUnbatchedChunkIndex, int(maxChunksThisBatch))
dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, firstUnbatchedChunkIndex, maxChunksThisBatch)
if err != nil {
return err
}
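Instead of asking the forks helper for a per-fork chunk limit, the proposer now resolves the codec that is active at the first unbatched chunk and lets it dictate both the minimum acceptable version and the chunk budget. A minimal sketch of that check with a hypothetical `codec` interface (the real one lives in github.com/scroll-tech/da-codec/encoding and uses encoding.CodecVersion rather than plain ints):

```go
package proposer

import "fmt"

// codec models only the two calls the proposer relies on; it is a stand-in for
// the da-codec interface, not the actual type.
type codec interface {
	Version() int
	MaxNumChunksPerBatch() int
}

// chunkBudget rejects codecs older than minVersion and otherwise returns how many
// chunks may be pulled into the next batch, mirroring the selection logic above.
func chunkBudget(c codec, minVersion int) (int, error) {
	if c == nil {
		return 0, fmt.Errorf("no codec active for this block range")
	}
	if c.Version() < minVersion {
		return 0, fmt.Errorf("unsupported codec version: %d, expected at least %d", c.Version(), minVersion)
	}
	return c.MaxNumChunksPerBatch(), nil
}
```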
@@ -263,12 +268,12 @@ func (p *BatchProposer) proposeBatch() error {

// Ensure all chunks in the same batch use the same hardfork name
// If a different hardfork name is found, truncate the chunks slice at that point
hardforkName := forks.GetHardforkName(p.chainCfg, dbChunks[0].StartBlockNumber, dbChunks[0].StartBlockTime)
hardforkName := encoding.GetHardforkName(p.chainCfg, dbChunks[0].StartBlockNumber, dbChunks[0].StartBlockTime)
for i := 1; i < len(dbChunks); i++ {
currentHardfork := forks.GetHardforkName(p.chainCfg, dbChunks[i].StartBlockNumber, dbChunks[i].StartBlockTime)
currentHardfork := encoding.GetHardforkName(p.chainCfg, dbChunks[i].StartBlockNumber, dbChunks[i].StartBlockTime)
if currentHardfork != hardforkName {
dbChunks = dbChunks[:i]
maxChunksThisBatch = uint64(len(dbChunks)) // update maxChunksThisBatch to trigger batching, because these chunks are the last chunks before the hardfork
maxChunksThisBatch = len(dbChunks) // update maxChunksThisBatch to trigger batching, because these chunks are the last chunks before the hardfork
break
}
}
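The hardfork guard is unchanged in spirit: chunks are truncated at the first index whose hardfork differs from the first chunk's, and the chunk budget is lowered to the truncated length so the pre-fork chunks are batched immediately. A small self-contained sketch of the truncation, with `hardforkName` standing in for encoding.GetHardforkName and `chunkInfo` for the ORM chunk row:

```go
package proposer

// chunkInfo carries only the fields needed to decide a chunk's hardfork;
// the real orm.Chunk row has many more columns.
type chunkInfo struct {
	StartBlockNumber uint64
	StartBlockTime   uint64
}

// truncateAtHardforkBoundary cuts the chunk list at the first chunk whose hardfork
// differs from the first chunk's, so a batch never mixes two forks. It returns the
// truncated slice and the new per-batch chunk budget.
func truncateAtHardforkBoundary(chunks []chunkInfo, hardforkName func(number, time uint64) string) ([]chunkInfo, int) {
	if len(chunks) == 0 {
		return chunks, 0
	}
	first := hardforkName(chunks[0].StartBlockNumber, chunks[0].StartBlockTime)
	for i := 1; i < len(chunks); i++ {
		if hardforkName(chunks[i].StartBlockNumber, chunks[i].StartBlockTime) != first {
			// These are the last chunks before the fork; cap the batch at this size
			// so they are proposed right away.
			return chunks[:i], i
		}
	}
	return chunks, len(chunks)
}
```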
@@ -283,11 +288,6 @@ func (p *BatchProposer) proposeBatch() error {
return err
}

codecConfig := utils.CodecConfig{
Version: forks.GetCodecVersion(p.chainCfg, firstUnbatchedChunk.StartBlockNumber, firstUnbatchedChunk.StartBlockTime),
EnableCompress: true, // codecv4 is the only version that supports conditional compression, default to enable compression
}

var batch encoding.Batch
batch.Index = dbParentBatch.Index + 1
batch.ParentBatchHash = common.HexToHash(dbParentBatch.Hash)

@@ -295,7 +295,7 @@ func (p *BatchProposer) proposeBatch() error {

for i, chunk := range daChunks {
batch.Chunks = append(batch.Chunks, chunk)
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecConfig)
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version())
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
}

@@ -324,22 +324,22 @@ func (p *BatchProposer) proposeBatch() error {

batch.Chunks = batch.Chunks[:len(batch.Chunks)-1]

metrics, err := utils.CalculateBatchMetrics(&batch, codecConfig)
metrics, err := utils.CalculateBatchMetrics(&batch, codec.Version())
if err != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", err)
}

p.recordAllBatchMetrics(metrics)
return p.updateDBBatchInfo(&batch, codecConfig.Version, metrics)
return p.updateDBBatchInfo(&batch, codec.Version(), metrics)
}
}

metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecConfig)
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version())
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
}
currentTimeSec := uint64(time.Now().Unix())
if metrics.FirstBlockTimestamp+p.batchTimeoutSec < currentTimeSec || metrics.NumChunks == maxChunksThisBatch {
if metrics.FirstBlockTimestamp+p.batchTimeoutSec < currentTimeSec || metrics.NumChunks == uint64(maxChunksThisBatch) {
log.Info("reached maximum number of chunks in batch or first block timeout",
"chunk count", metrics.NumChunks,
"start block number", dbChunks[0].StartBlockNumber,

@@ -348,7 +348,7 @@ func (p *BatchProposer) proposeBatch() error {

p.batchFirstBlockTimeoutReached.Inc()
p.recordAllBatchMetrics(metrics)
return p.updateDBBatchInfo(&batch, codecConfig.Version, metrics)
return p.updateDBBatchInfo(&batch, codec.Version(), metrics)
}

log.Debug("pending chunks do not reach one of the constraints or contain a timeout block")
@@ -20,430 +20,7 @@ import (
"scroll-tech/rollup/internal/utils"
)
func testBatchProposerCodecv0Limits(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
maxL1CommitGas uint64
|
||||
maxL1CommitCalldataSize uint64
|
||||
batchTimeoutSec uint64
|
||||
expectedBatchesLen int
|
||||
expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0
|
||||
}{
|
||||
{
|
||||
name: "NoLimitReached",
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
batchTimeoutSec: 1000000000000,
|
||||
expectedBatchesLen: 0,
|
||||
},
|
||||
{
|
||||
name: "Timeout",
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
batchTimeoutSec: 0,
|
||||
expectedBatchesLen: 1,
|
||||
expectedChunksInFirstBatch: 2,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitGasPerBatchIs0",
|
||||
maxL1CommitGas: 0,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
batchTimeoutSec: 1000000000000,
|
||||
expectedBatchesLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitCalldataSizePerBatchIs0",
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 0,
|
||||
batchTimeoutSec: 1000000000000,
|
||||
expectedBatchesLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitGasPerBatchIsFirstChunk",
|
||||
maxL1CommitGas: 200000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
batchTimeoutSec: 1000000000000,
|
||||
expectedBatchesLen: 1,
|
||||
expectedChunksInFirstBatch: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk",
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 298,
|
||||
batchTimeoutSec: 1000000000000,
|
||||
expectedBatchesLen: 1,
|
||||
expectedChunksInFirstBatch: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
// Add genesis batch.
|
||||
block := &encoding.Block{
|
||||
Header: &gethTypes.Header{
|
||||
Number: big.NewInt(0),
|
||||
},
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
}
|
||||
chunk := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: 1,
|
||||
MaxTxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 50000000000,
|
||||
MaxL1CommitCalldataSizePerChunk: 1000000,
|
||||
MaxRowConsumptionPerChunk: 1000000,
|
||||
ChunkTimeoutSec: 300,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
}, ¶ms.ChainConfig{}, db, nil)
|
||||
cp.TryProposeChunk() // chunk1 contains block1
|
||||
cp.TryProposeChunk() // chunk2 contains block2
|
||||
|
||||
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(6042), chunks[0].TotalL1CommitGas)
|
||||
assert.Equal(t, uint64(298), chunks[0].TotalL1CommitCalldataSize)
|
||||
assert.Equal(t, uint64(94618), chunks[1].TotalL1CommitGas)
|
||||
assert.Equal(t, uint64(5737), chunks[1].TotalL1CommitCalldataSize)
|
||||
|
||||
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxL1CommitGasPerBatch: tt.maxL1CommitGas,
|
||||
MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
|
||||
BatchTimeoutSec: tt.batchTimeoutSec,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
}, ¶ms.ChainConfig{}, db, nil)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, batches, tt.expectedBatchesLen+1)
|
||||
batches = batches[1:]
|
||||
if tt.expectedBatchesLen > 0 {
|
||||
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
|
||||
assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex)
|
||||
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
|
||||
|
||||
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch))
|
||||
for _, chunk := range dbChunks {
|
||||
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testBatchProposerCodecv1Limits(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
maxL1CommitGas uint64
|
||||
maxL1CommitCalldataSize uint64
|
||||
batchTimeoutSec uint64
|
||||
expectedBatchesLen int
|
||||
expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0
|
||||
}{
|
||||
{
|
||||
name: "NoLimitReached",
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
batchTimeoutSec: 1000000000000,
|
||||
expectedBatchesLen: 0,
|
||||
},
|
||||
{
|
||||
name: "Timeout",
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
batchTimeoutSec: 0,
|
||||
expectedBatchesLen: 1,
|
||||
expectedChunksInFirstBatch: 2,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitGasPerBatchIs0",
|
||||
maxL1CommitGas: 0,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
batchTimeoutSec: 1000000000000,
|
||||
expectedBatchesLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitCalldataSizePerBatchIs0",
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 0,
|
||||
batchTimeoutSec: 1000000000000,
|
||||
expectedBatchesLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitGasPerBatchIsFirstChunk",
|
||||
maxL1CommitGas: 190330,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
batchTimeoutSec: 1000000000000,
|
||||
expectedBatchesLen: 1,
|
||||
expectedChunksInFirstBatch: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk",
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 60,
|
||||
batchTimeoutSec: 1000000000000,
|
||||
expectedBatchesLen: 1,
|
||||
expectedChunksInFirstBatch: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
// Add genesis batch.
|
||||
block := &encoding.Block{
|
||||
Header: &gethTypes.Header{
|
||||
Number: big.NewInt(0),
|
||||
},
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
}
|
||||
chunk := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: 1,
|
||||
MaxTxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 50000000000,
|
||||
MaxL1CommitCalldataSizePerChunk: 1000000,
|
||||
MaxRowConsumptionPerChunk: 1000000,
|
||||
ChunkTimeoutSec: 300,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
}, ¶ms.ChainConfig{
|
||||
BernoulliBlock: big.NewInt(0),
|
||||
}, db, nil)
|
||||
cp.TryProposeChunk() // chunk1 contains block1
|
||||
cp.TryProposeChunk() // chunk2 contains block2
|
||||
|
||||
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
|
||||
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
|
||||
assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
|
||||
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
|
||||
|
||||
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxL1CommitGasPerBatch: tt.maxL1CommitGas,
|
||||
MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
|
||||
BatchTimeoutSec: tt.batchTimeoutSec,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
}, ¶ms.ChainConfig{
|
||||
BernoulliBlock: big.NewInt(0),
|
||||
}, db, nil)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, batches, tt.expectedBatchesLen+1)
|
||||
batches = batches[1:]
|
||||
if tt.expectedBatchesLen > 0 {
|
||||
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
|
||||
assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex)
|
||||
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
|
||||
|
||||
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch))
|
||||
for _, chunk := range dbChunks {
|
||||
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testBatchProposerCodecv2Limits(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
maxL1CommitGas uint64
|
||||
maxL1CommitCalldataSize uint64
|
||||
batchTimeoutSec uint64
|
||||
expectedBatchesLen int
|
||||
expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0
|
||||
}{
|
||||
{
|
||||
name: "NoLimitReached",
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
batchTimeoutSec: 1000000000000,
|
||||
expectedBatchesLen: 0,
|
||||
},
|
||||
{
|
||||
name: "Timeout",
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
batchTimeoutSec: 0,
|
||||
expectedBatchesLen: 1,
|
||||
expectedChunksInFirstBatch: 2,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitGasPerBatchIs0",
|
||||
maxL1CommitGas: 0,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
batchTimeoutSec: 1000000000000,
|
||||
expectedBatchesLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitCalldataSizePerBatchIs0",
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 0,
|
||||
batchTimeoutSec: 1000000000000,
|
||||
expectedBatchesLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitGasPerBatchIsFirstChunk",
|
||||
maxL1CommitGas: 189179,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
batchTimeoutSec: 1000000000000,
|
||||
expectedBatchesLen: 1,
|
||||
expectedChunksInFirstBatch: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk",
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 60,
|
||||
batchTimeoutSec: 1000000000000,
|
||||
expectedBatchesLen: 1,
|
||||
expectedChunksInFirstBatch: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
// Add genesis batch.
|
||||
block := &encoding.Block{
|
||||
Header: &gethTypes.Header{
|
||||
Number: big.NewInt(0),
|
||||
},
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
}
|
||||
chunk := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: 1,
|
||||
MaxTxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 50000000000,
|
||||
MaxL1CommitCalldataSizePerChunk: 1000000,
|
||||
MaxRowConsumptionPerChunk: 1000000,
|
||||
ChunkTimeoutSec: 300,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, ¶ms.ChainConfig{
|
||||
BernoulliBlock: big.NewInt(0),
|
||||
CurieBlock: big.NewInt(0),
|
||||
}, db, nil)
|
||||
cp.TryProposeChunk() // chunk1 contains block1
|
||||
cp.TryProposeChunk() // chunk2 contains block2
|
||||
|
||||
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
|
||||
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
|
||||
assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
|
||||
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
|
||||
|
||||
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxL1CommitGasPerBatch: tt.maxL1CommitGas,
|
||||
MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
|
||||
BatchTimeoutSec: tt.batchTimeoutSec,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, ¶ms.ChainConfig{
|
||||
BernoulliBlock: big.NewInt(0),
|
||||
CurieBlock: big.NewInt(0),
|
||||
}, db, nil)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, batches, tt.expectedBatchesLen+1)
|
||||
batches = batches[1:]
|
||||
if tt.expectedBatchesLen > 0 {
|
||||
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
|
||||
assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex)
|
||||
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
|
||||
|
||||
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch))
|
||||
for _, chunk := range dbChunks {
|
||||
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testBatchProposerCodecv3Limits(t *testing.T) {
func testBatchProposerLimitsCodecV4(t *testing.T) {
tests := []struct {
name string
maxL1CommitGas uint64

@@ -515,7 +92,7 @@ func testBatchProposerCodecv3Limits(t *testing.T) {
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{})
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,

@@ -524,7 +101,7 @@ func testBatchProposerCodecv3Limits(t *testing.T) {
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)

l2BlockOrm := orm.NewL2Block(db)

@@ -540,10 +117,12 @@ func testBatchProposerCodecv3Limits(t *testing.T) {
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{
}, encoding.CodecV4, &params.ChainConfig{
LondonBlock: big.NewInt(0),
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
DarwinV2Time: new(uint64),
}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2

@@ -561,10 +140,12 @@ func testBatchProposerCodecv3Limits(t *testing.T) {
BatchTimeoutSec: tt.batchTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{
}, encoding.CodecV4, &params.ChainConfig{
LondonBlock: big.NewInt(0),
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
DarwinV2Time: new(uint64),
}, db, nil)
bp.TryProposeBatch()

@@ -590,7 +171,7 @@ func testBatchProposerCodecv3Limits(t *testing.T) {
}
}

func testBatchCommitGasAndCalldataSizeCodecv0Estimation(t *testing.T) {
func testBatchCommitGasAndCalldataSizeEstimationCodecV4(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

@@ -605,7 +186,7 @@ func testBatchCommitGasAndCalldataSizeCodecv0Estimation(t *testing.T) {
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{})
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,

@@ -614,165 +195,7 @@ func testBatchCommitGasAndCalldataSizeCodecv0Estimation(t *testing.T) {
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
assert.NoError(t, err)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: 1,
|
||||
MaxTxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 50000000000,
|
||||
MaxL1CommitCalldataSizePerChunk: 1000000,
|
||||
MaxRowConsumptionPerChunk: 1000000,
|
||||
ChunkTimeoutSec: 300,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
}, ¶ms.ChainConfig{}, db, nil)
|
||||
cp.TryProposeChunk() // chunk1 contains block1
|
||||
cp.TryProposeChunk() // chunk2 contains block2
|
||||
|
||||
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(6042), chunks[0].TotalL1CommitGas)
|
||||
assert.Equal(t, uint64(298), chunks[0].TotalL1CommitCalldataSize)
|
||||
assert.Equal(t, uint64(94618), chunks[1].TotalL1CommitGas)
|
||||
assert.Equal(t, uint64(5737), chunks[1].TotalL1CommitCalldataSize)
|
||||
|
||||
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxL1CommitGasPerBatch: 50000000000,
|
||||
MaxL1CommitCalldataSizePerBatch: 1000000,
|
||||
BatchTimeoutSec: 0,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
}, ¶ms.ChainConfig{}, db, nil)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, batches, 2)
|
||||
batches = batches[1:]
|
||||
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
|
||||
assert.Equal(t, uint64(2), batches[0].EndChunkIndex)
|
||||
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
|
||||
|
||||
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, dbChunks, 2)
|
||||
for _, chunk := range dbChunks {
|
||||
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
|
||||
}
|
||||
|
||||
assert.Equal(t, uint64(258383), batches[0].TotalL1CommitGas)
|
||||
assert.Equal(t, uint64(6035), batches[0].TotalL1CommitCalldataSize)
|
||||
}
|
||||
|
||||
func testBatchCommitGasAndCalldataSizeCodecv1Estimation(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
// Add genesis batch.
|
||||
block := &encoding.Block{
|
||||
Header: &gethTypes.Header{
|
||||
Number: big.NewInt(0),
|
||||
},
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
}
|
||||
chunk := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: 1,
|
||||
MaxTxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 50000000000,
|
||||
MaxL1CommitCalldataSizePerChunk: 1000000,
|
||||
MaxRowConsumptionPerChunk: 1000000,
|
||||
ChunkTimeoutSec: 300,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil)
|
||||
cp.TryProposeChunk() // chunk1 contains block1
|
||||
cp.TryProposeChunk() // chunk2 contains block2
|
||||
|
||||
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
|
||||
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
|
||||
assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
|
||||
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
|
||||
|
||||
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxL1CommitGasPerBatch: 50000000000,
|
||||
MaxL1CommitCalldataSizePerBatch: 1000000,
|
||||
BatchTimeoutSec: 0,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, batches, 2)
|
||||
batches = batches[1:]
|
||||
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
|
||||
assert.Equal(t, uint64(2), batches[0].EndChunkIndex)
|
||||
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
|
||||
|
||||
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, dbChunks, 2)
|
||||
for _, chunk := range dbChunks {
|
||||
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
|
||||
}
|
||||
|
||||
assert.Equal(t, uint64(159350), batches[0].TotalL1CommitGas)
|
||||
assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
|
||||
}
|
||||
|
||||
func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
// Add genesis batch.
|
||||
block := &encoding.Block{
|
||||
Header: &gethTypes.Header{
|
||||
Number: big.NewInt(0),
|
||||
},
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
}
|
||||
chunk := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
@@ -788,88 +211,7 @@ func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) {
|
||||
ChunkTimeoutSec: 300,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, db, nil)
|
||||
cp.TryProposeChunk() // chunk1 contains block1
|
||||
cp.TryProposeChunk() // chunk2 contains block2
|
||||
|
||||
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
|
||||
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
|
||||
assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
|
||||
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
|
||||
|
||||
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxL1CommitGasPerBatch: 50000000000,
|
||||
MaxL1CommitCalldataSizePerBatch: 1000000,
|
||||
BatchTimeoutSec: 0,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, db, nil)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, batches, 2)
|
||||
batches = batches[1:]
|
||||
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
|
||||
assert.Equal(t, uint64(2), batches[0].EndChunkIndex)
|
||||
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
|
||||
|
||||
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, dbChunks, 2)
|
||||
for _, chunk := range dbChunks {
|
||||
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
|
||||
}
|
||||
|
||||
assert.Equal(t, uint64(159350), batches[0].TotalL1CommitGas)
|
||||
assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
|
||||
}
|
||||
|
||||
func testBatchCommitGasAndCalldataSizeCodecv3Estimation(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
// Add genesis batch.
|
||||
block := &encoding.Block{
|
||||
Header: &gethTypes.Header{
|
||||
Number: big.NewInt(0),
|
||||
},
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
}
|
||||
chunk := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: 1,
|
||||
MaxTxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 50000000000,
|
||||
MaxL1CommitCalldataSizePerChunk: 1000000,
|
||||
MaxRowConsumptionPerChunk: 1000000,
|
||||
ChunkTimeoutSec: 300,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}, db, nil)
|
||||
}, encoding.CodecV4, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}, db, nil)
|
||||
cp.TryProposeChunk() // chunk1 contains block1
|
||||
cp.TryProposeChunk() // chunk2 contains block2
|
||||
|
||||
@@ -886,7 +228,7 @@ func testBatchCommitGasAndCalldataSizeCodecv3Estimation(t *testing.T) {
|
||||
BatchTimeoutSec: 0,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}, db, nil)
|
||||
}, encoding.CodecV4, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}, db, nil)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
@@ -910,8 +252,8 @@ func testBatchCommitGasAndCalldataSizeCodecv3Estimation(t *testing.T) {
|
||||
assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
|
||||
}
|
||||
|
||||
func testBatchProposerBlobSizeLimit(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
|
||||
func testBatchProposerBlobSizeLimitCodecV4(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupDB(t)
|
||||
|
||||
@@ -926,7 +268,7 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{})
|
||||
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
@@ -935,18 +277,14 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV0 { // will never hit blob size limit
|
||||
chainConfig = &params.ChainConfig{}
|
||||
} else if codecVersion == encoding.CodecV1 {
|
||||
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
|
||||
} else if codecVersion == encoding.CodecV2 {
|
||||
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
} else {
|
||||
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
|
||||
assert.Fail(t, "unsupported codec version, expected CodecV4")
|
||||
}
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
@@ -958,7 +296,7 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
|
||||
ChunkTimeoutSec: 0,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, chainConfig, db, nil)
|
||||
}, encoding.CodecV4, chainConfig, db, nil)
|
||||
|
||||
blockHeight := int64(0)
|
||||
block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
|
||||
@@ -979,7 +317,7 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
|
||||
BatchTimeoutSec: math.MaxUint32,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, chainConfig, db, nil)
|
||||
}, encoding.CodecV4, chainConfig, db, nil)
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
bp.TryProposeBatch()
|
||||
@@ -991,18 +329,11 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
|
||||
|
||||
var expectedNumBatches int
|
||||
var numChunksMultiplier uint64
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
expectedNumBatches = 2
|
||||
numChunksMultiplier = 15
|
||||
} else if codecVersion == encoding.CodecV1 {
|
||||
expectedNumBatches = 2
|
||||
numChunksMultiplier = 1
|
||||
} else if codecVersion == encoding.CodecV2 {
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
expectedNumBatches = 2
|
||||
numChunksMultiplier = 45
|
||||
} else {
|
||||
expectedNumBatches = 2
|
||||
numChunksMultiplier = 45
|
||||
assert.Fail(t, "unsupported codec version, expected CodecV4")
|
||||
}
|
||||
assert.Len(t, batches, expectedNumBatches)
|
||||
|
||||
@@ -1013,8 +344,8 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
|
||||
func testBatchProposerMaxChunkNumPerBatchLimitCodecV4(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupDB(t)
|
||||
|
||||
@@ -1029,7 +360,7 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{})
|
||||
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
@@ -1038,23 +369,16 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
var expectedChunkNum uint64
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
chainConfig = &params.ChainConfig{}
|
||||
expectedChunkNum = 15
|
||||
} else if codecVersion == encoding.CodecV1 {
|
||||
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
|
||||
expectedChunkNum = 15
|
||||
} else if codecVersion == encoding.CodecV2 {
|
||||
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
expectedChunkNum = 45
|
||||
} else {
|
||||
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
|
||||
expectedChunkNum = 45
|
||||
assert.Fail(t, "unsupported codec version, expected CodecV4")
|
||||
}
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
@@ -1066,7 +390,7 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
|
||||
ChunkTimeoutSec: 0,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, chainConfig, db, nil)
|
||||
}, encoding.CodecV4, chainConfig, db, nil)
|
||||
|
||||
block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
|
||||
for blockHeight := int64(1); blockHeight <= 60; blockHeight++ {
|
||||
@@ -1082,7 +406,7 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
|
||||
BatchTimeoutSec: math.MaxUint32,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, chainConfig, db, nil)
|
||||
}, encoding.CodecV4, chainConfig, db, nil)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
@@ -1095,85 +419,3 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
|
||||
database.CloseDB(db)
|
||||
}
|
||||
}
|
||||
|
||||
func testBatchProposerRespectHardforks(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
chainConfig := &params.ChainConfig{
|
||||
BernoulliBlock: big.NewInt(1),
|
||||
CurieBlock: big.NewInt(2),
|
||||
DarwinTime: func() *uint64 { t := uint64(4); return &t }(),
|
||||
}
|
||||
|
||||
// Add genesis batch.
|
||||
block := &encoding.Block{
|
||||
Header: &gethTypes.Header{
|
||||
Number: big.NewInt(0),
|
||||
},
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
}
|
||||
chunk := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: math.MaxUint64,
|
||||
MaxTxNumPerChunk: math.MaxUint64,
|
||||
MaxL1CommitGasPerChunk: math.MaxUint64,
|
||||
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
|
||||
MaxRowConsumptionPerChunk: math.MaxUint64,
|
||||
ChunkTimeoutSec: 0,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, chainConfig, db, nil)
|
||||
|
||||
block = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
for i := int64(1); i <= 60; i++ {
|
||||
block.Header.Number = big.NewInt(i)
|
||||
block.Header.Time = uint64(i)
|
||||
err = orm.NewL2Block(db).InsertL2Blocks(context.Background(), []*encoding.Block{block})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
cp.TryProposeChunk()
|
||||
}
|
||||
|
||||
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxL1CommitGasPerBatch: math.MaxUint64,
|
||||
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
|
||||
BatchTimeoutSec: 0,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, chainConfig, db, nil)
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
bp.TryProposeBatch()
|
||||
}
|
||||
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, batches, 4)
|
||||
|
||||
expectedEndChunkIndices := []uint64{0, 1, 3, 4}
|
||||
expectedEndBlockNumbers := []uint64{0, 1, 3, 60}
|
||||
for i, batch := range batches {
|
||||
assert.Equal(t, expectedEndChunkIndices[i], batch.EndChunkIndex)
|
||||
chunk, err := chunkOrm.GetChunkByIndex(context.Background(), batch.EndChunkIndex)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedEndBlockNumbers[i], chunk.EndBlockNumber)
|
||||
}
|
||||
}
|
||||
|
||||
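Note (editor's sketch): the two RespectHardforks tests above pin down the grouping rule the proposers follow, namely that blocks are cut whenever the hardfork name derived from the chain config changes, so a chunk, batch, or bundle never spans a fork boundary. The following is a minimal illustration of that rule, assuming the encoding.GetHardforkName helper used elsewhere in this diff; cutAtHardforkBoundary is a hypothetical name introduced only for illustration.

package watcher

import (
	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/go-ethereum/params"
)

// cutAtHardforkBoundary returns the longest prefix of blocks that share one
// hardfork name; the remaining blocks are left for the next proposal round.
func cutAtHardforkBoundary(chainCfg *params.ChainConfig, blocks []*encoding.Block) []*encoding.Block {
	if len(blocks) == 0 {
		return blocks
	}
	first := encoding.GetHardforkName(chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time)
	for i := 1; i < len(blocks); i++ {
		if encoding.GetHardforkName(chainCfg, blocks[i].Header.Number.Uint64(), blocks[i].Header.Time) != first {
			return blocks[:i] // these are the last blocks before the hardfork
		}
	}
	return blocks
}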
@@ -3,6 +3,7 @@ package watcher
import (
"context"
"errors"
"fmt"
"time"

"github.com/prometheus/client_golang/prometheus"
@@ -12,8 +13,6 @@ import (
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"

"scroll-tech/common/forks"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
)
@@ -30,7 +29,8 @@ type BundleProposer struct {
maxBatchNumPerBundle uint64
bundleTimeoutSec uint64

chainCfg *params.ChainConfig
minCodecVersion encoding.CodecVersion
chainCfg *params.ChainConfig

bundleProposerCircleTotal prometheus.Counter
proposeBundleFailureTotal prometheus.Counter
@@ -42,7 +42,7 @@ type BundleProposer struct {
}

// NewBundleProposer creates a new BundleProposer instance.
func NewBundleProposer(ctx context.Context, cfg *config.BundleProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BundleProposer {
func NewBundleProposer(ctx context.Context, cfg *config.BundleProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BundleProposer {
log.Info("new bundle proposer", "bundleBatchesNum", cfg.MaxBatchNumPerBundle, "bundleTimeoutSec", cfg.BundleTimeoutSec)

p := &BundleProposer{
@@ -53,6 +53,7 @@ func NewBundleProposer(ctx context.Context, cfg *config.BundleProposerConfig, ch
|
||||
bundleOrm: orm.NewBundle(db),
|
||||
maxBatchNumPerBundle: cfg.MaxBatchNumPerBundle,
|
||||
bundleTimeoutSec: cfg.BundleTimeoutSec,
|
||||
minCodecVersion: minCodecVersion,
|
||||
chainCfg: chainCfg,
|
||||
|
||||
bundleProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
@@ -132,7 +133,7 @@ func (p *BundleProposer) proposeBundle() error {
|
||||
|
||||
// select at most maxBlocksThisChunk blocks
|
||||
maxBatchesThisBundle := p.maxBatchNumPerBundle
|
||||
batches, err := p.batchOrm.GetBatchesGEIndexGECodecVersion(p.ctx, firstUnbundledBatchIndex, encoding.CodecV3, int(maxBatchesThisBundle))
|
||||
batches, err := p.batchOrm.GetBatchesGEIndexGECodecVersion(p.ctx, firstUnbundledBatchIndex, p.minCodecVersion, int(maxBatchesThisBundle))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -153,14 +154,19 @@ func (p *BundleProposer) proposeBundle() error {
|
||||
return errors.New("first chunk not found in proposeBundle")
|
||||
}
|
||||
|
||||
hardforkName := forks.GetHardforkName(p.chainCfg, firstChunk.StartBlockNumber, firstChunk.StartBlockTime)
|
||||
hardforkName := encoding.GetHardforkName(p.chainCfg, firstChunk.StartBlockNumber, firstChunk.StartBlockTime)
|
||||
codecVersion := encoding.CodecVersion(batches[0].CodecVersion)
|
||||
|
||||
if codecVersion < p.minCodecVersion {
|
||||
return fmt.Errorf("unsupported codec version: %v, expected at least %v", codecVersion, p.minCodecVersion)
|
||||
}
|
||||
|
||||
for i := 1; i < len(batches); i++ {
|
||||
chunk, err := p.chunkOrm.GetChunkByIndex(p.ctx, batches[i].StartChunkIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
currentHardfork := forks.GetHardforkName(p.chainCfg, chunk.StartBlockNumber, chunk.StartBlockTime)
|
||||
currentHardfork := encoding.GetHardforkName(p.chainCfg, chunk.StartBlockNumber, chunk.StartBlockTime)
|
||||
if currentHardfork != hardforkName {
|
||||
batches = batches[:i]
|
||||
maxBatchesThisBundle = uint64(i) // update maxBlocksThisChunk to trigger chunking, because these blocks are the last blocks before the hardfork
|
||||
|
||||
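Note (editor's sketch): the hunks above change the BundleProposer constructor to take the minimum codec version explicitly and to validate batch codec versions against it. Below is a sketch of how a caller wires this up after the change, assuming the new signature shown above; the wrapper name newBundleProposerV4 and the config values are illustrative only.

package watcher

import (
	"context"

	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/go-ethereum/params"
	"gorm.io/gorm"

	"scroll-tech/rollup/internal/config"
)

// newBundleProposerV4 constructs a BundleProposer that refuses batches whose
// codec version is below CodecV4, matching the updated constructor signature.
func newBundleProposerV4(ctx context.Context, chainCfg *params.ChainConfig, db *gorm.DB) *BundleProposer {
	cfg := &config.BundleProposerConfig{
		MaxBatchNumPerBundle: 20,  // illustrative value
		BundleTimeoutSec:     300, // illustrative value
	}
	return NewBundleProposer(ctx, cfg, encoding.CodecV4, chainCfg, db, nil)
}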
@@ -20,7 +20,7 @@ import (
|
||||
"scroll-tech/rollup/internal/utils"
|
||||
)
|
||||
|
||||
func testBundleProposerLimits(t *testing.T) {
|
||||
func testBundleProposerLimitsCodecV4(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
maxBatchNumPerBundle uint64
|
||||
@@ -72,7 +72,7 @@ func testBundleProposerLimits(t *testing.T) {
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{})
|
||||
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
@@ -81,14 +81,14 @@ func testBundleProposerLimits(t *testing.T) {
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
chainConfig := &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
|
||||
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: 1,
|
||||
@@ -99,7 +99,7 @@ func testBundleProposerLimits(t *testing.T) {
|
||||
ChunkTimeoutSec: math.MaxUint32,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, chainConfig, db, nil)
|
||||
}, encoding.CodecV4, chainConfig, db, nil)
|
||||
|
||||
bap := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxL1CommitGasPerBatch: math.MaxUint64,
|
||||
@@ -107,7 +107,7 @@ func testBundleProposerLimits(t *testing.T) {
|
||||
BatchTimeoutSec: 0,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, chainConfig, db, nil)
|
||||
}, encoding.CodecV4, chainConfig, db, nil)
|
||||
|
||||
cp.TryProposeChunk() // chunk1 contains block1
|
||||
bap.TryProposeBatch() // batch1 contains chunk1
|
||||
@@ -117,7 +117,7 @@ func testBundleProposerLimits(t *testing.T) {
|
||||
bup := NewBundleProposer(context.Background(), &config.BundleProposerConfig{
|
||||
MaxBatchNumPerBundle: tt.maxBatchNumPerBundle,
|
||||
BundleTimeoutSec: tt.bundleTimeoutSec,
|
||||
}, chainConfig, db, nil)
|
||||
}, encoding.CodecV4, chainConfig, db, nil)
|
||||
|
||||
bup.TryProposeBundle()
|
||||
|
||||
@@ -134,93 +134,3 @@ func testBundleProposerLimits(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testBundleProposerRespectHardforks(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
chainConfig := &params.ChainConfig{
|
||||
BernoulliBlock: big.NewInt(1),
|
||||
CurieBlock: big.NewInt(2),
|
||||
DarwinTime: func() *uint64 { t := uint64(4); return &t }(),
|
||||
}
|
||||
|
||||
// Add genesis batch.
|
||||
block := &encoding.Block{
|
||||
Header: &gethTypes.Header{
|
||||
Number: big.NewInt(0),
|
||||
},
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
}
|
||||
chunk := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: math.MaxUint64,
|
||||
MaxTxNumPerChunk: math.MaxUint64,
|
||||
MaxL1CommitGasPerChunk: math.MaxUint64,
|
||||
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
|
||||
MaxRowConsumptionPerChunk: math.MaxUint64,
|
||||
ChunkTimeoutSec: 0,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, chainConfig, db, nil)
|
||||
|
||||
block = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
for i := int64(1); i <= 60; i++ {
|
||||
block.Header.Number = big.NewInt(i)
|
||||
block.Header.Time = uint64(i)
|
||||
err = orm.NewL2Block(db).InsertL2Blocks(context.Background(), []*encoding.Block{block})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
cp.TryProposeChunk()
|
||||
}
|
||||
|
||||
bap := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxL1CommitGasPerBatch: math.MaxUint64,
|
||||
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
|
||||
BatchTimeoutSec: 0,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, chainConfig, db, nil)
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
bap.TryProposeBatch()
|
||||
}
|
||||
|
||||
bup := NewBundleProposer(context.Background(), &config.BundleProposerConfig{
|
||||
MaxBatchNumPerBundle: math.MaxUint64,
|
||||
BundleTimeoutSec: 0,
|
||||
}, chainConfig, db, nil)
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
bup.TryProposeBundle()
|
||||
}
|
||||
|
||||
bundleOrm := orm.NewBundle(db)
|
||||
bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, bundles, 1)
|
||||
|
||||
expectedStartBatchIndices := []uint64{3}
|
||||
expectedEndChunkIndices := []uint64{3}
|
||||
for i, bundle := range bundles {
|
||||
assert.Equal(t, expectedStartBatchIndices[i], bundle.StartBatchIndex)
|
||||
assert.Equal(t, expectedEndChunkIndices[i], bundle.EndBatchIndex)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,8 +12,6 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/forks"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
"scroll-tech/rollup/internal/utils"
|
||||
@@ -36,7 +34,8 @@ type ChunkProposer struct {
|
||||
gasCostIncreaseMultiplier float64
|
||||
maxUncompressedBatchBytesSize uint64
|
||||
|
||||
chainCfg *params.ChainConfig
|
||||
minCodecVersion encoding.CodecVersion
|
||||
chainCfg *params.ChainConfig
|
||||
|
||||
chunkProposerCircleTotal prometheus.Counter
|
||||
proposeChunkFailureTotal prometheus.Counter
|
||||
@@ -62,7 +61,7 @@ type ChunkProposer struct {
|
||||
}
|
||||
|
||||
// NewChunkProposer creates a new ChunkProposer instance.
|
||||
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
|
||||
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
|
||||
log.Info("new chunk proposer",
|
||||
"maxBlockNumPerChunk", cfg.MaxBlockNumPerChunk,
|
||||
"maxTxNumPerChunk", cfg.MaxTxNumPerChunk,
|
||||
@@ -87,6 +86,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
|
||||
chunkTimeoutSec: cfg.ChunkTimeoutSec,
|
||||
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
|
||||
maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
|
||||
minCodecVersion: minCodecVersion,
|
||||
chainCfg: chainCfg,
|
||||
|
||||
chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
@@ -182,13 +182,9 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
|
||||
}
|
||||
|
||||
compatibilityBreachOccurred := false
|
||||
codecConfig := utils.CodecConfig{
|
||||
Version: codecVersion,
|
||||
EnableCompress: true,
|
||||
}
|
||||
|
||||
for {
|
||||
compatible, err := utils.CheckChunkCompressedDataCompatibility(chunk, codecVersion)
|
||||
compatible, err := encoding.CheckChunkCompressedDataCompatibility(chunk, codecVersion)
|
||||
if err != nil {
|
||||
log.Error("Failed to check chunk compressed data compatibility", "start block number", chunk.Blocks[0].Header.Number, "codecVersion", codecVersion, "err", err)
|
||||
return err
|
||||
@@ -202,7 +198,6 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
|
||||
|
||||
if len(chunk.Blocks) == 1 {
|
||||
log.Warn("Disable compression: cannot truncate chunk with only 1 block for compatibility", "block number", chunk.Blocks[0].Header.Number)
|
||||
codecConfig.EnableCompress = false
|
||||
break
|
||||
}
|
||||
|
||||
@@ -216,7 +211,7 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
|
||||
|
||||
// recalculate chunk metrics after truncation
|
||||
var calcErr error
|
||||
metrics, calcErr = utils.CalculateChunkMetrics(chunk, codecConfig)
|
||||
metrics, calcErr = utils.CalculateChunkMetrics(chunk, codecVersion)
|
||||
if calcErr != nil {
|
||||
return fmt.Errorf("failed to calculate chunk metrics, start block number: %v, error: %w", chunk.Blocks[0].Header.Number, calcErr)
|
||||
}
|
||||
@@ -228,13 +223,13 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
|
||||
if len(chunk.Blocks) > 0 {
|
||||
p.chunkProposeBlockHeight.Set(float64(chunk.Blocks[len(chunk.Blocks)-1].Header.Number.Uint64()))
|
||||
}
|
||||
p.chunkProposeThroughput.Add(float64(chunk.L2GasUsed()))
|
||||
p.chunkProposeThroughput.Add(float64(chunk.TotalGasUsed()))
|
||||
|
||||
p.proposeChunkUpdateInfoTotal.Inc()
|
||||
err := p.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, codecConfig, *metrics, dbTX)
|
||||
dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, codecVersion, *metrics, dbTX)
|
||||
if err != nil {
|
||||
log.Warn("ChunkProposer.InsertChunk failed", "codec version", codecVersion, "enable compress", codecConfig.EnableCompress, "err", err)
|
||||
log.Warn("ChunkProposer.InsertChunk failed", "codec version", codecVersion, "err", err)
|
||||
return err
|
||||
}
|
||||
if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
|
||||
@@ -272,9 +267,9 @@ func (p *ChunkProposer) proposeChunk() error {
|
||||
|
||||
// Ensure all blocks in the same chunk use the same hardfork name
|
||||
// If a different hardfork name is found, truncate the blocks slice at that point
|
||||
hardforkName := forks.GetHardforkName(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time)
|
||||
hardforkName := encoding.GetHardforkName(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time)
|
||||
for i := 1; i < len(blocks); i++ {
|
||||
currentHardfork := forks.GetHardforkName(p.chainCfg, blocks[i].Header.Number.Uint64(), blocks[i].Header.Time)
|
||||
currentHardfork := encoding.GetHardforkName(p.chainCfg, blocks[i].Header.Number.Uint64(), blocks[i].Header.Time)
|
||||
if currentHardfork != hardforkName {
|
||||
blocks = blocks[:i]
|
||||
maxBlocksThisChunk = uint64(i) // update maxBlocksThisChunk to trigger chunking, because these blocks are the last blocks before the hardfork
|
||||
@@ -282,27 +277,29 @@ func (p *ChunkProposer) proposeChunk() error {
|
||||
}
|
||||
}
|
||||
|
||||
codecConfig := utils.CodecConfig{
|
||||
Version: forks.GetCodecVersion(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time),
|
||||
EnableCompress: true, // codecv4 is the only version that supports conditional compression, default to enable compression
|
||||
codecVersion := encoding.GetCodecVersion(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time)
|
||||
|
||||
if codecVersion < p.minCodecVersion {
|
||||
return fmt.Errorf("unsupported codec version: %v, expected at least %v", codecVersion, p.minCodecVersion)
|
||||
}
|
||||
|
||||
// Including Curie block in a sole chunk.
|
||||
if p.chainCfg.CurieBlock != nil && blocks[0].Header.Number.Cmp(p.chainCfg.CurieBlock) == 0 {
|
||||
chunk := encoding.Chunk{Blocks: blocks[:1]}
|
||||
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecConfig)
|
||||
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
|
||||
if calcErr != nil {
|
||||
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
|
||||
}
|
||||
p.recordTimerChunkMetrics(metrics)
|
||||
return p.updateDBChunkInfo(&chunk, codecConfig.Version, metrics)
|
||||
return p.updateDBChunkInfo(&chunk, codecVersion, metrics)
|
||||
}
|
||||
|
||||
var chunk encoding.Chunk
|
||||
chunk.Blocks = make([]*encoding.Block, 0, len(blocks))
|
||||
for i, block := range blocks {
|
||||
chunk.Blocks = append(chunk.Blocks, block)
|
||||
|
||||
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecConfig)
|
||||
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
|
||||
if calcErr != nil {
|
||||
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
|
||||
}
|
||||
@@ -339,17 +336,17 @@ func (p *ChunkProposer) proposeChunk() error {
|
||||
|
||||
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1]
|
||||
|
||||
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecConfig)
|
||||
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
|
||||
if calcErr != nil {
|
||||
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
|
||||
}
|
||||
|
||||
p.recordAllChunkMetrics(metrics)
|
||||
return p.updateDBChunkInfo(&chunk, codecConfig.Version, metrics)
|
||||
return p.updateDBChunkInfo(&chunk, codecVersion, metrics)
|
||||
}
|
||||
}
|
||||
|
||||
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecConfig)
|
||||
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
|
||||
if calcErr != nil {
|
||||
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
|
||||
}
|
||||
@@ -364,7 +361,7 @@ func (p *ChunkProposer) proposeChunk() error {
|
||||
|
||||
p.chunkFirstBlockTimeoutReached.Inc()
|
||||
p.recordAllChunkMetrics(metrics)
|
||||
return p.updateDBChunkInfo(&chunk, codecConfig.Version, metrics)
|
||||
return p.updateDBChunkInfo(&chunk, codecVersion, metrics)
|
||||
}
|
||||
|
||||
log.Debug("pending blocks do not reach one of the constraints or contain a timeout block")
|
||||
|
||||
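Note (editor's sketch): in the chunk proposer, the codec version is now derived once from the chain config at the first pending block and checked against the configured minimum before any metrics are calculated. The following compact sketch of that guard uses only the helpers that appear in the hunks above; the function name selectCodecVersion is hypothetical.

package watcher

import (
	"fmt"

	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/go-ethereum/params"
)

// selectCodecVersion derives the codec version for a chunk from its first
// block and rejects anything older than the configured minimum.
func selectCodecVersion(chainCfg *params.ChainConfig, minCodecVersion encoding.CodecVersion, firstBlock *encoding.Block) (encoding.CodecVersion, error) {
	codecVersion := encoding.GetCodecVersion(chainCfg, firstBlock.Header.Number.Uint64(), firstBlock.Header.Time)
	if codecVersion < minCodecVersion {
		return codecVersion, fmt.Errorf("unsupported codec version: %v, expected at least %v", codecVersion, minCodecVersion)
	}
	return codecVersion, nil
}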
@@ -16,515 +16,7 @@ import (
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
)
|
||||
|
||||
func testChunkProposerCodecv0Limits(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
maxBlockNum uint64
|
||||
maxTxNum uint64
|
||||
maxL1CommitGas uint64
|
||||
maxL1CommitCalldataSize uint64
|
||||
maxRowConsumption uint64
|
||||
chunkTimeoutSec uint64
|
||||
expectedChunksLen int
|
||||
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
|
||||
}{
|
||||
{
|
||||
name: "NoLimitReached",
|
||||
maxBlockNum: 100,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "Timeout",
|
||||
maxBlockNum: 100,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 0,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 2,
|
||||
},
|
||||
{
|
||||
name: "MaxTxNumPerChunkIs0",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 0,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitGasPerChunkIs0",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 0,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitCalldataSizePerChunkIs0",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 0,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxRowConsumptionPerChunkIs0",
|
||||
maxBlockNum: 100,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 0,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxBlockNumPerChunkIs1",
|
||||
maxBlockNum: 1,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxTxNumPerChunkIsFirstBlock",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 2,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitGasPerChunkIsFirstBlock",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 7250,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitCalldataSizePerChunkIsFirstBlock",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 298,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxRowConsumptionPerChunkIs1",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: tt.maxBlockNum,
|
||||
MaxTxNumPerChunk: tt.maxTxNum,
|
||||
MaxL1CommitGasPerChunk: tt.maxL1CommitGas,
|
||||
MaxL1CommitCalldataSizePerChunk: tt.maxL1CommitCalldataSize,
|
||||
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
|
||||
ChunkTimeoutSec: tt.chunkTimeoutSec,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
}, &params.ChainConfig{}, db, nil)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, tt.expectedChunksLen)
|
||||
|
||||
if len(chunks) > 0 {
|
||||
blockOrm := orm.NewL2Block(db)
|
||||
chunkHashes, err := blockOrm.GetChunkHashes(context.Background(), tt.expectedBlocksInFirstChunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunkHashes, tt.expectedBlocksInFirstChunk)
|
||||
firstChunkHash := chunks[0].Hash
|
||||
for _, chunkHash := range chunkHashes {
|
||||
assert.Equal(t, firstChunkHash, chunkHash)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testChunkProposerCodecv1Limits(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
maxBlockNum uint64
|
||||
maxTxNum uint64
|
||||
maxL1CommitGas uint64
|
||||
maxL1CommitCalldataSize uint64
|
||||
maxRowConsumption uint64
|
||||
chunkTimeoutSec uint64
|
||||
expectedChunksLen int
|
||||
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
|
||||
}{
|
||||
{
|
||||
name: "NoLimitReached",
|
||||
maxBlockNum: 100,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "Timeout",
|
||||
maxBlockNum: 100,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 0,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 2,
|
||||
},
|
||||
{
|
||||
name: "MaxTxNumPerChunkIs0",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 0,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitGasPerChunkIs0",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 0,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitCalldataSizePerChunkIs0",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 0,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxRowConsumptionPerChunkIs0",
|
||||
maxBlockNum: 100,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 0,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxBlockNumPerChunkIs1",
|
||||
maxBlockNum: 1,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxTxNumPerChunkIsFirstBlock",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 2,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitGasPerChunkIsFirstBlock",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 2500,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitCalldataSizePerChunkIsFirstBlock",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 60,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxRowConsumptionPerChunkIs1",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: tt.maxBlockNum,
|
||||
MaxTxNumPerChunk: tt.maxTxNum,
|
||||
MaxL1CommitGasPerChunk: tt.maxL1CommitGas,
|
||||
MaxL1CommitCalldataSizePerChunk: tt.maxL1CommitCalldataSize,
|
||||
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
|
||||
ChunkTimeoutSec: tt.chunkTimeoutSec,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, tt.expectedChunksLen)
|
||||
|
||||
if len(chunks) > 0 {
|
||||
blockOrm := orm.NewL2Block(db)
|
||||
chunkHashes, err := blockOrm.GetChunkHashes(context.Background(), tt.expectedBlocksInFirstChunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunkHashes, tt.expectedBlocksInFirstChunk)
|
||||
firstChunkHash := chunks[0].Hash
|
||||
for _, chunkHash := range chunkHashes {
|
||||
assert.Equal(t, firstChunkHash, chunkHash)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testChunkProposerCodecv2Limits(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
maxBlockNum uint64
|
||||
maxTxNum uint64
|
||||
maxL1CommitGas uint64
|
||||
maxL1CommitCalldataSize uint64
|
||||
maxRowConsumption uint64
|
||||
chunkTimeoutSec uint64
|
||||
expectedChunksLen int
|
||||
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
|
||||
}{
|
||||
{
|
||||
name: "NoLimitReached",
|
||||
maxBlockNum: 100,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "Timeout",
|
||||
maxBlockNum: 100,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 0,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 2,
|
||||
},
|
||||
{
|
||||
name: "MaxTxNumPerChunkIs0",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 0,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitGasPerChunkIs0",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 0,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitCalldataSizePerChunkIs0",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 0,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxRowConsumptionPerChunkIs0",
|
||||
maxBlockNum: 100,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 0,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxBlockNumPerChunkIs1",
|
||||
maxBlockNum: 1,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxTxNumPerChunkIsFirstBlock",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 2,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitGasPerChunkIsFirstBlock",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 2500,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxL1CommitCalldataSizePerChunkIsFirstBlock",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 60,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxRowConsumptionPerChunkIs1",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxL1CommitGas: 50000000000,
|
||||
maxL1CommitCalldataSize: 1000000,
|
||||
maxRowConsumption: 1,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: tt.maxBlockNum,
|
||||
MaxTxNumPerChunk: tt.maxTxNum,
|
||||
MaxL1CommitGasPerChunk: tt.maxL1CommitGas,
|
||||
MaxL1CommitCalldataSizePerChunk: tt.maxL1CommitCalldataSize,
|
||||
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
|
||||
ChunkTimeoutSec: tt.chunkTimeoutSec,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, db, nil)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, tt.expectedChunksLen)
|
||||
|
||||
if len(chunks) > 0 {
|
||||
blockOrm := orm.NewL2Block(db)
|
||||
chunkHashes, err := blockOrm.GetChunkHashes(context.Background(), tt.expectedBlocksInFirstChunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunkHashes, tt.expectedBlocksInFirstChunk)
|
||||
firstChunkHash := chunks[0].Hash
|
||||
for _, chunkHash := range chunkHashes {
|
||||
assert.Equal(t, firstChunkHash, chunkHash)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testChunkProposerCodecv3Limits(t *testing.T) {
|
||||
func testChunkProposerLimitsCodecV4(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
maxBlockNum uint64
|
||||
@@ -672,7 +164,7 @@ func testChunkProposerCodecv3Limits(t *testing.T) {
|
||||
ChunkTimeoutSec: tt.chunkTimeoutSec,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}, db, nil)
|
||||
}, encoding.CodecV4, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}, db, nil)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
@@ -694,8 +186,8 @@ func testChunkProposerCodecv3Limits(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func testChunkProposerBlobSizeLimit(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
|
||||
func testChunkProposerBlobSizeLimitCodecV4(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupDB(t)
|
||||
block := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
|
||||
@@ -707,14 +199,10 @@ func testChunkProposerBlobSizeLimit(t *testing.T) {
|
||||
}
|
||||
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV0 { // will never hit blob size limit
|
||||
chainConfig = &params.ChainConfig{}
|
||||
} else if codecVersion == encoding.CodecV1 {
|
||||
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
|
||||
} else if codecVersion == encoding.CodecV2 {
|
||||
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
} else {
|
||||
chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
|
||||
assert.Fail(t, "unsupported codec version, expected CodecV4")
|
||||
}
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
@@ -726,7 +214,7 @@ func testChunkProposerBlobSizeLimit(t *testing.T) {
|
||||
ChunkTimeoutSec: math.MaxUint32,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, chainConfig, db, nil)
|
||||
}, encoding.CodecV4, chainConfig, db, nil)
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
cp.TryProposeChunk()
|
||||
@@ -738,14 +226,10 @@ func testChunkProposerBlobSizeLimit(t *testing.T) {
|
||||
|
||||
var expectedNumChunks int = 2
|
||||
var numBlocksMultiplier uint64
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
numBlocksMultiplier = 255
|
||||
} else if codecVersion == encoding.CodecV1 {
|
||||
numBlocksMultiplier = 22
|
||||
} else if codecVersion == encoding.CodecV2 {
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
numBlocksMultiplier = 255
|
||||
} else {
|
||||
numBlocksMultiplier = 255
|
||||
assert.Fail(t, "unsupported codec version, expected CodecV4")
|
||||
}
|
||||
assert.Len(t, chunks, expectedNumChunks)
|
||||
|
||||
@@ -759,46 +243,3 @@ func testChunkProposerBlobSizeLimit(t *testing.T) {
|
||||
database.CloseDB(db)
|
||||
}
|
||||
}
|
||||
|
||||
func testChunkProposerRespectHardforks(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
block := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
for i := int64(1); i <= 20; i++ {
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
block.Header.Number = big.NewInt(i)
|
||||
block.Header.Time = uint64(i)
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: math.MaxUint64,
|
||||
MaxTxNumPerChunk: math.MaxUint64,
|
||||
MaxL1CommitGasPerChunk: math.MaxUint64,
|
||||
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
|
||||
MaxRowConsumptionPerChunk: math.MaxUint64,
|
||||
ChunkTimeoutSec: 0,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, ¶ms.ChainConfig{
|
||||
BernoulliBlock: big.NewInt(1),
|
||||
CurieBlock: big.NewInt(2),
|
||||
DarwinTime: func() *uint64 { t := uint64(4); return &t }(),
|
||||
}, db, nil)
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
cp.TryProposeChunk()
|
||||
}
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, chunks, 4)
|
||||
expectedEndBlockNumbers := []uint64{1, 2, 3, 20}
|
||||
for i, chunk := range chunks {
|
||||
assert.Equal(t, expectedEndBlockNumbers[i], chunk.EndBlockNumber)
|
||||
}
|
||||
}
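The hardfork test above expects chunk end blocks {1, 2, 3, 20} because Bernoulli activates at block 1, Curie at block 2, and Darwin at timestamp 4, and a chunk must never mix blocks from different forks. A minimal sketch of that boundary rule, reusing encoding.CodecFromConfig the same way the watcher changes further down do; the helper name and the Version() accessor are assumptions for illustration, not code from this diff:

// Sketch only: two blocks may share a chunk when the chain config resolves
// them to the same codec version for their number and timestamp.
func sameChunkCodec(cfg *params.ChainConfig, a, b *encoding.Block) bool {
    codecA := encoding.CodecFromConfig(cfg, a.Header.Number, a.Header.Time)
    codecB := encoding.CodecFromConfig(cfg, b.Header.Number, b.Header.Time)
    if codecA == nil || codecB == nil {
        return false
    }
    return codecA.Version() == codecB.Version()
}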
|
||||
|
||||
@@ -7,13 +7,13 @@ import (
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv0"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/event"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"gorm.io/gorm"
|
||||
|
||||
@@ -35,10 +35,12 @@ type L2WatcherClient struct {
|
||||
withdrawTrieRootSlot common.Hash
|
||||
|
||||
metrics *l2WatcherMetrics
|
||||
|
||||
chainCfg *params.ChainConfig
|
||||
}
|
||||
|
||||
// NewL2WatcherClient take a l2geth instance to generate a l2watcherclient instance
|
||||
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB, reg prometheus.Registerer) *L2WatcherClient {
|
||||
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *L2WatcherClient {
|
||||
return &L2WatcherClient{
|
||||
ctx: ctx,
|
||||
Client: client,
|
||||
@@ -51,6 +53,8 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
|
||||
withdrawTrieRootSlot: withdrawTrieRootSlot,
|
||||
|
||||
metrics: initL2WatcherMetrics(reg),
|
||||
|
||||
chainCfg: chainCfg,
|
||||
}
|
||||
}
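The constructor now threads the chain config into the watcher so it can pick a codec per block below. A hypothetical call site, mirroring how the tests pass the extra argument (ctx, l2Client, cfg, genesisConfig, db and registry are illustrative names):

watcher := NewL2WatcherClient(ctx, l2Client, cfg.L2Config.Confirmations,
    cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot,
    genesisConfig, db, registry)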
|
||||
|
||||
@@ -147,7 +151,11 @@ func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64
|
||||
|
||||
if len(blocks) > 0 {
|
||||
for _, block := range blocks {
|
||||
blockL1CommitCalldataSize, err := codecv0.EstimateBlockL1CommitCalldataSize(block)
|
||||
codec := encoding.CodecFromConfig(w.chainCfg, block.Header.Number, block.Header.Time)
|
||||
if codec == nil {
|
||||
return fmt.Errorf("failed to retrieve codec for block number %v and time %v", block.Header.Number, block.Header.Time)
|
||||
}
|
||||
blockL1CommitCalldataSize, err := codec.EstimateBlockL1CommitCalldataSize(block)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to estimate block L1 commit calldata size: %v", err)
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {
|
||||
db := setupDB(t)
|
||||
l2cfg := cfg.L2Config
|
||||
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db, nil)
|
||||
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, nil, db, nil)
|
||||
return watcher, db
|
||||
}
|
||||
|
||||
@@ -44,5 +44,5 @@ func testFetchRunningMissingBlocks(t *testing.T) {
|
||||
|
||||
func prepareWatcherClient(l2Cli *ethclient.Client, db *gorm.DB) *L2WatcherClient {
|
||||
confirmations := rpc.LatestBlockNumber
|
||||
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, common.Address{}, common.Hash{}, db, nil)
|
||||
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, common.Address{}, common.Hash{}, nil, db, nil)
|
||||
}
|
||||
|
||||
@@ -100,29 +100,17 @@ func TestFunction(t *testing.T) {
|
||||
t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks)
|
||||
|
||||
// Run chunk proposer test cases.
|
||||
t.Run("TestChunkProposerCodecv0Limits", testChunkProposerCodecv0Limits)
|
||||
t.Run("TestChunkProposerCodecv1Limits", testChunkProposerCodecv1Limits)
|
||||
t.Run("TestChunkProposerCodecv2Limits", testChunkProposerCodecv2Limits)
|
||||
t.Run("TestChunkProposerCodecv3Limits", testChunkProposerCodecv3Limits)
|
||||
t.Run("TestChunkProposerBlobSizeLimit", testChunkProposerBlobSizeLimit)
|
||||
t.Run("TestChunkProposerRespectHardforks", testChunkProposerRespectHardforks)
|
||||
t.Run("TestChunkProposerLimitsCodecV4", testChunkProposerLimitsCodecV4)
|
||||
t.Run("TestChunkProposerBlobSizeLimitCodecV4", testChunkProposerBlobSizeLimitCodecV4)
|
||||
|
||||
// Run batch proposer test cases.
|
||||
t.Run("TestBatchProposerCodecv0Limits", testBatchProposerCodecv0Limits)
|
||||
t.Run("TestBatchProposerCodecv1Limits", testBatchProposerCodecv1Limits)
|
||||
t.Run("TestBatchProposerCodecv2Limits", testBatchProposerCodecv2Limits)
|
||||
t.Run("TestBatchProposerCodecv3Limits", testBatchProposerCodecv3Limits)
|
||||
t.Run("TestBatchCommitGasAndCalldataSizeCodecv0Estimation", testBatchCommitGasAndCalldataSizeCodecv0Estimation)
|
||||
t.Run("TestBatchCommitGasAndCalldataSizeCodecv1Estimation", testBatchCommitGasAndCalldataSizeCodecv1Estimation)
|
||||
t.Run("TestBatchCommitGasAndCalldataSizeCodecv2Estimation", testBatchCommitGasAndCalldataSizeCodecv2Estimation)
|
||||
t.Run("TestBatchCommitGasAndCalldataSizeCodecv3Estimation", testBatchCommitGasAndCalldataSizeCodecv3Estimation)
|
||||
t.Run("TestBatchProposerBlobSizeLimit", testBatchProposerBlobSizeLimit)
|
||||
t.Run("TestBatchProposerMaxChunkNumPerBatchLimit", testBatchProposerMaxChunkNumPerBatchLimit)
|
||||
t.Run("TestBatchProposerRespectHardforks", testBatchProposerRespectHardforks)
|
||||
t.Run("TestBatchProposerLimitsCodecV4", testBatchProposerLimitsCodecV4)
|
||||
t.Run("TestBatchCommitGasAndCalldataSizeEstimationCodecV4", testBatchCommitGasAndCalldataSizeEstimationCodecV4)
|
||||
t.Run("TestBatchProposerBlobSizeLimitCodecV4", testBatchProposerBlobSizeLimitCodecV4)
|
||||
t.Run("TestBatchProposerMaxChunkNumPerBatchLimitCodecV4", testBatchProposerMaxChunkNumPerBatchLimitCodecV4)
|
||||
|
||||
// Run bundle proposer test cases.
|
||||
t.Run("TestBundleProposerLimits", testBundleProposerLimits)
|
||||
t.Run("TestBundleProposerRespectHardforks", testBundleProposerRespectHardforks)
|
||||
t.Run("TestBundleProposerLimitsCodecV4", testBundleProposerLimitsCodecV4)
|
||||
}
|
||||
|
||||
func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {
|
||||
|
||||
@@ -35,7 +35,7 @@ type Batch struct {
|
||||
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
|
||||
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
|
||||
CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"`
|
||||
EnableCompress bool `json:"enable_compress" gorm:"column:enable_compress"`
|
||||
EnableCompress bool `json:"enable_compress" gorm:"column:enable_compress"` // use for debug
|
||||
BlobBytes []byte `json:"blob_bytes" gorm:"column:blob_bytes"`
|
||||
|
||||
// proof
|
||||
@@ -250,7 +250,7 @@ func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, erro
|
||||
}
|
||||
|
||||
// InsertBatch inserts a new batch into the database.
|
||||
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecConfig rutils.CodecConfig, metrics rutils.BatchMetrics, dbTX ...*gorm.DB) (*Batch, error) {
|
||||
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVersion encoding.CodecVersion, metrics rutils.BatchMetrics, dbTX ...*gorm.DB) (*Batch, error) {
|
||||
if batch == nil {
|
||||
return nil, errors.New("invalid args: batch is nil")
|
||||
}
|
||||
@@ -265,16 +265,23 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecCon
|
||||
parentBatch, getErr := o.GetBatchByIndex(ctx, batch.Index-1)
|
||||
if getErr != nil {
|
||||
log.Error("failed to get batch by index", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", getErr)
|
||||
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", getErr)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", getErr)
|
||||
}
|
||||
startChunkIndex = parentBatch.EndChunkIndex + 1
|
||||
}
|
||||
|
||||
batchMeta, err := rutils.GetBatchMetadata(batch, codecConfig)
|
||||
batchMeta, err := rutils.GetBatchMetadata(batch, codecVersion)
|
||||
if err != nil {
|
||||
log.Error("failed to get batch metadata", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
|
||||
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
enableCompress, err := encoding.GetBatchEnableCompression(codecVersion, batch)
|
||||
if err != nil {
|
||||
log.Error("failed to get batch enable compress", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
@@ -290,8 +297,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecCon
|
||||
WithdrawRoot: batch.WithdrawRoot().Hex(),
|
||||
ParentBatchHash: batch.ParentBatchHash.Hex(),
|
||||
BatchHeader: batchMeta.BatchBytes,
|
||||
CodecVersion: int16(codecConfig.Version),
|
||||
EnableCompress: codecConfig.EnableCompress,
|
||||
CodecVersion: int16(codecVersion),
|
||||
EnableCompress: enableCompress,
|
||||
BlobBytes: batchMeta.BlobBytes,
|
||||
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
|
||||
ProvingStatus: int16(types.ProvingTaskUnassigned),
|
||||
@@ -340,11 +347,11 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
|
||||
|
||||
switch status {
|
||||
case types.ProvingTaskAssigned:
|
||||
updateFields["prover_assigned_at"] = time.Now()
|
||||
updateFields["prover_assigned_at"] = utils.NowUTC()
|
||||
case types.ProvingTaskUnassigned:
|
||||
updateFields["prover_assigned_at"] = nil
|
||||
case types.ProvingTaskVerified:
|
||||
updateFields["proved_at"] = time.Now()
|
||||
updateFields["proved_at"] = utils.NowUTC()
|
||||
}
|
||||
|
||||
db := o.db
|
||||
@@ -412,7 +419,7 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st
|
||||
updateFields["finalize_tx_hash"] = finalizeTxHash
|
||||
updateFields["rollup_status"] = int(status)
|
||||
if status == types.RollupFinalized {
|
||||
updateFields["finalized_at"] = time.Now()
|
||||
updateFields["finalized_at"] = utils.NowUTC()
|
||||
}
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
@@ -471,11 +478,11 @@ func (o *Batch) UpdateProvingStatusByBundleHash(ctx context.Context, bundleHash
|
||||
|
||||
switch status {
|
||||
case types.ProvingTaskAssigned:
|
||||
updateFields["prover_assigned_at"] = time.Now()
|
||||
updateFields["prover_assigned_at"] = utils.NowUTC()
|
||||
case types.ProvingTaskUnassigned:
|
||||
updateFields["prover_assigned_at"] = nil
|
||||
case types.ProvingTaskVerified:
|
||||
updateFields["proved_at"] = time.Now()
|
||||
updateFields["proved_at"] = utils.NowUTC()
|
||||
}
|
||||
|
||||
db := o.db
|
||||
|
||||
@@ -67,7 +67,7 @@ func (o *Bundle) getLatestBundle(ctx context.Context) (*Bundle, error) {
|
||||
|
||||
var latestBundle Bundle
|
||||
if err := db.First(&latestBundle).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("getLatestBundle error: %w", err)
|
||||
@@ -189,15 +189,19 @@ func (o *Bundle) InsertBundle(ctx context.Context, batches []*Batch, codecVersio
|
||||
}
|
||||
|
||||
// UpdateFinalizeTxHashAndRollupStatus updates the finalize transaction hash and rollup status for a bundle.
|
||||
func (o *Bundle) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error {
|
||||
func (o *Bundle) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus, dbTX ...*gorm.DB) error {
|
||||
updateFields := make(map[string]interface{})
|
||||
updateFields["finalize_tx_hash"] = finalizeTxHash
|
||||
updateFields["rollup_status"] = int(status)
|
||||
if status == types.RollupFinalized {
|
||||
updateFields["finalized_at"] = time.Now()
|
||||
updateFields["finalized_at"] = utils.NowUTC()
|
||||
}
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
db := o.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&Bundle{})
|
||||
db = db.Where("hash", hash)
|
||||
|
||||
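The new variadic dbTX parameter lets this update join an existing gorm transaction instead of using the ORM's own handle. A sketch of such a call site; bundleOrm, bundleHash, finalizeTxHash and the surrounding transaction are illustrative, not part of this diff:

err := db.Transaction(func(tx *gorm.DB) error {
    // other updates that accept a dbTX argument would share this transaction
    return bundleOrm.UpdateFinalizeTxHashAndRollupStatus(ctx, bundleHash, finalizeTxHash, types.RollupFinalized, tx)
})
if err != nil {
    log.Error("failed to finalize bundle atomically", "bundle", bundleHash, "err", err)
}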
@@ -214,7 +218,7 @@ func (o *Bundle) UpdateProvingStatus(ctx context.Context, hash string, status ty
|
||||
|
||||
switch status {
|
||||
case types.ProvingTaskVerified:
|
||||
updateFields["proved_at"] = time.Now()
|
||||
updateFields["proved_at"] = utils.NowUTC()
|
||||
}
|
||||
|
||||
db := o.db
|
||||
@@ -237,7 +241,7 @@ func (o *Bundle) UpdateRollupStatus(ctx context.Context, hash string, status typ
|
||||
updateFields := make(map[string]interface{})
|
||||
updateFields["rollup_status"] = int(status)
|
||||
if status == types.RollupFinalized {
|
||||
updateFields["finalized_at"] = time.Now()
|
||||
updateFields["finalized_at"] = utils.NowUTC()
|
||||
}
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
|
||||
@@ -11,8 +11,8 @@ import (
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/rollup/internal/utils"
|
||||
rutils "scroll-tech/rollup/internal/utils"
|
||||
)
|
||||
|
||||
@@ -178,7 +178,7 @@ func (o *Chunk) GetChunksByBatchHash(ctx context.Context, batchHash string) ([]*
|
||||
}
|
||||
|
||||
// InsertChunk inserts a new chunk into the database.
|
||||
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecConfig rutils.CodecConfig, metrics utils.ChunkMetrics, dbTX ...*gorm.DB) (*Chunk, error) {
|
||||
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, metrics rutils.ChunkMetrics, dbTX ...*gorm.DB) (*Chunk, error) {
|
||||
if chunk == nil || len(chunk.Blocks) == 0 {
|
||||
return nil, errors.New("invalid args")
|
||||
}
|
||||
@@ -203,12 +203,18 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecCon
|
||||
parentChunkStateRoot = parentChunk.StateRoot
|
||||
}
|
||||
|
||||
chunkHash, err := utils.GetChunkHash(chunk, totalL1MessagePoppedBefore, codecConfig.Version)
|
||||
chunkHash, err := rutils.GetChunkHash(chunk, totalL1MessagePoppedBefore, codecVersion)
|
||||
if err != nil {
|
||||
log.Error("failed to get chunk hash", "err", err)
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
}
|
||||
|
||||
enableCompress, err := encoding.GetChunkEnableCompression(codecVersion, chunk)
|
||||
if err != nil {
|
||||
log.Error("failed to get chunk enable compression", "version", codecVersion, "err", err)
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
}
|
||||
|
||||
numBlocks := len(chunk.Blocks)
|
||||
newChunk := Chunk{
|
||||
Index: chunkIndex,
|
||||
@@ -217,7 +223,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecCon
|
||||
StartBlockHash: chunk.Blocks[0].Header.Hash().Hex(),
|
||||
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
|
||||
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
|
||||
TotalL2TxGas: chunk.L2GasUsed(),
|
||||
TotalL2TxGas: chunk.TotalGasUsed(),
|
||||
TotalL2TxNum: chunk.NumL2Transactions(),
|
||||
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
|
||||
TotalL1CommitGas: metrics.L1CommitGas,
|
||||
@@ -228,8 +234,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecCon
|
||||
StateRoot: chunk.Blocks[numBlocks-1].Header.Root.Hex(),
|
||||
ParentChunkStateRoot: parentChunkStateRoot,
|
||||
WithdrawRoot: chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
|
||||
CodecVersion: int16(codecConfig.Version),
|
||||
EnableCompress: codecConfig.EnableCompress,
|
||||
CodecVersion: int16(codecVersion),
|
||||
EnableCompress: enableCompress,
|
||||
ProvingStatus: int16(types.ProvingTaskUnassigned),
|
||||
CrcMax: metrics.CrcMax,
|
||||
BlobSize: metrics.L1CommitBlobSize,
|
||||
@@ -256,11 +262,11 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
|
||||
|
||||
switch status {
|
||||
case types.ProvingTaskAssigned:
|
||||
updateFields["prover_assigned_at"] = time.Now()
|
||||
updateFields["prover_assigned_at"] = utils.NowUTC()
|
||||
case types.ProvingTaskUnassigned:
|
||||
updateFields["prover_assigned_at"] = nil
|
||||
case types.ProvingTaskVerified:
|
||||
updateFields["proved_at"] = time.Now()
|
||||
updateFields["proved_at"] = utils.NowUTC()
|
||||
}
|
||||
|
||||
db := o.db
|
||||
@@ -284,11 +290,11 @@ func (o *Chunk) UpdateProvingStatusByBatchHash(ctx context.Context, batchHash st
|
||||
|
||||
switch status {
|
||||
case types.ProvingTaskAssigned:
|
||||
updateFields["prover_assigned_at"] = time.Now()
|
||||
updateFields["prover_assigned_at"] = utils.NowUTC()
|
||||
case types.ProvingTaskUnassigned:
|
||||
updateFields["prover_assigned_at"] = nil
|
||||
case types.ProvingTaskVerified:
|
||||
updateFields["proved_at"] = time.Now()
|
||||
updateFields["proved_at"] = utils.NowUTC()
|
||||
}
|
||||
|
||||
db := o.db
|
||||
|
||||
@@ -8,10 +8,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv0"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv1"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv2"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv3"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -170,7 +166,7 @@ func TestL2BlockOrm(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestChunkOrm(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3, encoding.CodecV4}
|
||||
chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
|
||||
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
|
||||
for _, codecVersion := range codecVersions {
|
||||
@@ -179,53 +175,23 @@ func TestChunkOrm(t *testing.T) {
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
var chunkHash1 common.Hash
|
||||
var chunkHash2 common.Hash
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
daChunk1, createErr := codecv0.NewDAChunk(chunk1, 0)
|
||||
assert.NoError(t, createErr)
|
||||
chunkHash1, err = daChunk1.Hash()
|
||||
assert.NoError(t, err)
|
||||
codec, err := encoding.CodecFromVersion(codecVersion)
|
||||
assert.NoError(t, err)
|
||||
daChunk1, createErr := codec.NewDAChunk(chunk1, 0)
|
||||
assert.NoError(t, createErr)
|
||||
chunkHash1, err = daChunk1.Hash()
|
||||
assert.NoError(t, err)
|
||||
|
||||
daChunk2, createErr := codecv0.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
|
||||
assert.NoError(t, createErr)
|
||||
chunkHash2, err = daChunk2.Hash()
|
||||
assert.NoError(t, err)
|
||||
} else if codecVersion == encoding.CodecV1 {
|
||||
daChunk1, createErr := codecv1.NewDAChunk(chunk1, 0)
|
||||
assert.NoError(t, createErr)
|
||||
chunkHash1, err = daChunk1.Hash()
|
||||
assert.NoError(t, err)
|
||||
daChunk2, createErr := codec.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
|
||||
assert.NoError(t, createErr)
|
||||
chunkHash2, err = daChunk2.Hash()
|
||||
assert.NoError(t, err)
|
||||
|
||||
daChunk2, createErr := codecv1.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
|
||||
assert.NoError(t, createErr)
|
||||
chunkHash2, err = daChunk2.Hash()
|
||||
assert.NoError(t, err)
|
||||
} else if codecVersion == encoding.CodecV2 {
|
||||
daChunk1, createErr := codecv2.NewDAChunk(chunk1, 0)
|
||||
assert.NoError(t, createErr)
|
||||
chunkHash1, err = daChunk1.Hash()
|
||||
assert.NoError(t, err)
|
||||
|
||||
daChunk2, createErr := codecv2.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
|
||||
assert.NoError(t, createErr)
|
||||
chunkHash2, err = daChunk2.Hash()
|
||||
assert.NoError(t, err)
|
||||
} else {
|
||||
daChunk1, createErr := codecv3.NewDAChunk(chunk1, 0)
|
||||
assert.NoError(t, createErr)
|
||||
chunkHash1, err = daChunk1.Hash()
|
||||
assert.NoError(t, err)
|
||||
|
||||
daChunk2, createErr := codecv3.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
|
||||
assert.NoError(t, createErr)
|
||||
chunkHash2, err = daChunk2.Hash()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1, utils.CodecConfig{Version: codecVersion}, utils.ChunkMetrics{})
|
||||
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, utils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
|
||||
|
||||
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2, utils.CodecConfig{Version: codecVersion}, utils.ChunkMetrics{})
|
||||
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, utils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
|
||||
|
||||
@@ -263,7 +229,7 @@ func TestChunkOrm(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBatchOrm(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3, encoding.CodecV4}
|
||||
chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
|
||||
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
|
||||
for _, codecVersion := range codecVersions {
|
||||
@@ -275,54 +241,34 @@ func TestBatchOrm(t *testing.T) {
|
||||
Index: 0,
|
||||
Chunks: []*encoding.Chunk{chunk1},
|
||||
}
|
||||
batch1, err := batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: codecVersion}, utils.BatchMetrics{})
|
||||
batch1, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
hash1 := batch1.Hash
|
||||
|
||||
batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var batchHash1 string
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
daBatch1, createErr := codecv0.NewDABatchFromBytes(batch1.BatchHeader)
|
||||
assert.NoError(t, createErr)
|
||||
batchHash1 = daBatch1.Hash().Hex()
|
||||
} else if codecVersion == encoding.CodecV1 {
|
||||
daBatch1, createErr := codecv1.NewDABatchFromBytes(batch1.BatchHeader)
|
||||
assert.NoError(t, createErr)
|
||||
batchHash1 = daBatch1.Hash().Hex()
|
||||
} else if codecVersion == encoding.CodecV2 {
|
||||
daBatch1, createErr := codecv2.NewDABatchFromBytes(batch1.BatchHeader)
|
||||
assert.NoError(t, createErr)
|
||||
batchHash1 = daBatch1.Hash().Hex()
|
||||
} else {
|
||||
daBatch1, createErr := codecv3.NewDABatchFromBytes(batch1.BatchHeader)
|
||||
assert.NoError(t, createErr)
|
||||
batchHash1 = daBatch1.Hash().Hex()
|
||||
}
|
||||
codec, err := encoding.CodecFromVersion(codecVersion)
|
||||
assert.NoError(t, err)
|
||||
daBatch1, createErr := codec.NewDABatchFromBytes(batch1.BatchHeader)
|
||||
assert.NoError(t, createErr)
|
||||
batchHash1 := daBatch1.Hash().Hex()
|
||||
assert.Equal(t, hash1, batchHash1)
|
||||
|
||||
batch = &encoding.Batch{
|
||||
Index: 1,
|
||||
Chunks: []*encoding.Chunk{chunk2},
|
||||
}
|
||||
batch2, err := batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: codecVersion}, utils.BatchMetrics{})
|
||||
batch2, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
hash2 := batch2.Hash
|
||||
|
||||
batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var batchHash2 string
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
daBatch2, createErr := codecv0.NewDABatchFromBytes(batch2.BatchHeader)
|
||||
assert.NoError(t, createErr)
|
||||
batchHash2 = daBatch2.Hash().Hex()
|
||||
} else {
|
||||
daBatch2, createErr := codecv1.NewDABatchFromBytes(batch2.BatchHeader)
|
||||
assert.NoError(t, createErr)
|
||||
batchHash2 = daBatch2.Hash().Hex()
|
||||
}
|
||||
daBatch2, createErr := codec.NewDABatchFromBytes(batch2.BatchHeader)
|
||||
assert.NoError(t, createErr)
|
||||
batchHash2 := daBatch2.Hash().Hex()
|
||||
assert.Equal(t, hash2, batchHash2)
|
||||
|
||||
count, err := batchOrm.GetBatchCount(context.Background())
|
||||
@@ -432,7 +378,7 @@ func TestBundleOrm(t *testing.T) {
|
||||
Index: 0,
|
||||
Chunks: []*encoding.Chunk{chunk1},
|
||||
}
|
||||
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, utils.CodecConfig{Version: encoding.CodecV3}, utils.BatchMetrics{})
|
||||
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV4, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
|
||||
@@ -440,30 +386,30 @@ func TestBundleOrm(t *testing.T) {
|
||||
Index: 1,
|
||||
Chunks: []*encoding.Chunk{chunk2},
|
||||
}
|
||||
dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, utils.CodecConfig{Version: encoding.CodecV3}, utils.BatchMetrics{})
|
||||
dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV4, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
var bundle1 *Bundle
|
||||
var bundle2 *Bundle
|
||||
|
||||
t.Run("InsertBundle", func(t *testing.T) {
|
||||
bundle1, err = bundleOrm.InsertBundle(context.Background(), []*Batch{dbBatch1}, encoding.CodecV3)
|
||||
bundle1, err = bundleOrm.InsertBundle(context.Background(), []*Batch{dbBatch1}, encoding.CodecV4)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, bundle1)
|
||||
assert.Equal(t, uint64(0), bundle1.StartBatchIndex)
|
||||
assert.Equal(t, uint64(0), bundle1.EndBatchIndex)
|
||||
assert.Equal(t, dbBatch1.Hash, bundle1.StartBatchHash)
|
||||
assert.Equal(t, dbBatch1.Hash, bundle1.EndBatchHash)
|
||||
assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(bundle1.CodecVersion))
|
||||
assert.Equal(t, encoding.CodecV4, encoding.CodecVersion(bundle1.CodecVersion))
|
||||
|
||||
bundle2, err = bundleOrm.InsertBundle(context.Background(), []*Batch{dbBatch2}, encoding.CodecV3)
|
||||
bundle2, err = bundleOrm.InsertBundle(context.Background(), []*Batch{dbBatch2}, encoding.CodecV4)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, bundle2)
|
||||
assert.Equal(t, uint64(1), bundle2.StartBatchIndex)
|
||||
assert.Equal(t, uint64(1), bundle2.EndBatchIndex)
|
||||
assert.Equal(t, dbBatch2.Hash, bundle2.StartBatchHash)
|
||||
assert.Equal(t, dbBatch2.Hash, bundle2.EndBatchHash)
|
||||
assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(bundle2.CodecVersion))
|
||||
assert.Equal(t, encoding.CodecV4, encoding.CodecVersion(bundle2.CodecVersion))
|
||||
})
|
||||
|
||||
t.Run("GetFirstUnbundledBatchIndex", func(t *testing.T) {
|
||||
@@ -614,7 +560,7 @@ func TestPendingTransactionOrm(t *testing.T) {
|
||||
err = pendingTransactionOrm.InsertPendingTransaction(context.Background(), "test", senderMeta, tx1, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(context.Background(), tx0.Hash(), types.TxStatusReplaced)
|
||||
err = pendingTransactionOrm.UpdateTransactionStatusByTxHash(context.Background(), tx0.Hash(), types.TxStatusReplaced)
|
||||
assert.NoError(t, err)
|
||||
|
||||
txs, err := pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), senderMeta.Type, 2)
|
||||
@@ -631,7 +577,7 @@ func TestPendingTransactionOrm(t *testing.T) {
|
||||
assert.Equal(t, senderMeta.Address.String(), txs[1].SenderAddress)
|
||||
assert.Equal(t, senderMeta.Type, txs[1].SenderType)
|
||||
|
||||
err = pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(context.Background(), tx1.Hash(), types.TxStatusConfirmed)
|
||||
err = pendingTransactionOrm.UpdateTransactionStatusByTxHash(context.Background(), tx1.Hash(), types.TxStatusConfirmed)
|
||||
assert.NoError(t, err)
|
||||
|
||||
txs, err = pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), senderMeta.Type, 2)
|
||||
@@ -648,4 +594,17 @@ func TestPendingTransactionOrm(t *testing.T) {
|
||||
status, err := pendingTransactionOrm.GetTxStatusByTxHash(context.Background(), tx0.Hash())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.TxStatusConfirmedFailed, status)
|
||||
|
||||
// Test DeleteTransactionByTxHash
|
||||
err = pendingTransactionOrm.DeleteTransactionByTxHash(context.Background(), tx0.Hash())
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Verify the transaction is deleted
|
||||
status, err = pendingTransactionOrm.GetTxStatusByTxHash(context.Background(), tx0.Hash())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.TxStatusUnknown, status) // Should return unknown status for deleted transaction
|
||||
|
||||
// Try to delete non-existent transaction
|
||||
err = pendingTransactionOrm.DeleteTransactionByTxHash(context.Background(), common.HexToHash("0x123"))
|
||||
assert.Error(t, err) // Should return error for non-existent transaction
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
@@ -150,8 +151,33 @@ func (o *PendingTransaction) InsertPendingTransaction(ctx context.Context, conte
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdatePendingTransactionStatusByTxHash updates the status of a transaction based on the transaction hash.
|
||||
func (o *PendingTransaction) UpdatePendingTransactionStatusByTxHash(ctx context.Context, hash common.Hash, status types.TxStatus, dbTX ...*gorm.DB) error {
|
||||
// DeleteTransactionByTxHash permanently deletes a transaction record from the database by transaction hash.
|
||||
// Using permanent delete (Unscoped) instead of soft delete to prevent database bloat, as repeated SendTransaction failures
|
||||
// could write a large number of transactions to the database.
|
||||
func (o *PendingTransaction) DeleteTransactionByTxHash(ctx context.Context, hash common.Hash, dbTX ...*gorm.DB) error {
|
||||
db := o.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&PendingTransaction{})
|
||||
|
||||
// Perform permanent delete by using Unscoped()
|
||||
result := db.Where("hash = ?", hash.String()).Unscoped().Delete(&PendingTransaction{})
|
||||
if result.Error != nil {
|
||||
return fmt.Errorf("failed to delete transaction, err: %w", result.Error)
|
||||
}
|
||||
if result.RowsAffected == 0 {
|
||||
return fmt.Errorf("no transaction found with hash: %s", hash.String())
|
||||
}
|
||||
if result.RowsAffected > 0 {
|
||||
log.Warn("Successfully deleted transaction", "hash", hash.String())
|
||||
}
|
||||
return nil
|
||||
}
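Given the rationale above (repeated SendTransaction failures should not leave rows behind), a plausible call site deletes the just-inserted record when the send fails; client, signedTx and pendingTransactionOrm are illustrative names for this sketch:

if err := client.SendTransaction(ctx, signedTx); err != nil {
    // best-effort cleanup of the row written before the send attempt
    if delErr := pendingTransactionOrm.DeleteTransactionByTxHash(ctx, signedTx.Hash()); delErr != nil {
        log.Warn("failed to delete pending transaction after send failure", "hash", signedTx.Hash().String(), "err", delErr)
    }
    return err
}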
|
||||
|
||||
// UpdateTransactionStatusByTxHash updates the status of a transaction based on the transaction hash.
|
||||
func (o *PendingTransaction) UpdateTransactionStatusByTxHash(ctx context.Context, hash common.Hash, status types.TxStatus, dbTX ...*gorm.DB) error {
|
||||
db := o.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
@@ -160,7 +186,7 @@ func (o *PendingTransaction) UpdatePendingTransactionStatusByTxHash(ctx context.
|
||||
db = db.Model(&PendingTransaction{})
|
||||
db = db.Where("hash = ?", hash.String())
|
||||
if err := db.Update("status", status).Error; err != nil {
|
||||
return fmt.Errorf("failed to UpdatePendingTransactionStatusByTxHash, txHash: %s, error: %w", hash, err)
|
||||
return fmt.Errorf("failed to UpdateTransactionStatusByTxHash, txHash: %s, error: %w", hash, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
rollup/internal/utils/exchange_rate.go (new file, 73 lines)
@@ -0,0 +1,73 @@
package utils

import (
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "strconv"
    "time"

    "github.com/scroll-tech/go-ethereum/log"
)

var BinanceApiEndpoint string = "https://api.binance.com/api/v3/ticker/price?symbol=%s"

type BinanceResponse struct {
    Price string `json:"price"`
}

func GetExchangeRateFromBinanceApi(tokenSymbolPair string, maxRetries int) (float64, error) {
    for i := 0; i < maxRetries; i++ {
        if i > 0 {
            time.Sleep(5 * time.Second)
        }

        // make HTTP GET request
        resp, err := http.Get(fmt.Sprintf(BinanceApiEndpoint, tokenSymbolPair))
        if err != nil {
            log.Error("error making HTTP request", "err", err)
            continue
        }
        defer func() {
            err = resp.Body.Close()
            if err != nil {
                log.Error("error closing response body", "err", err)
            }
        }()

        // check for successful response
        if resp.StatusCode != http.StatusOK {
            log.Error("unexpected status code", "code", resp.StatusCode)
            continue
        }

        // read response body
        body, err := io.ReadAll(resp.Body)
        if err != nil {
            log.Error("error reading response body", "err", err)
            continue
        }

        // unmarshal JSON response
        var data BinanceResponse
        err = json.Unmarshal(body, &data)
        if err != nil {
            log.Error("error unmarshaling JSON", "err", err)
            continue
        }

        // convert price string to float64
        price, err := strconv.ParseFloat(data.Price, 64)
        if err != nil {
            log.Error("error parsing price string", "err", err)
            continue
        }

        // successful response, return price
        return price, nil
    }

    // all retries failed, return error
    return 0, fmt.Errorf("failed to get exchange rate after %d retries", maxRetries)
}
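A minimal usage sketch for the helper above; the symbol pair and retry count are illustrative, and how the caller consumes the rate is outside this diff:

rate, err := GetExchangeRateFromBinanceApi("ETHUSDT", 3)
if err != nil {
    log.Error("failed to fetch exchange rate", "err", err)
    return
}
log.Info("fetched exchange rate", "pair", "ETHUSDT", "rate", rate)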
|
||||
@@ -5,23 +5,11 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv0"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv1"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv2"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv3"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv4"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
)
|
||||
|
||||
// CodecConfig holds the configuration for codec-related operations
|
||||
type CodecConfig struct {
|
||||
Version encoding.CodecVersion
|
||||
EnableCompress bool
|
||||
}
|
||||
|
||||
// ChunkMetrics indicates the metrics for proposing a chunk.
|
||||
type ChunkMetrics struct {
|
||||
// common metrics
|
||||
NumBlocks uint64
|
||||
TxNum uint64
|
||||
CrcMax uint64
|
||||
@@ -30,10 +18,7 @@ type ChunkMetrics struct {
|
||||
L1CommitCalldataSize uint64
|
||||
L1CommitGas uint64
|
||||
|
||||
// codecv1 metrics, default 0 for codecv0
|
||||
L1CommitBlobSize uint64
|
||||
|
||||
// codecv2 metrics, default 0 for codecv0 & codecv1
|
||||
L1CommitBlobSize uint64
|
||||
L1CommitUncompressedBatchBytesSize uint64
|
||||
|
||||
// timing metrics
|
||||
@@ -43,147 +28,60 @@ type ChunkMetrics struct {
|
||||
}
|
||||
|
||||
// CalculateChunkMetrics calculates chunk metrics.
|
||||
func CalculateChunkMetrics(chunk *encoding.Chunk, codecConfig CodecConfig) (*ChunkMetrics, error) {
|
||||
var err error
|
||||
func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVersion) (*ChunkMetrics, error) {
|
||||
metrics := &ChunkMetrics{
|
||||
TxNum: chunk.NumTransactions(),
|
||||
NumBlocks: uint64(len(chunk.Blocks)),
|
||||
FirstBlockTimestamp: chunk.Blocks[0].Header.Time,
|
||||
}
|
||||
|
||||
var err error
|
||||
metrics.CrcMax, err = chunk.CrcMax()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get crc max: %w", err)
|
||||
return nil, fmt.Errorf("failed to get crc max, version: %v, err: %w", codecVersion, err)
|
||||
}
|
||||
switch codecConfig.Version {
|
||||
case encoding.CodecV0:
|
||||
start := time.Now()
|
||||
metrics.L1CommitGas, err = codecv0.EstimateChunkL1CommitGas(chunk)
|
||||
metrics.EstimateGasTime = time.Since(start)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate codecv0 chunk L1 commit gas: %w", err)
|
||||
}
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitCalldataSize, err = codecv0.EstimateChunkL1CommitCalldataSize(chunk)
|
||||
metrics.EstimateCalldataSizeTime = time.Since(start)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate codecv0 chunk L1 commit calldata size: %w", err)
|
||||
}
|
||||
return metrics, nil
|
||||
case encoding.CodecV1:
|
||||
start := time.Now()
|
||||
metrics.L1CommitGas = codecv1.EstimateChunkL1CommitGas(chunk)
|
||||
metrics.EstimateGasTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitCalldataSize = codecv1.EstimateChunkL1CommitCalldataSize(chunk)
|
||||
metrics.EstimateCalldataSizeTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitBlobSize, err = codecv1.EstimateChunkL1CommitBlobSize(chunk)
|
||||
metrics.EstimateBlobSizeTime = time.Since(start)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate codecv1 chunk L1 commit blob size: %w", err)
|
||||
}
|
||||
return metrics, nil
|
||||
case encoding.CodecV2:
|
||||
start := time.Now()
|
||||
metrics.L1CommitGas = codecv2.EstimateChunkL1CommitGas(chunk)
|
||||
metrics.EstimateGasTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitCalldataSize = codecv2.EstimateChunkL1CommitCalldataSize(chunk)
|
||||
metrics.EstimateCalldataSizeTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv2.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
|
||||
metrics.EstimateBlobSizeTime = time.Since(start)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate codecv2 chunk L1 commit batch size and blob size: %w", err)
|
||||
}
|
||||
return metrics, nil
|
||||
case encoding.CodecV3:
|
||||
start := time.Now()
|
||||
metrics.L1CommitGas = codecv3.EstimateChunkL1CommitGas(chunk)
|
||||
metrics.EstimateGasTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitCalldataSize = codecv3.EstimateChunkL1CommitCalldataSize(chunk)
|
||||
metrics.EstimateCalldataSizeTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv3.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
|
||||
metrics.EstimateBlobSizeTime = time.Since(start)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate codecv3 chunk L1 commit batch size and blob size: %w", err)
|
||||
}
|
||||
return metrics, nil
|
||||
case encoding.CodecV4:
|
||||
start := time.Now()
|
||||
metrics.L1CommitGas = codecv4.EstimateChunkL1CommitGas(chunk)
|
||||
metrics.EstimateGasTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitCalldataSize = codecv4.EstimateChunkL1CommitCalldataSize(chunk)
|
||||
metrics.EstimateCalldataSizeTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv4.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk, codecConfig.EnableCompress)
|
||||
metrics.EstimateBlobSizeTime = time.Since(start)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate codecv4 chunk L1 commit batch size and blob size: %w", err)
|
||||
}
|
||||
return metrics, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported codec version: %v", codecConfig.Version)
|
||||
codec, err := encoding.CodecFromVersion(codecVersion)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err)
|
||||
}
|
||||
}
|
||||
|
||||
// CheckChunkCompressedDataCompatibility checks compressed data compatibility of a batch built by a single chunk.
|
||||
func CheckChunkCompressedDataCompatibility(chunk *encoding.Chunk, codecVersion encoding.CodecVersion) (bool, error) {
|
||||
switch codecVersion {
|
||||
case encoding.CodecV0, encoding.CodecV1:
|
||||
return true, nil
|
||||
case encoding.CodecV2:
|
||||
return codecv2.CheckChunkCompressedDataCompatibility(chunk)
|
||||
case encoding.CodecV3:
|
||||
return codecv3.CheckChunkCompressedDataCompatibility(chunk)
|
||||
case encoding.CodecV4:
|
||||
return codecv4.CheckChunkCompressedDataCompatibility(chunk)
|
||||
default:
|
||||
return false, fmt.Errorf("unsupported codec version: %v", codecVersion)
|
||||
metrics.EstimateGasTime, err = measureTime(func() error {
|
||||
metrics.L1CommitGas, err = codec.EstimateChunkL1CommitGas(chunk)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate chunk L1 commit gas, version: %v, err: %w", codecVersion, err)
|
||||
}
|
||||
}
|
||||
|
||||
// CheckBatchCompressedDataCompatibility checks compressed data compatibility of a batch built by a single chunk.
|
||||
func CheckBatchCompressedDataCompatibility(batch *encoding.Batch, codecVersion encoding.CodecVersion) (bool, error) {
|
||||
switch codecVersion {
|
||||
case encoding.CodecV0, encoding.CodecV1:
|
||||
return true, nil
|
||||
case encoding.CodecV2:
|
||||
return codecv2.CheckBatchCompressedDataCompatibility(batch)
|
||||
case encoding.CodecV3:
|
||||
return codecv3.CheckBatchCompressedDataCompatibility(batch)
|
||||
case encoding.CodecV4:
|
||||
return codecv4.CheckBatchCompressedDataCompatibility(batch)
|
||||
default:
|
||||
return false, fmt.Errorf("unsupported codec version: %v", codecVersion)
|
||||
metrics.EstimateCalldataSizeTime, err = measureTime(func() error {
|
||||
metrics.L1CommitCalldataSize, err = codec.EstimateChunkL1CommitCalldataSize(chunk)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate chunk L1 commit calldata size, version: %v, err: %w", codecVersion, err)
|
||||
}
|
||||
|
||||
metrics.EstimateBlobSizeTime, err = measureTime(func() error {
|
||||
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codec.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate chunk L1 commit batch size and blob size, version: %v, err: %w", codecVersion, err)
|
||||
}
|
||||
|
||||
return metrics, nil
|
||||
}
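measureTime is called above but its definition is not part of this hunk; a minimal sketch consistent with that usage (time one closure and forward its error) would be:

func measureTime(f func() error) (time.Duration, error) {
    start := time.Now()
    err := f()
    return time.Since(start), err
}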
|
||||
|
||||
// BatchMetrics indicates the metrics for proposing a batch.
|
||||
type BatchMetrics struct {
|
||||
// common metrics
|
||||
NumChunks uint64
|
||||
FirstBlockTimestamp uint64
|
||||
|
||||
L1CommitCalldataSize uint64
|
||||
L1CommitGas uint64
|
||||
|
||||
// codecv1 metrics, default 0 for codecv0
|
||||
L1CommitBlobSize uint64
|
||||
|
||||
// codecv2 metrics, default 0 for codecv0 & codecv1
|
||||
L1CommitBlobSize uint64
|
||||
L1CommitUncompressedBatchBytesSize uint64
|
||||
|
||||
// timing metrics
|
||||
@@ -193,152 +91,62 @@ type BatchMetrics struct {
|
||||
}
|
||||
|
||||
// CalculateBatchMetrics calculates batch metrics.
|
||||
func CalculateBatchMetrics(batch *encoding.Batch, codecConfig CodecConfig) (*BatchMetrics, error) {
|
||||
var err error
|
||||
func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetrics, error) {
|
||||
metrics := &BatchMetrics{
|
||||
NumChunks: uint64(len(batch.Chunks)),
|
||||
FirstBlockTimestamp: batch.Chunks[0].Blocks[0].Header.Time,
|
||||
}
|
||||
switch codecConfig.Version {
|
||||
case encoding.CodecV0:
|
||||
start := time.Now()
|
||||
metrics.L1CommitGas, err = codecv0.EstimateBatchL1CommitGas(batch)
|
||||
metrics.EstimateGasTime = time.Since(start)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate codecv0 batch L1 commit gas: %w", err)
|
||||
}
|
||||
start = time.Now()
|
||||
metrics.L1CommitCalldataSize, err = codecv0.EstimateBatchL1CommitCalldataSize(batch)
|
||||
metrics.EstimateCalldataSizeTime = time.Since(start)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate codecv0 batch L1 commit calldata size: %w", err)
|
||||
}
|
||||
return metrics, nil
|
||||
case encoding.CodecV1:
|
||||
start := time.Now()
|
||||
metrics.L1CommitGas = codecv1.EstimateBatchL1CommitGas(batch)
|
||||
metrics.EstimateGasTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitCalldataSize = codecv1.EstimateBatchL1CommitCalldataSize(batch)
|
||||
metrics.EstimateCalldataSizeTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitBlobSize, err = codecv1.EstimateBatchL1CommitBlobSize(batch)
|
||||
metrics.EstimateBlobSizeTime = time.Since(start)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate codecv1 batch L1 commit blob size: %w", err)
|
||||
}
|
||||
return metrics, nil
|
||||
case encoding.CodecV2:
|
||||
start := time.Now()
|
||||
metrics.L1CommitGas = codecv2.EstimateBatchL1CommitGas(batch)
|
||||
metrics.EstimateGasTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitCalldataSize = codecv2.EstimateBatchL1CommitCalldataSize(batch)
|
||||
metrics.EstimateCalldataSizeTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv2.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
|
||||
metrics.EstimateBlobSizeTime = time.Since(start)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate codecv2 batch L1 commit batch size and blob size: %w", err)
|
||||
}
|
||||
return metrics, nil
|
||||
case encoding.CodecV3:
|
||||
start := time.Now()
|
||||
metrics.L1CommitGas = codecv3.EstimateBatchL1CommitGas(batch)
|
||||
metrics.EstimateGasTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitCalldataSize = codecv3.EstimateBatchL1CommitCalldataSize(batch)
|
||||
metrics.EstimateCalldataSizeTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv3.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
|
||||
metrics.EstimateBlobSizeTime = time.Since(start)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate codecv3 batch L1 commit batch size and blob size: %w", err)
|
||||
}
|
||||
return metrics, nil
|
||||
case encoding.CodecV4:
|
||||
start := time.Now()
|
||||
metrics.L1CommitGas = codecv4.EstimateBatchL1CommitGas(batch)
|
||||
metrics.EstimateGasTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitCalldataSize = codecv4.EstimateBatchL1CommitCalldataSize(batch)
|
||||
metrics.EstimateCalldataSizeTime = time.Since(start)
|
||||
|
||||
start = time.Now()
|
||||
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv4.EstimateBatchL1CommitBatchSizeAndBlobSize(batch, codecConfig.EnableCompress)
|
||||
metrics.EstimateBlobSizeTime = time.Since(start)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate codecv4 batch L1 commit batch size and blob size: %w", err)
|
||||
}
|
||||
return metrics, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported codec version: %v", codecConfig.Version)
|
||||
codec, err := encoding.CodecFromVersion(codecVersion)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err)
|
||||
}
|
||||
|
||||
metrics.EstimateGasTime, err = measureTime(func() error {
|
||||
metrics.L1CommitGas, err = codec.EstimateBatchL1CommitGas(batch)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate batch L1 commit gas, version: %v, err: %w", codecVersion, err)
|
||||
}
|
||||
|
||||
metrics.EstimateCalldataSizeTime, err = measureTime(func() error {
|
||||
metrics.L1CommitCalldataSize, err = codec.EstimateBatchL1CommitCalldataSize(batch)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate batch L1 commit calldata size, version: %v, err: %w", codecVersion, err)
|
||||
}
|
||||
|
||||
metrics.EstimateBlobSizeTime, err = measureTime(func() error {
|
||||
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codec.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate batch L1 commit batch size and blob size, version: %v, err: %w", codecVersion, err)
|
||||
}
|
||||
|
||||
return metrics, nil
|
||||
}
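How the slimmed-down signatures compose at a proposer-style call site; batchOrm, ctx and the choice of encoding.CodecV4 are illustrative here:

metrics, err := rutils.CalculateBatchMetrics(batch, encoding.CodecV4)
if err != nil {
    return err
}
if _, err := batchOrm.InsertBatch(ctx, batch, encoding.CodecV4, *metrics); err != nil {
    return err
}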
|
||||
|
||||
// GetChunkHash retrieves the hash of a chunk.
|
||||
func GetChunkHash(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64, codecVersion encoding.CodecVersion) (common.Hash, error) {
|
||||
switch codecVersion {
|
||||
case encoding.CodecV0:
|
||||
daChunk, err := codecv0.NewDAChunk(chunk, totalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to create codecv0 DA chunk: %w", err)
|
||||
}
|
||||
chunkHash, err := daChunk.Hash()
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to get codecv0 DA chunk hash: %w", err)
|
||||
}
|
||||
return chunkHash, nil
|
||||
case encoding.CodecV1:
|
||||
daChunk, err := codecv1.NewDAChunk(chunk, totalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to create codecv1 DA chunk: %w", err)
|
||||
}
|
||||
chunkHash, err := daChunk.Hash()
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to get codecv1 DA chunk hash: %w", err)
|
||||
}
|
||||
return chunkHash, nil
|
||||
case encoding.CodecV2:
|
||||
daChunk, err := codecv2.NewDAChunk(chunk, totalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to create codecv2 DA chunk: %w", err)
|
||||
}
|
||||
chunkHash, err := daChunk.Hash()
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to get codecv2 DA chunk hash: %w", err)
|
||||
}
|
||||
return chunkHash, nil
|
||||
case encoding.CodecV3:
|
||||
daChunk, err := codecv3.NewDAChunk(chunk, totalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to create codecv3 DA chunk: %w", err)
|
||||
}
|
||||
chunkHash, err := daChunk.Hash()
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to get codecv3 DA chunk hash: %w", err)
|
||||
}
|
||||
return chunkHash, nil
|
||||
case encoding.CodecV4:
|
||||
daChunk, err := codecv4.NewDAChunk(chunk, totalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to create codecv4 DA chunk: %w", err)
|
||||
}
|
||||
chunkHash, err := daChunk.Hash()
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to get codecv4 DA chunk hash: %w", err)
|
||||
}
|
||||
return chunkHash, nil
|
||||
default:
|
||||
return common.Hash{}, fmt.Errorf("unsupported codec version: %v", codecVersion)
|
||||
codec, err := encoding.CodecFromVersion(codecVersion)
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err)
|
||||
}
|
||||
|
||||
daChunk, err := codec.NewDAChunk(chunk, totalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to create DA chunk, version: %v, err: %w", codecVersion, err)
|
||||
}
|
||||
|
||||
chunkHash, err := daChunk.Hash()
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to get DA chunk hash, version: %v, err: %w", codecVersion, err)
|
||||
}
|
||||
|
||||
return chunkHash, nil
|
||||
}
|
||||
|
||||
// BatchMetadata represents the metadata of a batch.
@@ -353,205 +161,63 @@ type BatchMetadata struct {
}

// GetBatchMetadata retrieves the metadata of a batch.
// TODO: refactor this function to reduce cyclomatic complexity
//
//gocyclo:ignore
func GetBatchMetadata(batch *encoding.Batch, codecConfig CodecConfig) (*BatchMetadata, error) {
func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetadata, error) {
codec, err := encoding.CodecFromVersion(codecVersion)
if err != nil {
return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err)
}

daBatch, err := codec.NewDABatch(batch)
if err != nil {
return nil, fmt.Errorf("failed to create DA batch, version: %v, err: %w", codecVersion, err)
}

batchMeta := &BatchMetadata{
BatchHash: daBatch.Hash(),
BatchDataHash: daBatch.DataHash(),
BatchBytes: daBatch.Encode(),
BlobBytes: daBatch.BlobBytes(),
}

batchMeta.BatchBlobDataProof, err = daBatch.BlobDataProofForPointEvaluation()
if err != nil {
return nil, fmt.Errorf("failed to get blob data proof, version: %v, err: %w", codecVersion, err)
}

numChunks := len(batch.Chunks)
if numChunks == 0 {
return nil, fmt.Errorf("batch contains no chunks, version: %v, index: %v", codecVersion, batch.Index)
}

startDAChunk, err := codec.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
if err != nil {
return nil, fmt.Errorf("failed to create start DA chunk, version: %v, err: %w", codecVersion, err)
}

batchMeta.StartChunkHash, err = startDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get start DA chunk hash, version: %v, err: %w", codecVersion, err)
}

totalL1MessagePoppedBeforeEndDAChunk := batch.TotalL1MessagePoppedBefore
for i := 0; i < numChunks-1; i++ {
for i := 0; i < len(batch.Chunks)-1; i++ {
totalL1MessagePoppedBeforeEndDAChunk += batch.Chunks[i].NumL1Messages(totalL1MessagePoppedBeforeEndDAChunk)
}

switch codecConfig.Version {
case encoding.CodecV0:
daBatch, err := codecv0.NewDABatch(batch)
if err != nil {
return nil, fmt.Errorf("failed to create codecv0 DA batch: %w", err)
}

// BatchBlobDataProof is left as empty for codecv0.
batchMeta := &BatchMetadata{
BatchHash: daBatch.Hash(),
BatchDataHash: daBatch.DataHash,
BatchBytes: daBatch.Encode(),
}

startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
if err != nil {
return nil, fmt.Errorf("failed to create codecv0 start DA chunk: %w", err)
}

batchMeta.StartChunkHash, err = startDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv0 start DA chunk hash: %w", err)
}

endDAChunk, err := codecv0.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
if err != nil {
return nil, fmt.Errorf("failed to create codecv0 end DA chunk: %w", err)
}

batchMeta.EndChunkHash, err = endDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv0 end DA chunk hash: %w", err)
}
return batchMeta, nil
case encoding.CodecV1:
daBatch, err := codecv1.NewDABatch(batch)
if err != nil {
return nil, fmt.Errorf("failed to create codecv1 DA batch: %w", err)
}

blobDataProof, err := daBatch.BlobDataProof()
if err != nil {
return nil, fmt.Errorf("failed to get codecv1 blob data proof: %w", err)
}

batchMeta := &BatchMetadata{
BatchHash: daBatch.Hash(),
BatchDataHash: daBatch.DataHash,
BatchBlobDataProof: blobDataProof,
BatchBytes: daBatch.Encode(),
}

startDAChunk, err := codecv1.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
if err != nil {
return nil, fmt.Errorf("failed to create codecv1 start DA chunk: %w", err)
}

batchMeta.StartChunkHash, err = startDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv1 start DA chunk hash: %w", err)
}

endDAChunk, err := codecv1.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
if err != nil {
return nil, fmt.Errorf("failed to create codecv1 end DA chunk: %w", err)
}

batchMeta.EndChunkHash, err = endDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv1 end DA chunk hash: %w", err)
}
return batchMeta, nil
case encoding.CodecV2:
daBatch, err := codecv2.NewDABatch(batch)
if err != nil {
return nil, fmt.Errorf("failed to create codecv2 DA batch: %w", err)
}

blobDataProof, err := daBatch.BlobDataProof()
if err != nil {
return nil, fmt.Errorf("failed to get codecv2 blob data proof: %w", err)
}

batchMeta := &BatchMetadata{
BatchHash: daBatch.Hash(),
BatchDataHash: daBatch.DataHash,
BatchBlobDataProof: blobDataProof,
BatchBytes: daBatch.Encode(),
}

startDAChunk, err := codecv2.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
if err != nil {
return nil, fmt.Errorf("failed to create codecv2 start DA chunk: %w", err)
}

batchMeta.StartChunkHash, err = startDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv2 start DA chunk hash: %w", err)
}

endDAChunk, err := codecv2.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
if err != nil {
return nil, fmt.Errorf("failed to create codecv2 end DA chunk: %w", err)
}

batchMeta.EndChunkHash, err = endDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv2 end DA chunk hash: %w", err)
}
return batchMeta, nil
case encoding.CodecV3:
daBatch, err := codecv3.NewDABatch(batch)
if err != nil {
return nil, fmt.Errorf("failed to create codecv3 DA batch: %w", err)
}

blobDataProof, err := daBatch.BlobDataProofForPointEvaluation()
if err != nil {
return nil, fmt.Errorf("failed to get codecv3 blob data proof for point evaluation: %w", err)
}

batchMeta := &BatchMetadata{
BatchHash: daBatch.Hash(),
BatchDataHash: daBatch.DataHash,
BatchBlobDataProof: blobDataProof,
BatchBytes: daBatch.Encode(),
BlobBytes: daBatch.BlobBytes(),
}

startDAChunk, err := codecv3.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
if err != nil {
return nil, fmt.Errorf("failed to create codecv3 start DA chunk: %w", err)
}

batchMeta.StartChunkHash, err = startDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv3 start DA chunk hash: %w", err)
}

endDAChunk, err := codecv3.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
if err != nil {
return nil, fmt.Errorf("failed to create codecv3 end DA chunk: %w", err)
}

batchMeta.EndChunkHash, err = endDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv3 end DA chunk hash: %w", err)
}
return batchMeta, nil
case encoding.CodecV4:
daBatch, err := codecv4.NewDABatch(batch, codecConfig.EnableCompress)
if err != nil {
return nil, fmt.Errorf("failed to create codecv4 DA batch: %w", err)
}

blobDataProof, err := daBatch.BlobDataProofForPointEvaluation()
if err != nil {
return nil, fmt.Errorf("failed to get codecv4 blob data proof for point evaluation: %w", err)
}

batchMeta := &BatchMetadata{
BatchHash: daBatch.Hash(),
BatchDataHash: daBatch.DataHash,
BatchBlobDataProof: blobDataProof,
BatchBytes: daBatch.Encode(),
BlobBytes: daBatch.BlobBytes(),
}

startDAChunk, err := codecv4.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
if err != nil {
return nil, fmt.Errorf("failed to create codecv4 start DA chunk: %w", err)
}

batchMeta.StartChunkHash, err = startDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv4 start DA chunk hash: %w", err)
}

endDAChunk, err := codecv4.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
if err != nil {
return nil, fmt.Errorf("failed to create codecv4 end DA chunk: %w", err)
}

batchMeta.EndChunkHash, err = endDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get codecv4 end DA chunk hash: %w", err)
}
return batchMeta, nil
default:
return nil, fmt.Errorf("unsupported codec version: %v", codecConfig.Version)
endDAChunk, err := codec.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
if err != nil {
return nil, fmt.Errorf("failed to create end DA chunk, version: %v, err: %w", codecVersion, err)
}

batchMeta.EndChunkHash, err = endDAChunk.Hash()
if err != nil {
return nil, fmt.Errorf("failed to get end DA chunk hash, version: %v, err: %w", codecVersion, err)
}

return batchMeta, nil
}

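A call-site sketch for the new signature: callers that previously built a utils.CodecConfig now pass an encoding.CodecVersion directly and read the uniformly populated metadata fields. The fragment assumes it sits inside a function returning error, that batch is an already-assembled *encoding.Batch, and that the blob-related fields are byte slices; none of it is copied from the repository.

batchMeta, err := GetBatchMetadata(batch, encoding.CodecV4)
if err != nil {
    return fmt.Errorf("failed to get batch metadata: %w", err)
}
// BatchHash is a common.Hash; BatchBytes, BlobBytes and BatchBlobDataProof are assumed to be byte slices.
fmt.Printf("batch %s: %d batch bytes, %d blob bytes, %d proof bytes\n",
    batchMeta.BatchHash.Hex(), len(batchMeta.BatchBytes), len(batchMeta.BlobBytes), len(batchMeta.BatchBlobDataProof))
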
func measureTime(operation func() error) (time.Duration, error) {
start := time.Now()
err := operation()
return time.Since(start), err
}

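measureTime simply times an error-returning closure; a usage sketch follows (the wrapped call and surrounding context are illustrative, not taken from the repository):

elapsed, err := measureTime(func() error {
    _, metaErr := GetBatchMetadata(batch, encoding.CodecV4)
    return metaErr
})
if err != nil {
    return err
}
// elapsed is a time.Duration the caller can feed into its own metrics.
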
@@ -102,10 +102,6 @@ contract MockBridge {

mapping(uint256 => bytes32) public withdrawRoots;

function setL1BaseFee(uint256 _l1BaseFee) external {
l1BaseFee = _l1BaseFee;
}

function setL1BaseFeeAndBlobBaseFee(uint256 _l1BaseFee, uint256 _l1BlobBaseFee) external {
l1BaseFee = _l1BaseFee;
l1BlobBaseFee = _l1BlobBaseFee;

@@ -208,12 +208,10 @@ func TestFunction(t *testing.T) {

// l1 rollup and watch rollup events
t.Run("TestCommitAndFinalizeGenesisBatch", testCommitAndFinalizeGenesisBatch)
t.Run("testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions", testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions)
t.Run("TestCommitBatchAndFinalizeBatchOrBundleCrossingAllTransitions", testCommitBatchAndFinalizeBatchOrBundleCrossingAllTransitions)
t.Run("TestCommitBatchAndFinalizeBundleCodecV4", testCommitBatchAndFinalizeBundleCodecV4)

// l1/l2 gas oracle
t.Run("TestImportL1GasPrice", testImportL1GasPrice)
t.Run("TestImportL1GasPriceAfterCurie", testImportL1GasPriceAfterCurie)
t.Run("TestImportDefaultL1GasPriceDueToL1GasPriceSpike", testImportDefaultL1GasPriceDueToL1GasPriceSpike)
t.Run("TestImportL2GasPrice", testImportL2GasPrice)
}

@@ -30,7 +30,7 @@ func testImportL1GasPrice(t *testing.T) {
|
||||
l1Cfg := rollupApp.Config.L1Config
|
||||
|
||||
// Create L1Relayer
|
||||
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, ¶ms.ChainConfig{}, relayer.ServiceTypeL1GasOracle, nil)
|
||||
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, relayer.ServiceTypeL1GasOracle, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l1Relayer.StopSenders()
|
||||
|
||||
@@ -80,80 +80,7 @@ func testImportL1GasPrice(t *testing.T) {
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), dbBatch.Hash, common.Hash{}.String(), types.RollupCommitted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// relay gas price
|
||||
l1Relayer.ProcessGasPriceOracle()
|
||||
blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(blocks), 1)
|
||||
assert.NotEmpty(t, blocks[0].OracleTxHash)
|
||||
assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOracleImporting)
|
||||
}
|
||||
|
||||
func testImportL1GasPriceAfterCurie(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
prepareContracts(t)
|
||||
|
||||
l1Cfg := rollupApp.Config.L1Config
|
||||
|
||||
// Create L1Relayer
|
||||
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, relayer.ServiceTypeL1GasOracle, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l1Relayer.StopSenders()
|
||||
|
||||
// Create L1Watcher
|
||||
startHeight, err := l1Client.BlockNumber(context.Background())
|
||||
assert.NoError(t, err)
|
||||
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, db, nil)
|
||||
|
||||
// fetch new blocks
|
||||
number, err := l1Client.BlockNumber(context.Background())
|
||||
assert.Greater(t, number, startHeight-1)
|
||||
assert.NoError(t, err)
|
||||
err = l1Watcher.FetchBlockHeader(number)
|
||||
assert.NoError(t, err)
|
||||
|
||||
l1BlockOrm := orm.NewL1Block(db)
|
||||
// check db status
|
||||
latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, number, latestBlockHeight)
|
||||
blocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(blocks), 1)
|
||||
assert.Empty(t, blocks[0].OracleTxHash)
|
||||
assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)
|
||||
|
||||
// add fake batch to pass check for commit batch timeout
|
||||
chunk := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{
|
||||
{
|
||||
Header: &gethTypes.Header{
|
||||
Number: big.NewInt(1),
|
||||
ParentHash: common.Hash{},
|
||||
Difficulty: big.NewInt(0),
|
||||
BaseFee: big.NewInt(0),
|
||||
},
|
||||
Transactions: nil,
|
||||
WithdrawRoot: common.Hash{},
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
},
|
||||
},
|
||||
}
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), dbBatch.Hash, common.Hash{}.String(), types.RollupCommitted)
|
||||
assert.NoError(t, err)
|
||||
@@ -178,7 +105,7 @@ func testImportDefaultL1GasPriceDueToL1GasPriceSpike(t *testing.T) {
|
||||
// set CheckCommittedBatchesWindowMinutes to zero to not pass check for commit batch timeout
|
||||
l1CfgCopy.RelayerConfig.GasOracleConfig.CheckCommittedBatchesWindowMinutes = 0
|
||||
// Create L1Relayer
|
||||
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1CfgCopy.RelayerConfig, ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, relayer.ServiceTypeL1GasOracle, nil)
|
||||
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1CfgCopy.RelayerConfig, relayer.ServiceTypeL1GasOracle, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l1Relayer.StopSenders()
|
||||
|
||||
@@ -234,9 +161,9 @@ func testImportDefaultL1GasPriceDueToL1GasPriceSpike(t *testing.T) {
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch0, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch0, encoding.CodecV0, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), dbBatch.Hash, common.Hash{}.String(), types.RollupCommitted)
|
||||
assert.NoError(t, err)
|
||||
@@ -309,7 +236,7 @@ func testImportL2GasPrice(t *testing.T) {
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{})
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// check db status
|
||||
|
||||
@@ -19,7 +19,7 @@ func testProcessStart(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
rollupApp.RunApp(t, cutils.GasOracleApp, "--genesis", "../conf/genesis.json")
|
||||
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--genesis", "../conf/genesis.json")
|
||||
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--genesis", "../conf/genesis.json", "--min-codec-version", "4")
|
||||
|
||||
rollupApp.WaitExit()
|
||||
}
|
||||
@@ -36,7 +36,7 @@ func testProcessStartEnableMetrics(t *testing.T) {
|
||||
port, err = rand.Int(rand.Reader, big.NewInt(10000))
|
||||
assert.NoError(t, err)
|
||||
svrPort = strconv.FormatInt(port.Int64()+30000, 10)
|
||||
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort, "--genesis", "../conf/genesis.json")
|
||||
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort, "--genesis", "../conf/genesis.json", "--min-codec-version", "4")
|
||||
|
||||
rollupApp.WaitExit()
|
||||
}
|
||||
|
||||
@@ -52,22 +52,18 @@ func testCommitAndFinalizeGenesisBatch(t *testing.T) {
|
||||
assert.Equal(t, types.RollupFinalized, types.RollupStatus(batch.RollupStatus))
|
||||
}
|
||||
|
||||
func testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
|
||||
func testCommitBatchAndFinalizeBundleCodecV4(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupDB(t)
|
||||
|
||||
prepareContracts(t)
|
||||
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
chainConfig = ¶ms.ChainConfig{}
|
||||
} else if codecVersion == encoding.CodecV1 {
|
||||
chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0)}
|
||||
} else if codecVersion == encoding.CodecV2 {
|
||||
chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
} else {
|
||||
chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
|
||||
assert.Fail(t, "unsupported codec version, expected CodecV4")
|
||||
}
|
||||
|
||||
// Create L2Relayer
|
||||
@@ -101,19 +97,19 @@ func testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions(t *testing.T) {
|
||||
MaxRowConsumptionPerChunk: 1048319,
|
||||
ChunkTimeoutSec: 300,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, chainConfig, db, nil)
|
||||
}, encoding.CodecV4, chainConfig, db, nil)
|
||||
|
||||
bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxL1CommitGasPerBatch: 50000000000,
|
||||
MaxL1CommitCalldataSizePerBatch: 1000000,
|
||||
BatchTimeoutSec: 300,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, chainConfig, db, nil)
|
||||
}, encoding.CodecV4, chainConfig, db, nil)
|
||||
|
||||
bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{
|
||||
MaxBatchNumPerBundle: 1000000,
|
||||
BundleTimeoutSec: 300,
|
||||
}, chainConfig, db, nil)
|
||||
}, encoding.CodecV4, chainConfig, db, nil)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks[:5])
|
||||
@@ -176,7 +172,6 @@ func testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions(t *testing.T) {
|
||||
}
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
l2Relayer.ProcessCommittedBatches()
|
||||
l2Relayer.ProcessPendingBundles()
|
||||
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
|
||||
@@ -196,26 +191,24 @@ func testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions(t *testing.T) {
|
||||
|
||||
bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
if codecVersion == encoding.CodecV0 || codecVersion == encoding.CodecV1 || codecVersion == encoding.CodecV2 {
|
||||
assert.Len(t, bundles, 0)
|
||||
} else {
|
||||
assert.Len(t, bundles, 1)
|
||||
bundle := bundles[0]
|
||||
if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized {
|
||||
return false
|
||||
}
|
||||
assert.NotEmpty(t, bundle.FinalizeTxHash)
|
||||
receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"bundle_hash": bundle.Hash}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, batches, 2)
|
||||
for _, batch := range batches {
|
||||
assert.Equal(t, batch.RollupStatus, bundle.RollupStatus)
|
||||
assert.Equal(t, bundle.FinalizeTxHash, batch.FinalizeTxHash)
|
||||
}
|
||||
assert.Len(t, bundles, 1)
|
||||
|
||||
bundle := bundles[0]
|
||||
if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized {
|
||||
return false
|
||||
}
|
||||
assert.NotEmpty(t, bundle.FinalizeTxHash)
|
||||
receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
|
||||
batches, err = batchOrm.GetBatches(context.Background(), map[string]interface{}{"bundle_hash": bundle.Hash}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, batches, 2)
|
||||
for _, batch := range batches {
|
||||
assert.Equal(t, batch.RollupStatus, bundle.RollupStatus)
|
||||
assert.Equal(t, bundle.FinalizeTxHash, batch.FinalizeTxHash)
|
||||
}
|
||||
|
||||
return true
|
||||
}, 30*time.Second, time.Second)
|
||||
|
||||
@@ -223,177 +216,3 @@ func testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions(t *testing.T) {
|
||||
database.CloseDB(db)
|
||||
}
|
||||
}
|
||||
|
||||
func testCommitBatchAndFinalizeBatchOrBundleCrossingAllTransitions(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
prepareContracts(t)
|
||||
|
||||
// Create L2Relayer
|
||||
l2Cfg := rollupApp.Config.L2Config
|
||||
chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(1), CurieBlock: big.NewInt(2), DarwinTime: func() *uint64 { t := uint64(4); return &t }()}
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l2Relayer.StopSenders()
|
||||
|
||||
// add some blocks to db
|
||||
var blocks []*encoding.Block
|
||||
for i := int64(0); i < 10; i++ {
|
||||
header := gethTypes.Header{
|
||||
Number: big.NewInt(i + 1),
|
||||
ParentHash: common.Hash{},
|
||||
Difficulty: big.NewInt(0),
|
||||
BaseFee: big.NewInt(0),
|
||||
Root: common.HexToHash("0x1"),
|
||||
Time: uint64(i + 1),
|
||||
}
|
||||
blocks = append(blocks, &encoding.Block{
|
||||
Header: &header,
|
||||
Transactions: nil,
|
||||
WithdrawRoot: common.HexToHash("0x2"),
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
})
|
||||
}
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks)
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: 100,
|
||||
MaxTxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 50000000000,
|
||||
MaxL1CommitCalldataSizePerChunk: 1000000,
|
||||
MaxRowConsumptionPerChunk: 1048319,
|
||||
ChunkTimeoutSec: 300,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, chainConfig, db, nil)
|
||||
|
||||
bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxL1CommitGasPerBatch: 50000000000,
|
||||
MaxL1CommitCalldataSizePerBatch: 1000000,
|
||||
BatchTimeoutSec: 300,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, chainConfig, db, nil)
|
||||
|
||||
bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{
|
||||
MaxBatchNumPerBundle: 1000000,
|
||||
BundleTimeoutSec: 300,
|
||||
}, chainConfig, db, nil)
|
||||
|
||||
cp.TryProposeChunk()
|
||||
cp.TryProposeChunk()
|
||||
cp.TryProposeChunk()
|
||||
cp.TryProposeChunk()
|
||||
cp.TryProposeChunk()
|
||||
|
||||
bap.TryProposeBatch()
|
||||
bap.TryProposeBatch()
|
||||
bap.TryProposeBatch()
|
||||
bap.TryProposeBatch()
|
||||
|
||||
bup.TryProposeBundle()
|
||||
|
||||
l2Relayer.ProcessPendingBatches()
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
bundleOrm := orm.NewBundle(db)
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
batches, getErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
|
||||
assert.NoError(t, getErr)
|
||||
assert.Len(t, batches, 4)
|
||||
batches = batches[1:]
|
||||
for _, batch := range batches {
|
||||
if types.RollupCommitted != types.RollupStatus(batch.RollupStatus) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}, 30*time.Second, time.Second)
|
||||
|
||||
batchProof := &message.BatchProof{
|
||||
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
}
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
batches = batches[1:]
|
||||
for _, batch := range batches {
|
||||
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, batchProof, 600)
|
||||
assert.NoError(t, err)
|
||||
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
bundleProof := &message.BundleProof{
|
||||
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
}
|
||||
bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
for _, bundle := range bundles {
|
||||
err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, bundleProof, types.ProvingTaskVerified, 100)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
l2Relayer.ProcessCommittedBatches()
|
||||
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, batches, 4)
|
||||
batches = batches[1:2]
|
||||
for _, batch := range batches {
|
||||
if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized {
|
||||
return false
|
||||
}
|
||||
assert.NotEmpty(t, batch.FinalizeTxHash)
|
||||
receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
|
||||
assert.NoError(t, getErr)
|
||||
assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
|
||||
}
|
||||
return true
|
||||
}, 30*time.Second, time.Second)
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
l2Relayer.ProcessPendingBundles()
|
||||
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, batches, 4)
|
||||
batches = batches[3:]
|
||||
for _, batch := range batches {
|
||||
if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized {
|
||||
return false
|
||||
}
|
||||
assert.NotEmpty(t, batch.FinalizeTxHash)
|
||||
receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
|
||||
assert.NoError(t, getErr)
|
||||
assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
|
||||
}
|
||||
|
||||
bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, bundles, 1)
|
||||
bundle := bundles[0]
|
||||
if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized {
|
||||
return false
|
||||
}
|
||||
assert.NotEmpty(t, bundle.FinalizeTxHash)
|
||||
receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
|
||||
batches, err = batchOrm.GetBatches(context.Background(), map[string]interface{}{"bundle_hash": bundle.Hash}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, batches, 1)
|
||||
for _, batch := range batches {
|
||||
assert.Equal(t, batch.RollupStatus, bundle.RollupStatus)
|
||||
assert.Equal(t, bundle.FinalizeTxHash, batch.FinalizeTxHash)
|
||||
}
|
||||
return true
|
||||
}, 30*time.Second, time.Second)
|
||||
}
|
||||
|
||||
@@ -3,21 +3,25 @@ module scroll-tech/integration-test
go 1.21

require (
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
github.com/scroll-tech/da-codec v0.1.2
github.com/scroll-tech/go-ethereum v1.10.14-0.20241011150208-4742882675d8
github.com/stretchr/testify v1.9.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)

require (
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/google/uuid v1.6.0 // indirect
@@ -26,9 +30,9 @@ require (
github.com/iden3/go-iden3-crypto v0.0.16 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/supranational/blst v0.3.12 // indirect
@@ -36,9 +40,11 @@ require (
github.com/tklauser/numcpus v0.8.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.21.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
golang.org/x/tools v0.13.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)

@@ -1,10 +1,20 @@
|
||||
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
|
||||
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
|
||||
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
@@ -16,6 +26,16 @@ github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
|
||||
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=
|
||||
github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
|
||||
github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4=
|
||||
github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E=
|
||||
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
|
||||
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
|
||||
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
|
||||
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
|
||||
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
|
||||
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
|
||||
@@ -25,19 +45,33 @@ github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXk
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
|
||||
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
|
||||
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI=
|
||||
github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
|
||||
github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
|
||||
github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
|
||||
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
|
||||
@@ -45,15 +79,17 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk=
|
||||
github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
|
||||
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||
@@ -61,14 +97,20 @@ github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
||||
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
|
||||
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
|
||||
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
|
||||
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
|
||||
@@ -81,22 +123,28 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
|
||||
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
|
||||
github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg=
|
||||
github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a h1:CmF68hwI0XsOQ5UwlBopMi2Ow4Pbg32akc4KIVCOm+Y=
|
||||
github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
|
||||
github.com/scroll-tech/da-codec v0.1.2 h1:QyJ+dQ4zWVVJwuqxNt4MiKyrymVc6rHe4YPtURkjiRc=
|
||||
github.com/scroll-tech/da-codec v0.1.2/go.mod h1:odz1ck3umvYccCG03osaQBISAYGinZktZYbpk94fYRE=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20241011150208-4742882675d8 h1:pEP6+ThQIgSRO5SILiO6iBpWnxAUjoRNBA9Nc6ooOS0=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20241011150208-4742882675d8/go.mod h1:MBHX2RcAV9KLWblo9DSa/xyPYd1Wpwnt64JSDOy85po=
|
||||
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
|
||||
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
|
||||
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8=
|
||||
@@ -107,27 +155,38 @@ github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZ
|
||||
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
|
||||
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
|
||||
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
|
||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
|
||||
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
Some files were not shown because too many files have changed in this diff