Compare commits

...

19 Commits

Author SHA1 Message Date
colin
f9da81d587 feat(rollup-relayer): use db gas estimation as min gas limit (#901)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: Xi Lin <zimpha@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-09-01 14:54:57 +08:00
Xi Lin
38f64e70b7 feat(contracts): bench test for ScrollChain (#721)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
Co-authored-by: icemelon <icemelon@users.noreply.github.com>
2023-08-31 23:52:50 -07:00
Haichen Shen
44b924170a fix(ci): don't trigger CI for version change (#898)
Co-authored-by: icemelon <icemelon@users.noreply.github.com>
2023-08-31 23:06:32 -07:00
georgehao
227f09a2cf feat(coordinator): pretty log (#900)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-09-01 12:56:03 +08:00
HAOYUatHZ
112e82a4ef feat(relayer): cache maxFeeData (#897)
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2023-09-01 12:06:02 +08:00
georgehao
82e6d28e82 feat(rollup-relayer): pretty the send transaction log (#896)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-09-01 11:42:04 +08:00
Xi Lin
8daa5d5496 fix(contracts): check actual number of transactions in each chunk (#887)
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
2023-08-31 20:28:54 -07:00
maskpp
3958e8bd86 feat(rollup_relayer): add chain_monitor client (#885)
Co-authored-by: mask-pp <mask-pp@users.noreply.github.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-31 20:15:26 +08:00
colin
f553a70d20 feat(rollup-relayer): add l1 commit estimation fields in batch schema (#891)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-08-30 19:52:23 +08:00
colin
2dc5ceb44c fix(prover): refine error logs (#890)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-08-30 16:45:52 +08:00
colin
ff03924d76 feat(prover): add chunk & batch proving circuit error handling (#884)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-30 12:22:54 +08:00
Xi Lin
f6894bb82f feat(contracts): add usdc gateway (#426)
Co-authored-by: zimpha <zimpha@users.noreply.github.com>
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
2023-08-29 00:28:29 -07:00
georgehao
e990e02391 feat(database): add index (#881)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-28 17:07:29 +08:00
colin
f8d48a6326 fix(common): chunk test (#886)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-08-28 15:46:54 +08:00
colin
dba097e03d fix(rollup-relayer): block l1 commit calldata and gas estimation (#882)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-08-28 15:33:40 +08:00
HAOYUatHZ
30ad0bfe78 fix(coordinator): fix TestApis (#883)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-08-28 11:21:03 +08:00
georgehao
1dfca3b7c0 feat(coordinator): prover task record unique (#845)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-28 10:51:01 +08:00
colin
826e847b5a fix(rollup-relayer): determine first block height and first chunk index (#861)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: Haichen Shen <shenhaichen@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-08-26 15:04:52 +08:00
colin
8c71a6d22a fix(chunk-proposer): count l1+l2 txs into chunk (#879)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-08-26 14:53:59 +08:00
110 changed files with 2649 additions and 885 deletions

View File

@@ -20,7 +20,7 @@ Your PR title must follow [conventional commits](https://www.conventionalcommits
### Deployment tag versioning
Has `tag` in `common/version.go` been updated?
Has `tag` in `common/version.go` been updated or have you added `bump-version` tag to this PR?
- [ ] No, this PR doesn't involve a new deployment, git tag, docker image tag
- [ ] Yes

View File

@@ -10,6 +10,7 @@ on:
paths:
- 'bridge/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/bridge.yml'
pull_request:
@@ -21,6 +22,7 @@ on:
paths:
- 'bridge/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/bridge.yml'

View File

@@ -2,15 +2,12 @@ name: Bump Version
on:
pull_request:
branches: [develop]
types:
- opened
- reopened
- synchronize
- ready_for_review
branches: [ develop ]
types: [ labeled ]
jobs:
try-to-bump:
if: ${{ github.event.label.name == 'bump-version' }}
runs-on: ubuntu-latest
steps:
- name: Checkout code

View File

@@ -9,6 +9,7 @@ on:
- alpha
paths:
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/common.yml'
pull_request:
types:
@@ -18,6 +19,7 @@ on:
- ready_for_review
paths:
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/common.yml'
jobs:

View File

@@ -10,6 +10,7 @@ on:
paths:
- 'coordinator/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/coordinator.yml'
pull_request:
@@ -21,6 +22,7 @@ on:
paths:
- 'coordinator/**'
- 'common/**'
- '!common/version/version.go'
- 'database/**'
- '.github/workflows/coordinator.yml'

View File

@@ -10,6 +10,7 @@ on:
paths:
- 'database/**'
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/database.yml'
pull_request:
types:
@@ -20,6 +21,7 @@ on:
paths:
- 'database/**'
- 'common/**'
- '!common/version/version.go'
- '.github/workflows/database.yml'
jobs:

View File

@@ -116,7 +116,7 @@ require (
golang.org/x/arch v0.4.0 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/exp v0.0.0-20230810033253-352e893a4cad // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect

View File

@@ -531,8 +531,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

View File

@@ -57,14 +57,20 @@
"min_gas_price": 0,
"gas_price_diff": 50000
},
"chain_monitor": {
"timeout": 3,
"try_times": 5,
"base_url": "http://localhost:8750"
},
"finalize_batch_interval_sec": 0,
"message_sender_private_key": "1212121212121212121212121212121212121212121212121212121212121212",
"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313",
"commit_sender_private_key": "1414141414141414141414141414141414141414141414141414141414141414",
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515"
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515",
"gas_cost_increase_multiplier": 1.2
},
"chunk_proposer_config": {
"max_l2_tx_num_per_chunk": 1123,
"max_tx_num_per_chunk": 1123,
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"chunk_timeout_sec": 300,

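For reference, a minimal self-contained sketch of how the new config keys above decode. The json tags mirror the ChainMonitor and RelayerConfig hunks later in this compare; the trimmed struct names and the main function are illustrative only, not the bridge's actual config types.

package main

import (
	"encoding/json"
	"fmt"
)

// chainMonitorConfig mirrors the json tags of the ChainMonitor struct added below.
type chainMonitorConfig struct {
	TimeOut  int    `json:"timeout"`
	TryTimes int    `json:"try_times"`
	BaseURL  string `json:"base_url"`
}

// relayerConfig carries only the two fields introduced in this change.
type relayerConfig struct {
	ChainMonitor              *chainMonitorConfig `json:"chain_monitor,omitempty"`
	GasCostIncreaseMultiplier float64             `json:"gas_cost_increase_multiplier,omitempty"`
}

func main() {
	raw := []byte(`{
		"chain_monitor": {"timeout": 3, "try_times": 5, "base_url": "http://localhost:8750"},
		"gas_cost_increase_multiplier": 1.2
	}`)
	var cfg relayerConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.ChainMonitor.BaseURL, cfg.GasCostIncreaseMultiplier) // http://localhost:8750 1.2
}
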
View File

@@ -4,6 +4,8 @@ go 1.19
require (
github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/gin-gonic/gin v1.9.1
github.com/go-resty/resty/v2 v2.7.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/prometheus/client_golang v1.14.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28
@@ -16,12 +18,20 @@ require (
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/bytedance/sonic v1.9.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.14.1 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
@@ -33,13 +43,19 @@ require (
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
@@ -54,12 +70,17 @@ require (
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/arch v0.4.0 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect

View File

@@ -13,13 +13,20 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.9.2 h1:GDaNjuWSGu09guE9Oql0MSTNhNCLlWwO8y/xM5BzcbM=
github.com/bytedance/sonic v1.9.2/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
@@ -27,11 +34,28 @@ github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS3
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k=
github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
@@ -41,6 +65,7 @@ github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXi
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -67,14 +92,21 @@ github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkr
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -87,12 +119,19 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -131,6 +170,15 @@ github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -138,14 +186,21 @@ github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+Kd
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc=
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -154,6 +209,9 @@ golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98y
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -163,15 +221,22 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
@@ -186,7 +251,9 @@ gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHN
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=

View File

@@ -28,7 +28,7 @@ type L2Config struct {
// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
MaxL2TxNumPerChunk uint64 `json:"max_l2_tx_num_per_chunk"`
MaxTxNumPerChunk uint64 `json:"max_tx_num_per_chunk"`
MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`

View File

@@ -37,6 +37,13 @@ type SenderConfig struct {
PendingLimit int `json:"pending_limit"`
}
// ChainMonitor is the config used to get batch status from the chain_monitor API.
type ChainMonitor struct {
TimeOut int `json:"timeout"`
TryTimes int `json:"try_times"`
BaseURL string `json:"base_url"`
}
// RelayerConfig loads relayer configuration items.
// What we need to pay attention to is that
type RelayerConfig struct {
@@ -54,6 +61,10 @@ type RelayerConfig struct {
FinalizeBatchIntervalSec uint64 `json:"finalize_batch_interval_sec"`
// MessageRelayMinGasLimit to avoid OutOfGas error
MessageRelayMinGasLimit uint64 `json:"message_relay_min_gas_limit,omitempty"`
// ChainMonitor config of monitoring service
ChainMonitor *ChainMonitor `json:"chain_monitor,omitempty"`
// GasCostIncreaseMultiplier multiplier for min gas limit estimation
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier,omitempty"`
// The private key of the relayer
MessageSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
GasOracleSenderPrivateKey *ecdsa.PrivateKey `json:"-"`

View File

@@ -8,6 +8,7 @@ import (
"sync"
"time"
"github.com/go-resty/resty/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
@@ -59,6 +60,9 @@ type Layer2Relayer struct {
minGasPrice uint64
gasPriceDiff uint64
// Used to get batch status from chain_monitor api.
chainMonitorClient *resty.Client
// A list of processing message.
// key(string): confirmation ID, value(string): layer2 hash.
processingMessage sync.Map
@@ -114,6 +118,11 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
}
// chain_monitor client
chainMonitorClient := resty.New()
chainMonitorClient.SetRetryCount(cfg.ChainMonitor.TryTimes)
chainMonitorClient.SetTimeout(time.Duration(cfg.ChainMonitor.TimeOut) * time.Second)
layer2Relayer := &Layer2Relayer{
ctx: ctx,
db: db,
@@ -143,6 +152,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
processingMessage: sync.Map{},
processingCommitment: sync.Map{},
processingFinalization: sync.Map{},
chainMonitorClient: chainMonitorClient,
}
// Initialize genesis before we do anything else
@@ -192,8 +202,14 @@ func (r *Layer2Relayer) initializeGenesis() error {
return fmt.Errorf("failed to update genesis chunk proving status: %v", err)
}
batchMeta := &types.BatchMeta{
StartChunkIndex: 0,
StartChunkHash: dbChunk.Hash,
EndChunkIndex: 0,
EndChunkHash: dbChunk.Hash,
}
var batch *orm.Batch
batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk}, dbTX)
batch, err = r.batchOrm.InsertBatch(r.ctx, []*types.Chunk{chunk}, batchMeta, dbTX)
if err != nil {
return fmt.Errorf("failed to insert batch: %v", err)
}
@@ -316,7 +332,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
func (r *Layer2Relayer) ProcessPendingBatches() {
// get pending batches from database in ascending order by their index.
pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 1)
pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 5)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
@@ -379,9 +395,17 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
// send transaction
txID := batch.Hash + "-commit"
txHash, err := r.commitSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
minGasLimit := uint64(float64(batch.TotalL1CommitGas) * r.cfg.GasCostIncreaseMultiplier)
txHash, err := r.commitSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, minGasLimit)
if err != nil {
log.Error(
"Failed to send commitBatch tx to layer1",
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"err", err,
)
log.Debug(
"Failed to send commitBatch tx to layer1",
"index", batch.Index,
"hash", batch.Hash,
@@ -434,6 +458,19 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
log.Info("Start to roll up zk proof", "hash", hash)
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
// Check batch status before send `finalizeBatchWithProof` tx.
//batchStatus, err := r.getBatchStatusByIndex(batch.Index)
//if err != nil {
// r.metrics.bridgeL2ChainMonitorLatestFailedCall.Inc()
// log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", batch.Index, "err", err)
// return
//}
//if !batchStatus {
// r.metrics.bridgeL2ChainMonitorLatestFailedBatchStatus.Inc()
// log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", batch.Index)
// return
//}
var parentBatchStateRoot string
if batch.Index > 0 {
var parentBatch *orm.Batch
@@ -481,6 +518,13 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
// the client does not see the 1st tx's updates at this point.
// TODO: add more fine-grained error handling
log.Error(
"finalizeBatchWithProof in layer1 failed",
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"err", err,
)
log.Debug(
"finalizeBatchWithProof in layer1 failed",
"index", batch.Index,
"hash", batch.Hash,
@@ -527,6 +571,29 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
}
}
// batchStatusResponse is the response schema of the chain_monitor batch status API.
type batchStatusResponse struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
Data bool `json:"data"`
}
func (r *Layer2Relayer) getBatchStatusByIndex(batchIndex uint64) (bool, error) {
var response batchStatusResponse
resp, err := r.chainMonitorClient.R().SetResult(&response).Get(fmt.Sprintf("%s/v1/batch_status?batch_index=%d", r.cfg.ChainMonitor.BaseURL, batchIndex))
if err != nil {
return false, err
}
if resp.IsError() {
// resp.Error() is nil unless SetError is configured, so report the status code instead
return false, fmt.Errorf("failed to get batch status, status code: %d", resp.StatusCode())
}
if response.ErrCode != 0 {
return false, fmt.Errorf("failed to get batch status, errCode: %d, errMsg: %s", response.ErrCode, response.ErrMsg)
}
return response.Data, nil
}
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
transactionType := "Unknown"
// check whether it is CommitBatches transaction

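A standalone sketch of the chain_monitor polling pattern wired in above, assuming the /v1/batch_status path and the errcode/errmsg/data schema from this diff (note the call site in ProcessCommittedBatches remains commented out in this change). One hedge: on resp.IsError() it reports the HTTP status code rather than asserting resp.Error() to error, since resty only populates Error() when a SetError result type was registered on the request.

package main

import (
	"fmt"
	"time"

	"github.com/go-resty/resty/v2"
)

// batchStatusResponse matches the schema shown in the diff above.
type batchStatusResponse struct {
	ErrCode int    `json:"errcode"`
	ErrMsg  string `json:"errmsg"`
	Data    bool   `json:"data"`
}

func getBatchStatus(client *resty.Client, baseURL string, batchIndex uint64) (bool, error) {
	var response batchStatusResponse
	resp, err := client.R().
		SetResult(&response).
		Get(fmt.Sprintf("%s/v1/batch_status?batch_index=%d", baseURL, batchIndex))
	if err != nil {
		return false, err
	}
	if resp.IsError() {
		// no SetError result type registered, so resp.Error() would be nil here
		return false, fmt.Errorf("chain_monitor returned status code %d", resp.StatusCode())
	}
	if response.ErrCode != 0 {
		return false, fmt.Errorf("failed to get batch status, errCode: %d, errMsg: %s", response.ErrCode, response.ErrMsg)
	}
	return response.Data, nil
}

func main() {
	// retry count and timeout correspond to cfg.ChainMonitor.TryTimes and TimeOut (seconds)
	client := resty.New().SetRetryCount(5).SetTimeout(3 * time.Second)
	ok, err := getBatchStatus(client, "http://localhost:8750", 1)
	fmt.Println(ok, err)
}
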
View File

@@ -18,6 +18,8 @@ type l2RelayerMetrics struct {
bridgeL2BatchesCommittedConfirmedTotal prometheus.Counter
bridgeL2BatchesFinalizedConfirmedTotal prometheus.Counter
bridgeL2BatchesGasOraclerConfirmedTotal prometheus.Counter
bridgeL2ChainMonitorLatestFailedCall prometheus.Counter
bridgeL2ChainMonitorLatestFailedBatchStatus prometheus.Counter
}
var (
@@ -68,6 +70,14 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
Name: "bridge_layer2_process_gras_oracler_confirmed_total",
Help: "The total number of layer2 process finalized batches confirmed total",
}),
bridgeL2ChainMonitorLatestFailedCall: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_chain_monitor_latest_failed_batch_call",
Help: "The total number of failed call chain_monitor api",
}),
bridgeL2ChainMonitorLatestFailedBatchStatus: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_chain_monitor_latest_failed_batch_status",
Help: "The total number of failed batch status get from chain_monitor",
}),
}
})
return l2RelayerMetric

View File

@@ -4,9 +4,12 @@ import (
"context"
"errors"
"math/big"
"net/http"
"strings"
"testing"
"github.com/agiledragon/gomonkey/v2"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/common"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
@@ -56,8 +59,14 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
assert.NoError(t, err)
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
batchMeta := &types.BatchMeta{
StartChunkIndex: 0,
StartChunkHash: dbChunk1.Hash,
EndChunkIndex: 1,
EndChunkHash: dbChunk2.Hash,
}
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*types.Chunk{chunk1, chunk2})
batch, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk1, chunk2}, batchMeta)
assert.NoError(t, err)
relayer.ProcessPendingBatches()
@@ -75,8 +84,14 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, nil)
assert.NoError(t, err)
batchMeta := &types.BatchMeta{
StartChunkIndex: 0,
StartChunkHash: chunkHash1.Hex(),
EndChunkIndex: 1,
EndChunkHash: chunkHash2.Hex(),
}
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
batch, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk1, chunk2}, batchMeta)
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
@@ -124,7 +139,13 @@ func testL2RelayerCommitConfirm(t *testing.T) {
batchOrm := orm.NewBatch(db)
batchHashes := make([]string, len(processingKeys))
for i := range batchHashes {
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
batchMeta := &types.BatchMeta{
StartChunkIndex: 0,
StartChunkHash: chunkHash1.Hex(),
EndChunkIndex: 1,
EndChunkHash: chunkHash2.Hex(),
}
batch, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk1, chunk2}, batchMeta)
assert.NoError(t, err)
batchHashes[i] = batch.Hash
}
@@ -174,7 +195,13 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
batchOrm := orm.NewBatch(db)
batchHashes := make([]string, len(processingKeys))
for i := range batchHashes {
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
batchMeta := &types.BatchMeta{
StartChunkIndex: 0,
StartChunkHash: chunkHash1.Hex(),
EndChunkIndex: 1,
EndChunkHash: chunkHash2.Hex(),
}
batch, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk1, chunk2}, batchMeta)
assert.NoError(t, err)
batchHashes[i] = batch.Hash
}
@@ -210,11 +237,23 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
batchMeta1 := &types.BatchMeta{
StartChunkIndex: 0,
StartChunkHash: chunkHash1.Hex(),
EndChunkIndex: 0,
EndChunkHash: chunkHash1.Hex(),
}
batchOrm := orm.NewBatch(db)
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
batch1, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk1}, batchMeta1)
assert.NoError(t, err)
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
batchMeta2 := &types.BatchMeta{
StartChunkIndex: 1,
StartChunkHash: chunkHash2.Hex(),
EndChunkIndex: 1,
EndChunkHash: chunkHash2.Hex(),
}
batch2, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk2}, batchMeta2)
assert.NoError(t, err)
// Create and set up the Layer2 Relayer.
@@ -331,3 +370,33 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
})
relayer.ProcessGasPriceOracle()
}
func mockChainMonitorServer(baseURL string) (*http.Server, error) {
router := gin.New()
r := router.Group("/v1")
r.GET("/batch_status", func(ctx *gin.Context) {
ctx.JSON(http.StatusOK, struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
Data bool `json:"data"`
}{
ErrCode: 0,
ErrMsg: "",
Data: true,
})
})
return utils.StartHTTPServer(strings.Split(baseURL, "//")[1], router)
}
func testGetBatchStatusByIndex(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
status, err := relayer.getBatchStatusByIndex(1)
assert.NoError(t, err)
assert.Equal(t, true, status)
}

View File

@@ -86,6 +86,10 @@ func TestMain(m *testing.M) {
func TestFunctions(t *testing.T) {
setupEnv(t)
srv, err := mockChainMonitorServer(cfg.L2Config.RelayerConfig.ChainMonitor.BaseURL)
assert.NoError(t, err)
defer srv.Close()
// Run l1 relayer test cases.
t.Run("TestCreateNewL1Relayer", testCreateNewL1Relayer)
t.Run("TestL1RelayerProcessSaveEvents", testL1RelayerProcessSaveEvents)
@@ -101,4 +105,6 @@ func TestFunctions(t *testing.T) {
t.Run("TestL2RelayerFinalizeConfirm", testL2RelayerFinalizeConfirm)
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)
// test getBatchStatusByIndex
t.Run("TestGetBatchStatusByIndex", testGetBatchStatusByIndex)
}

View File

@@ -7,15 +7,18 @@ import (
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
)
func (s *Sender) estimateLegacyGas(auth *bind.TransactOpts, contract *common.Address, value *big.Int, input []byte, minGasLimit uint64) (*FeeData, error) {
gasPrice, err := s.client.SuggestGasPrice(s.ctx)
if err != nil {
log.Error("estimateLegacyGas SuggestGasPrice failure", "error", err)
return nil, err
}
gasLimit, err := s.estimateGasLimit(auth, contract, input, gasPrice, nil, nil, value, minGasLimit)
if err != nil {
log.Error("estimateLegacyGas estimateGasLimit failure", "gasPrice", gasPrice, "error", err)
return nil, err
}
return &FeeData{
@@ -27,6 +30,7 @@ func (s *Sender) estimateLegacyGas(auth *bind.TransactOpts, contract *common.Add
func (s *Sender) estimateDynamicGas(auth *bind.TransactOpts, contract *common.Address, value *big.Int, input []byte, minGasLimit uint64) (*FeeData, error) {
gasTipCap, err := s.client.SuggestGasTipCap(s.ctx)
if err != nil {
log.Error("estimateDynamicGas SuggestGasTipCap failure", "error", err)
return nil, err
}
@@ -40,7 +44,11 @@ func (s *Sender) estimateDynamicGas(auth *bind.TransactOpts, contract *common.Ad
)
gasLimit, err := s.estimateGasLimit(auth, contract, input, nil, gasTipCap, gasFeeCap, value, minGasLimit)
if err != nil {
return nil, err
log.Error("estimateDynamicGas estimateGasLimit failure", "error", err)
if minGasLimit == 0 {
return nil, err
}
gasLimit = minGasLimit
}
return &FeeData{
gasLimit: gasLimit,
@@ -61,13 +69,14 @@ func (s *Sender) estimateGasLimit(opts *bind.TransactOpts, contract *common.Addr
}
gasLimit, err := s.client.EstimateGas(s.ctx, msg)
if err != nil {
log.Error("estimateGasLimit EstimateGas failure", "error", err)
return 0, err
}
if minGasLimit > gasLimit {
gasLimit = minGasLimit
}
gasLimit = gasLimit * 15 / 10 // 50% extra gas to void out of gas error
gasLimit = gasLimit * 15 / 10 // 50% extra gas to avoid out of gas error
return gasLimit, nil
}

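Taken together with the relayer change above, two safety margins now stack: the relayer floors the estimate at gas_cost_increase_multiplier (1.2 in the sample config) times the batch's TotalL1CommitGas, and the sender then pads whatever survives by 50% via integer math (gasLimit * 15 / 10). A small sketch with made-up numbers:

package main

import "fmt"

// padGasLimit mirrors the floor-then-pad logic in estimateGasLimit above.
func padGasLimit(estimated, minGasLimit uint64) uint64 {
	if minGasLimit > estimated {
		estimated = minGasLimit
	}
	return estimated * 15 / 10 // 50% extra gas to avoid out-of-gas errors
}

func main() {
	totalL1CommitGas := uint64(1_000_000)                  // from the batch row in the DB
	minGasLimit := uint64(float64(totalL1CommitGas) * 1.2) // 1_200_000
	fmt.Println(padGasLimit(900_000, minGasLimit))         // floor wins: 1_800_000
	fmt.Println(padGasLimit(1_500_000, 0))                 // estimate wins: 2_250_000
}
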
View File

@@ -217,6 +217,7 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
if feeData, err = s.getFeeData(s.auth, target, value, data, minGasLimit); err != nil {
s.metrics.sendTransactionFailureGetFee.WithLabelValues(s.service, s.name).Inc()
log.Error("failed to get fee data", "err", err)
return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
}

View File

@@ -104,19 +104,19 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
// TryProposeBatch tries to propose a new batch.
func (p *BatchProposer) TryProposeBatch() {
p.batchProposerCircleTotal.Inc()
dbChunks, err := p.proposeBatchChunks()
dbChunks, batchMeta, err := p.proposeBatchChunks()
if err != nil {
p.proposeBatchFailureTotal.Inc()
log.Error("proposeBatchChunks failed", "err", err)
return
}
if err := p.updateBatchInfoInDB(dbChunks); err != nil {
if err := p.updateBatchInfoInDB(dbChunks, batchMeta); err != nil {
p.proposeBatchUpdateInfoFailureTotal.Inc()
log.Error("update batch info in db failed", "err", err)
}
}
func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk, batchMeta *types.BatchMeta) error {
p.proposeBatchUpdateInfoTotal.Inc()
numChunks := len(dbChunks)
if numChunks <= 0 {
@@ -127,17 +127,18 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
return err
}
startChunkIndex := dbChunks[0].Index
startChunkHash := dbChunks[0].Hash
endChunkIndex := dbChunks[numChunks-1].Index
endChunkHash := dbChunks[numChunks-1].Hash
batchMeta.StartChunkIndex = dbChunks[0].Index
batchMeta.StartChunkHash = dbChunks[0].Hash
batchMeta.EndChunkIndex = dbChunks[numChunks-1].Index
batchMeta.EndChunkHash = dbChunks[numChunks-1].Hash
err = p.db.Transaction(func(dbTX *gorm.DB) error {
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, chunks, batchMeta, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure", "error", "start chunk index", startChunkIndex, "end chunk index", endChunkIndex, dbErr)
log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure",
"start chunk index", batchMeta.StartChunkIndex, "end chunk index", batchMeta.EndChunkIndex, "error", dbErr)
return dbErr
}
dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batch.Hash, dbTX)
dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, batchMeta.StartChunkIndex, batchMeta.EndChunkIndex, batch.Hash, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.UpdateBatchHashInRange update the chunk's batch hash failure", "hash", batch.Hash, "error", dbErr)
return dbErr
@@ -147,24 +148,30 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
return err
}
func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
dbChunks, err := p.chunkOrm.GetUnbatchedChunks(p.ctx, int(p.maxChunkNumPerBatch)+1)
func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, error) {
unbatchedChunkIndex, err := p.batchOrm.GetFirstUnbatchedChunkIndex(p.ctx)
if err != nil {
return nil, err
return nil, nil, err
}
dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, unbatchedChunkIndex, int(p.maxChunkNumPerBatch)+1)
if err != nil {
return nil, nil, err
}
if len(dbChunks) == 0 {
return nil, nil
return nil, nil, nil
}
var totalL1CommitCalldataSize uint32
var totalL1CommitGas uint64
var totalChunks uint64
var totalL1MessagePopped uint64
var batchMeta types.BatchMeta
parentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
if err != nil {
return nil, err
return nil, nil, err
}
// Add extra gas costs
@@ -184,8 +191,8 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
for i, chunk := range dbChunks {
// metric values
lastTotalL1CommitCalldataSize := totalL1CommitCalldataSize
lastTotalL1CommitGas := totalL1CommitGas
batchMeta.TotalL1CommitGas = totalL1CommitGas
batchMeta.TotalL1CommitCalldataSize = totalL1CommitCalldataSize
totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
totalL1CommitGas += chunk.TotalL1CommitGas
@@ -207,7 +214,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
if i == 0 {
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
return nil, fmt.Errorf(
return nil, nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
@@ -216,7 +223,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
return nil, nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
@@ -234,10 +241,10 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGasPerBatch", p.maxL1CommitGasPerBatch)
p.totalL1CommitGas.Set(float64(lastTotalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(lastTotalL1CommitCalldataSize))
p.totalL1CommitGas.Set(float64(batchMeta.TotalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(batchMeta.TotalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(i))
return dbChunks[:i], nil
return dbChunks[:i], &batchMeta, nil
}
}
@@ -248,16 +255,18 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
"first block timestamp", dbChunks[0].StartBlockTime,
"chunk outdated time threshold", currentTimeSec,
)
batchMeta.TotalL1CommitGas = totalL1CommitGas
batchMeta.TotalL1CommitCalldataSize = totalL1CommitCalldataSize
p.batchFirstBlockTimeoutReached.Inc()
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.totalL1CommitGas.Set(float64(batchMeta.TotalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(batchMeta.TotalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(len(dbChunks)))
return dbChunks, nil
return dbChunks, &batchMeta, nil
}
log.Debug("pending chunks do not reach one of the constraints or contain a timeout block")
p.batchChunksProposeNotEnoughTotal.Inc()
return nil, nil
return nil, nil, nil
}
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {

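The loop above accumulates per-chunk totals and stops before the chunk whose 1.2x over-estimate would exceed the per-batch gas cap. A stripped-down sketch of that shape, with plain numbers standing in for orm.Chunk rows; the real proposer also enforces calldata size, chunk count, and timeout constraints, and errors out if the very first chunk breaks a hard limit:

package main

import "fmt"

// proposeBatch returns how many leading chunks fit into one batch under the
// over-estimated gas cap, mirroring the cut-off logic in proposeBatchChunks.
func proposeBatch(chunkGas []uint64, multiplier float64, maxGasPerBatch uint64) int {
	var total uint64
	for i, g := range chunkGas {
		total += g
		overEstimate := uint64(multiplier * float64(total))
		if overEstimate > maxGasPerBatch {
			return i // batch only the chunks before this one
		}
	}
	return len(chunkGas) // all pending chunks fit
}

func main() {
	// 3.6M, 7.2M ok; 10.8M exceeds the 8M cap, so two chunks are batched
	fmt.Println(proposeBatch([]uint64{3_000_000, 3_000_000, 3_000_000}, 1.2, 8_000_000)) // 2
}
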
View File

@@ -23,7 +23,7 @@ func testBatchProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxL2TxNumPerChunk: 10000,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
@@ -39,11 +39,6 @@ func testBatchProposer(t *testing.T) {
}, db, nil)
bp.TryProposeBatch()
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background(), 0)
assert.NoError(t, err)
assert.Empty(t, chunks)
batchOrm := orm.NewBatch(db)
// get all batches.
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
@@ -54,6 +49,7 @@ func testBatchProposer(t *testing.T) {
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
chunkOrm := orm.NewChunk(db)
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, batches, 1)

View File

@@ -55,7 +55,7 @@ type ChunkProposer struct {
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
maxL2TxNumPerChunk uint64
maxTxNumPerChunk uint64
maxL1CommitGasPerChunk uint64
maxL1CommitCalldataSizePerChunk uint64
maxRowConsumptionPerChunk uint64
@@ -66,7 +66,7 @@ type ChunkProposer struct {
proposeChunkFailureTotal prometheus.Counter
proposeChunkUpdateInfoTotal prometheus.Counter
proposeChunkUpdateInfoFailureTotal prometheus.Counter
chunkL2TxNum prometheus.Gauge
chunkTxNum prometheus.Gauge
chunkEstimateL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
totalTxGasUsed prometheus.Gauge
@@ -79,7 +79,7 @@ type ChunkProposer struct {
// NewChunkProposer creates a new ChunkProposer instance.
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
log.Debug("new chunk proposer",
"maxL2TxNumPerChunk", cfg.MaxL2TxNumPerChunk,
"maxTxNumPerChunk", cfg.MaxTxNumPerChunk,
"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
"maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
@@ -90,7 +90,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
db: db,
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
maxL2TxNumPerChunk: cfg.MaxL2TxNumPerChunk,
maxTxNumPerChunk: cfg.MaxTxNumPerChunk,
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
@@ -113,9 +113,9 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
Name: "bridge_propose_chunk_update_info_failure_total",
Help: "Total number of propose chunk update info failure total.",
}),
chunkL2TxNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_l2_tx_num",
Help: "The chunk l2 tx num",
chunkTxNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_tx_num",
Help: "The chunk tx num",
}),
chunkEstimateL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_estimate_l1_commit_gas",
@@ -186,7 +186,12 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
}
func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx, maxNumBlockPerChunk)
unchunkedBlockHeight, err := p.chunkOrm.GetUnchunkedBlockHeight(p.ctx)
if err != nil {
return nil, err
}
blocks, err := p.l2BlockOrm.GetL2WrappedBlocksGEHeight(p.ctx, unchunkedBlockHeight, maxNumBlockPerChunk)
if err != nil {
return nil, err
}
@@ -197,21 +202,21 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
var chunk types.Chunk
var totalTxGasUsed uint64
var totalL2TxNum uint64
var totalTxNum uint64
var totalL1CommitCalldataSize uint64
var totalL1CommitGas uint64
crc := chunkRowConsumption{}
for i, block := range blocks {
// metric values
lastTotalL2TxNum := totalL2TxNum
lastTotalTxNum := totalTxNum
lastTotalL1CommitGas := totalL1CommitGas
lastCrcMax := crc.max()
lastTotalL1CommitCalldataSize := totalL1CommitCalldataSize
lastTotalTxGasUsed := totalTxGasUsed
totalTxGasUsed += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalTxNum += uint64(len(block.Transactions))
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas = chunk.EstimateL1CommitGas()
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
@@ -220,19 +225,19 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
}
crcMax := crc.max()
if totalL2TxNum > p.maxL2TxNumPerChunk ||
if totalTxNum > p.maxTxNumPerChunk ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk ||
crcMax > p.maxRowConsumptionPerChunk {
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if i == 0 {
if totalL2TxNum > p.maxL2TxNumPerChunk {
if totalTxNum > p.maxTxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
block.Header.Number,
totalL2TxNum,
p.maxL2TxNumPerChunk,
totalTxNum,
p.maxTxNumPerChunk,
)
}
@@ -266,8 +271,8 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
}
log.Debug("breaking limit condition in chunking",
"totalL2TxNum", totalL2TxNum,
"maxL2TxNumPerChunk", p.maxL2TxNumPerChunk,
"totalTxNum", totalTxNum,
"maxTxNumPerChunk", p.maxTxNumPerChunk,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitCalldataSizePerChunk", p.maxL1CommitCalldataSizePerChunk,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
@@ -276,7 +281,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
"chunkRowConsumption", crc,
"p.maxRowConsumptionPerChunk", p.maxRowConsumptionPerChunk)
p.chunkL2TxNum.Set(float64(lastTotalL2TxNum))
p.chunkTxNum.Set(float64(lastTotalTxNum))
p.chunkEstimateL1CommitGas.Set(float64(lastTotalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(lastTotalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(lastCrcMax))
@@ -295,7 +300,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
"block outdated time threshold", currentTimeSec,
)
p.chunkFirstBlockTimeoutReached.Inc()
p.chunkL2TxNum.Set(float64(totalL2TxNum))
p.chunkTxNum.Set(float64(totalTxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(crc.max()))
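The proposer above snapshots each running total before adding a block, so when any limit trips, the chunk ends at the previous block boundary and the metrics gauges report the pre-overflow values. A minimal standalone sketch of that pattern, with illustrative names (`proposeBoundary`, `txPerBlock`) rather than the actual proposer code:

```go
package main

import "fmt"

// proposeBoundary returns how many leading blocks fit under maxTxNum,
// mirroring the lastTotal* snapshot-and-break pattern in proposeChunk.
func proposeBoundary(txPerBlock []uint64, maxTxNum uint64) int {
	var total uint64
	for i, n := range txPerBlock {
		last := total // snapshot before adding block i
		total += n
		if total > maxTxNum {
			_ = last // the metrics gauges would be set from this snapshot
			return i // blocks [0, i) form the chunk
		}
	}
	return len(txPerBlock)
}

func main() {
	fmt.Println(proposeBoundary([]uint64{3, 4, 5}, 8)) // 2: the third block overflows
}
```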

View File

@@ -23,7 +23,7 @@ func testChunkProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxL2TxNumPerChunk: 10000,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
@@ -38,7 +38,7 @@ func testChunkProposer(t *testing.T) {
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background(), 0)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 1)
assert.Equal(t, expectedHash.Hex(), chunks[0].Hash)
@@ -53,7 +53,7 @@ func testChunkProposerRowConsumption(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxL2TxNumPerChunk: 10000,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 0, // !
@@ -62,7 +62,7 @@ func testChunkProposerRowConsumption(t *testing.T) {
cp.TryProposeChunk()
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background(), 0)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 0)
}

View File

@@ -53,9 +53,11 @@ type Batch struct {
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas;default:0"`
TotalL1CommitCalldataSize uint32 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size;default:0"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewBatch creates a new Batch database instance.
@@ -142,6 +144,21 @@ func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
return &latestBatch, nil
}
// GetFirstUnbatchedChunkIndex retrieves the first unbatched chunk index.
func (o *Batch) GetFirstUnbatchedChunkIndex(ctx context.Context) (uint64, error) {
// Get the latest batch
latestBatch, err := o.GetLatestBatch(ctx)
if err != nil {
return 0, fmt.Errorf("Chunk.GetChunkedBlockHeight error: %w", err)
}
// latestBatch == nil means gorm.ErrRecordNotFound was returned,
// i.e. no chunk has been batched yet, so return 0
if latestBatch == nil {
return 0, nil
}
return latestBatch.EndChunkIndex + 1, nil
}
// GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes.
func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) {
if len(hashes) == 0 {
@@ -209,7 +226,7 @@ func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, erro
}
// InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*types.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
func (o *Batch) InsertBatch(ctx context.Context, chunks []*types.Chunk, batchMeta *types.BatchMeta, dbTX ...*gorm.DB) (*Batch, error) {
if len(chunks) == 0 {
return nil, errors.New("invalid args")
}
@@ -255,20 +272,22 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
lastChunkBlockNum := len(chunks[numChunks-1].Blocks)
newBatch := Batch{
Index: batchIndex,
Hash: batchHeader.Hash().Hex(),
StartChunkHash: startChunkHash,
StartChunkIndex: startChunkIndex,
EndChunkHash: endChunkHash,
EndChunkIndex: endChunkIndex,
StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawRoot.Hex(),
ParentBatchHash: parentBatchHash.Hex(),
BatchHeader: batchHeader.Encode(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
Index: batchIndex,
Hash: batchHeader.Hash().Hex(),
StartChunkHash: batchMeta.StartChunkHash,
StartChunkIndex: batchMeta.StartChunkIndex,
EndChunkHash: batchMeta.EndChunkHash,
EndChunkIndex: batchMeta.EndChunkIndex,
StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawRoot.Hex(),
ParentBatchHash: parentBatchHash.Hex(),
BatchHeader: batchHeader.Encode(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
TotalL1CommitGas: batchMeta.TotalL1CommitGas,
TotalL1CommitCalldataSize: batchMeta.TotalL1CommitCalldataSize,
}
db := o.db

View File

@@ -87,24 +87,6 @@ func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endInde
return chunks, nil
}
// GetUnbatchedChunks retrieves unbatched chunks from the database.
func (o *Chunk) GetUnbatchedChunks(ctx context.Context, limit int) ([]*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash IS NULL")
db = db.Order("index asc")
if limit > 0 {
db = db.Limit(limit)
}
var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetUnbatchedChunks error: %w", err)
}
return chunks, nil
}
// GetLatestChunk retrieves the latest chunk from the database.
func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
db := o.db.WithContext(ctx)
@@ -118,6 +100,40 @@ func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
return &latestChunk, nil
}
// GetUnchunkedBlockHeight retrieves the first unchunked block number.
func (o *Chunk) GetUnchunkedBlockHeight(ctx context.Context) (uint64, error) {
// Get the latest chunk
latestChunk, err := o.GetLatestChunk(ctx)
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
// if there is no chunk yet, return block number 1,
// since the genesis block does not need to be chunked
return 1, nil
}
return 0, fmt.Errorf("Chunk.GetChunkedBlockHeight error: %w", err)
}
return latestChunk.EndBlockNumber + 1, nil
}
// GetChunksGEIndex retrieves chunks that have a chunk index greater than or equal to the given index.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetChunksGEIndex(ctx context.Context, index uint64, limit int) ([]*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("index >= ?", index)
db = db.Order("index ASC")
if limit > 0 {
db = db.Limit(limit)
}
var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetChunksGEIndex error: %w", err)
}
return chunks, nil
}
// InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {
@@ -155,7 +171,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
var totalL1CommitCalldataSize uint64
for _, block := range chunk.Blocks {
totalL2TxGas += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalL2TxNum += block.NumL2Transactions()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
}

View File

@@ -64,13 +64,14 @@ func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (uint64, error) {
return maxNumber, nil
}
// GetUnchunkedBlocks get the l2 blocks that have not been put into a chunk.
// GetL2WrappedBlocksGEHeight retrieves L2 blocks that have a block number greater than or equal to the given height.
// The blocks are converted into WrappedBlock format for output.
// The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetUnchunkedBlocks(ctx context.Context, limit int) ([]*types.WrappedBlock, error) {
func (o *L2Block) GetL2WrappedBlocksGEHeight(ctx context.Context, height uint64, limit int) ([]*types.WrappedBlock, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_root, row_consumption")
db = db.Where("chunk_hash IS NULL")
db = db.Where("number >= ?", height)
db = db.Order("number ASC")
if limit > 0 {
@@ -79,7 +80,7 @@ func (o *L2Block) GetUnchunkedBlocks(ctx context.Context, limit int) ([]*types.W
var l2Blocks []L2Block
if err := db.Find(&l2Blocks).Error; err != nil {
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
return nil, fmt.Errorf("L2Block.GetL2WrappedBlocksGEHeight error: %w", err)
}
var wrappedBlocks []*types.WrappedBlock
@@ -87,18 +88,18 @@ func (o *L2Block) GetUnchunkedBlocks(ctx context.Context, limit int) ([]*types.W
var wrappedBlock types.WrappedBlock
if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
return nil, fmt.Errorf("L2Block.GetL2WrappedBlocksGEHeight error: %w", err)
}
wrappedBlock.Header = &gethTypes.Header{}
if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
return nil, fmt.Errorf("L2Block.GetL2WrappedBlocksGEHeight error: %w", err)
}
wrappedBlock.WithdrawRoot = common.HexToHash(v.WithdrawRoot)
if err := json.Unmarshal([]byte(v.RowConsumption), &wrappedBlock.RowConsumption); err != nil {
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
return nil, fmt.Errorf("L2Block.GetL2WrappedBlocksGEHeight error: %w", err)
}
wrappedBlocks = append(wrappedBlocks, &wrappedBlock)

View File

@@ -101,25 +101,26 @@ func TestL2BlockOrm(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, uint64(3), height)
blocks, err := l2BlockOrm.GetUnchunkedBlocks(context.Background(), 0)
blocks, err := l2BlockOrm.GetL2Blocks(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, blocks, 2)
assert.Equal(t, wrappedBlock1, blocks[0])
assert.Equal(t, wrappedBlock2, blocks[1])
assert.Equal(t, "", blocks[0].ChunkHash)
assert.Equal(t, "", blocks[1].ChunkHash)
blocks, err = l2BlockOrm.GetL2BlocksInRange(context.Background(), 2, 3)
wrappedBlocks, err := l2BlockOrm.GetL2BlocksInRange(context.Background(), 2, 3)
assert.NoError(t, err)
assert.Len(t, blocks, 2)
assert.Equal(t, wrappedBlock1, blocks[0])
assert.Equal(t, wrappedBlock2, blocks[1])
assert.Equal(t, wrappedBlock1, wrappedBlocks[0])
assert.Equal(t, wrappedBlock2, wrappedBlocks[1])
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 2, 2, "test hash")
assert.NoError(t, err)
blocks, err = l2BlockOrm.GetUnchunkedBlocks(context.Background(), 0)
blocks, err = l2BlockOrm.GetL2Blocks(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, blocks, 1)
assert.Equal(t, wrappedBlock2, blocks[0])
assert.Len(t, blocks, 2)
assert.Equal(t, "test hash", blocks[0].ChunkHash)
assert.Equal(t, "", blocks[1].ChunkHash)
}
func TestChunkOrm(t *testing.T) {
@@ -135,11 +136,13 @@ func TestChunkOrm(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background(), 0)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, "", chunks[0].BatchHash)
assert.Equal(t, "", chunks[1].BatchHash)
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash1.Hex(), types.ProvingTaskVerified)
assert.NoError(t, err)
@@ -156,9 +159,13 @@ func TestChunkOrm(t *testing.T) {
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, "test hash")
assert.NoError(t, err)
chunks, err = chunkOrm.GetUnbatchedChunks(context.Background(), 0)
chunks, err = chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 1)
assert.Len(t, chunks, 2)
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
assert.Equal(t, "test hash", chunks[0].BatchHash)
assert.Equal(t, "", chunks[1].BatchHash)
}
func TestBatchOrm(t *testing.T) {
@@ -166,7 +173,13 @@ func TestBatchOrm(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
batchMeta1 := &types.BatchMeta{
StartChunkIndex: 0,
StartChunkHash: chunkHash1.Hex(),
EndChunkIndex: 0,
EndChunkHash: chunkHash1.Hex(),
}
batch1, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk1}, batchMeta1)
assert.NoError(t, err)
hash1 := batch1.Hash
@@ -177,7 +190,13 @@ func TestBatchOrm(t *testing.T) {
batchHash1 := batchHeader1.Hash().Hex()
assert.Equal(t, hash1, batchHash1)
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
batchMeta2 := &types.BatchMeta{
StartChunkIndex: 1,
StartChunkHash: chunkHash2.Hex(),
EndChunkIndex: 1,
EndChunkHash: chunkHash2.Hex(),
}
batch2, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk2}, batchMeta2)
assert.NoError(t, err)
hash2 := batch2.Hash

View File

@@ -2,8 +2,11 @@ package tests
import (
"context"
"net/http"
"strings"
"testing"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -13,6 +16,7 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
@@ -90,6 +94,23 @@ func setupEnv(t *testing.T) {
assert.NoError(t, err)
}
func mockChainMonitorServer(baseURL string) (*http.Server, error) {
router := gin.New()
r := router.Group("/v1")
r.GET("/batch_status", func(ctx *gin.Context) {
ctx.JSON(http.StatusOK, struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
Data bool `json:"data"`
}{
ErrCode: 0,
ErrMsg: "",
Data: true,
})
})
return utils.StartHTTPServer(strings.Split(baseURL, "//")[1], router)
}
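For reference, a caller can exercise this mock the same way the relayer's chain-monitor client would. A hedged sketch; the address is illustrative and should be whatever `baseURL` resolves to:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// batchStatusResp mirrors the JSON shape served by mockChainMonitorServer.
type batchStatusResp struct {
	ErrCode int    `json:"errcode"`
	ErrMsg  string `json:"errmsg"`
	Data    bool   `json:"data"`
}

func main() {
	// The address is made up; use the configured ChainMonitor.BaseURL.
	resp, err := http.Get("http://localhost:12345/v1/batch_status")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var r batchStatusResp
	if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
		panic(err)
	}
	fmt.Printf("batch ok: %v (errcode=%d, errmsg=%q)\n", r.Data, r.ErrCode, r.ErrMsg)
}
```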
func prepareContracts(t *testing.T) {
var err error
var tx *types.Transaction
@@ -128,6 +149,9 @@ func prepareContracts(t *testing.T) {
func TestFunction(t *testing.T) {
setupEnv(t)
srv, err := mockChainMonitorServer(bridgeApp.Config.L2Config.RelayerConfig.ChainMonitor.BaseURL)
assert.NoError(t, err)
defer srv.Close()
// process start test
t.Run("TestProcessStart", testProcessStart)

View File

@@ -90,8 +90,14 @@ func testImportL2GasPrice(t *testing.T) {
chunkHash, err := chunk.Hash(0)
assert.NoError(t, err)
batchMeta := &types.BatchMeta{
StartChunkIndex: 0,
StartChunkHash: chunkHash.Hex(),
EndChunkIndex: 0,
EndChunkHash: chunkHash.Hex(),
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash.Hex(), chunkHash.Hex(), []*types.Chunk{chunk})
_, err = batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk}, batchMeta)
assert.NoError(t, err)
// check db status

View File

@@ -58,7 +58,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.NoError(t, err)
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxL2TxNumPerChunk: 10000,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
@@ -66,8 +66,12 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
}, db, nil)
cp.TryProposeChunk()
batchOrm := orm.NewBatch(db)
unbatchedChunkIndex, err := batchOrm.GetFirstUnbatchedChunkIndex(context.Background())
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background(), 0)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), unbatchedChunkIndex, 0)
assert.NoError(t, err)
assert.Len(t, chunks, 1)
@@ -81,7 +85,6 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
l2Relayer.ProcessPendingBatches()
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, batch)

View File

@@ -5,8 +5,9 @@ import (
"os"
"os/exec"
"strings"
"sync"
"sync/atomic"
"github.com/docker/docker/pkg/reexec"
cmap "github.com/orcaman/concurrent-map"
)
@@ -26,8 +27,9 @@ type Cmd struct {
name string
args []string
mu sync.Mutex
cmd *exec.Cmd
isRunning uint64
cmd *exec.Cmd
app *exec.Cmd
checkFuncs cmap.ConcurrentMap //map[string]checkFunc
@@ -38,13 +40,23 @@ type Cmd struct {
}
// NewCmd creates a Cmd instance.
func NewCmd(name string, args ...string) *Cmd {
return &Cmd{
func NewCmd(name string, params ...string) *Cmd {
cmd := &Cmd{
checkFuncs: cmap.New(),
name: name,
args: args,
args: params,
ErrChan: make(chan error, 10),
cmd: exec.Command(name, params...),
app: &exec.Cmd{
Path: reexec.Self(),
Args: append([]string{name}, params...),
},
}
cmd.cmd.Stdout = cmd
cmd.cmd.Stderr = cmd
cmd.app.Stdout = cmd
cmd.app.Stderr = cmd
return cmd
}
// RegistFunc registers a check function.
@@ -58,15 +70,14 @@ func (c *Cmd) UnRegistFunc(key string) {
}
func (c *Cmd) runCmd() {
cmd := exec.Command(c.args[0], c.args[1:]...) //nolint:gosec
cmd.Stdout = c
cmd.Stderr = c
c.ErrChan <- cmd.Run()
fmt.Println("cmd:", append([]string{c.name}, c.args...))
if atomic.CompareAndSwapUint64(&c.isRunning, 0, 1) {
c.ErrChan <- c.cmd.Run()
}
}
// RunCmd parallel running when parallel is true.
func (c *Cmd) RunCmd(parallel bool) {
fmt.Println("cmd:", c.args)
if parallel {
go c.runCmd()
} else {

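The `isRunning` field introduced above is a compare-and-swap guard so each Cmd can only be started once, whether via `runCmd` or `runApp`. A minimal sketch of that guard in isolation:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type starter struct{ isRunning uint64 }

// start succeeds only for the first caller; later calls are no-ops.
func (s *starter) start() bool {
	return atomic.CompareAndSwapUint64(&s.isRunning, 0, 1)
}

func main() {
	var s starter
	fmt.Println(s.start()) // true: first start wins
	fmt.Println(s.start()) // false: already running
}
```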
View File

@@ -3,41 +3,45 @@ package cmd
import (
"fmt"
"os"
"os/exec"
"strings"
"sync/atomic"
"syscall"
"testing"
"time"
"github.com/docker/docker/pkg/reexec"
"github.com/stretchr/testify/assert"
)
// IsRunning reports whether the command has started (isRunning: 1 started, 0 not started).
func (c *Cmd) IsRunning() bool {
return atomic.LoadUint64(&c.isRunning) == 1
}
func (c *Cmd) runApp() {
fmt.Println("cmd:", append([]string{c.name}, c.args...))
if atomic.CompareAndSwapUint64(&c.isRunning, 0, 1) {
c.ErrChan <- c.app.Run()
}
}
// RunApp exec's the current binary using name as argv[0] which will trigger the
// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go)
func (c *Cmd) RunApp(waitResult func() bool) {
fmt.Println("cmd: ", append([]string{c.name}, c.args...))
cmd := &exec.Cmd{
Path: reexec.Self(),
Args: append([]string{c.name}, c.args...),
Stderr: c,
Stdout: c,
}
if waitResult != nil {
go func() {
_ = cmd.Run()
c.runApp()
}()
waitResult()
} else {
_ = cmd.Run()
c.runApp()
}
c.mu.Lock()
c.cmd = cmd
c.mu.Unlock()
}
// WaitExit wait util process exit.
func (c *Cmd) WaitExit() {
if atomic.LoadUint64(&c.isRunning) == 0 {
return
}
// Wait until all the check functions have finished; break the loop when an error appears.
var err error
for err == nil && !c.checkFuncs.IsEmpty() {
@@ -52,20 +56,18 @@ func (c *Cmd) WaitExit() {
}
// Send interrupt signal.
c.mu.Lock()
_ = c.cmd.Process.Signal(os.Interrupt)
// should use `_ = c.cmd.Process.Wait()` here, but we have some bugs in coordinator's graceful exit,
_ = c.app.Process.Signal(os.Interrupt)
// should use `_ = c.app.Process.Wait()` here, but we have some bugs in coordinator's graceful exit,
// so we use `Kill` as a temp workaround. And since `WaitExit` is only used in integration tests,
// it won't really affect our functionality.
_ = c.cmd.Process.Kill()
c.mu.Unlock()
if err = c.app.Process.Signal(syscall.SIGTERM); err != nil {
_ = c.app.Process.Kill()
}
}
// Interrupt send interrupt signal.
func (c *Cmd) Interrupt() {
c.mu.Lock()
c.ErrChan <- c.cmd.Process.Signal(os.Interrupt)
c.mu.Unlock()
c.ErrChan <- c.app.Process.Signal(os.Interrupt)
}
// WaitResult return true when get the keyword during timeout.

View File

@@ -12,7 +12,7 @@ import (
)
func TestCmd(t *testing.T) {
app := cmd.NewCmd("curTime", "date", "+%Y-%m-%d")
app := cmd.NewCmd("date", "+%Y-%m-%d")
tm := time.Now()
curTime := fmt.Sprintf("%d-%02d-%02d", tm.Year(), tm.Month(), tm.Day())

View File

@@ -27,6 +27,7 @@ var (
// AppAPI app interface.
type AppAPI interface {
IsRunning() bool
WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
RunApp(waitResult func() bool)
WaitExit()

View File

@@ -36,7 +36,7 @@ func NewImgDB(image, password, dbName string, port int) ImgInstance {
dbName: dbName,
port: port,
}
img.cmd = cmd.NewCmd(img.name, img.prepare()...)
img.cmd = cmd.NewCmd("docker", img.prepare()...)
return img
}
@@ -89,7 +89,7 @@ func (i *ImgDB) IsRunning() bool {
}
func (i *ImgDB) prepare() []string {
cmd := []string{"docker", "run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
cmd := []string{"run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
envs := []string{
"-e", "POSTGRES_PASSWORD=" + i.password,
"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),

View File

@@ -42,7 +42,7 @@ func NewImgGeth(image, volume, ipc string, hPort, wPort int) GethImgInstance {
httpPort: hPort,
wsPort: wPort,
}
img.cmd = cmd.NewCmd(img.name, img.prepare()...)
img.cmd = cmd.NewCmd("docker", img.params()...)
return img
}
@@ -149,8 +149,8 @@ func (i *ImgGeth) Stop() error {
return cli.ContainerRemove(ctx, i.id, types.ContainerRemoveOptions{})
}
func (i *ImgGeth) prepare() []string {
cmds := []string{"docker", "run", "--rm", "--name", i.name}
func (i *ImgGeth) params() []string {
cmds := []string{"run", "--rm", "--name", i.name}
var ports []string
if i.httpPort != 0 {
ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)

View File

@@ -117,7 +117,7 @@ require (
golang.org/x/arch v0.4.0 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect

View File

@@ -576,8 +576,8 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

View File

@@ -1,4 +1,7 @@
use crate::utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR};
use crate::{
types::{CheckChunkProofsResponse, ProofResult},
utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR},
};
use libc::c_char;
use prover::{
aggregator::{Prover, Verifier},
@@ -54,13 +57,35 @@ pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> c_char {
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs).unwrap();
assert!(!chunk_proofs.is_empty());
pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *const c_char {
let check_result: Result<bool, String> = panic::catch_unwind(|| {
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
let valid = panic::catch_unwind(|| PROVER.get().unwrap().check_chunk_proofs(&chunk_proofs));
valid.unwrap_or(false) as c_char
if chunk_proofs.is_empty() {
return Err("provided chunk proofs are empty.".to_string());
}
let prover_ref = PROVER.get().expect("failed to get reference to PROVER.");
let valid = prover_ref.check_chunk_proofs(&chunk_proofs);
Ok(valid)
})
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
let r = match check_result {
Ok(valid) => CheckChunkProofsResponse {
ok: valid,
error: None,
},
Err(err) => CheckChunkProofsResponse {
ok: false,
error: Some(err),
},
};
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
}
/// # Safety
@@ -69,28 +94,47 @@ pub unsafe extern "C" fn gen_batch_proof(
chunk_hashes: *const c_char,
chunk_proofs: *const c_char,
) -> *const c_char {
let chunk_hashes = c_char_to_vec(chunk_hashes);
let chunk_proofs = c_char_to_vec(chunk_proofs);
let proof_result: Result<Vec<u8>, String> = panic::catch_unwind(|| {
let chunk_hashes = c_char_to_vec(chunk_hashes);
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_hashes = serde_json::from_slice::<Vec<ChunkHash>>(&chunk_hashes).unwrap();
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs).unwrap();
assert_eq!(chunk_hashes.len(), chunk_proofs.len());
let chunk_hashes = serde_json::from_slice::<Vec<ChunkHash>>(&chunk_hashes)
.map_err(|e| format!("failed to deserialize chunk hashes: {e:?}"))?;
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
let chunk_hashes_proofs = chunk_hashes
.into_iter()
.zip(chunk_proofs.into_iter())
.collect();
if chunk_hashes.len() != chunk_proofs.len() {
return Err(format!("chunk hashes and chunk proofs lengths mismatch: chunk_hashes.len() = {}, chunk_proofs.len() = {}",
chunk_hashes.len(), chunk_proofs.len()));
}
let chunk_hashes_proofs = chunk_hashes
.into_iter()
.zip(chunk_proofs.into_iter())
.collect();
let proof_result = panic::catch_unwind(|| {
let proof = PROVER
.get_mut()
.unwrap()
.expect("failed to get mutable reference to PROVER.")
.gen_agg_evm_proof(chunk_hashes_proofs, None, OUTPUT_DIR.as_deref())
.unwrap();
.map_err(|e| format!("failed to generate proof: {e:?}"))?;
serde_json::to_vec(&proof).unwrap()
});
proof_result.map_or(null(), vec_to_c_char)
serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
})
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
let r = match proof_result {
Ok(proof_bytes) => ProofResult {
message: Some(proof_bytes),
error: None,
},
Err(err) => ProofResult {
message: None,
error: Some(err),
},
};
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
}
/// # Safety

View File

@@ -1,4 +1,7 @@
use crate::utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR};
use crate::{
types::ProofResult,
utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR},
};
use libc::c_char;
use prover::{
utils::init_env_and_log,
@@ -55,20 +58,33 @@ pub unsafe extern "C" fn get_chunk_vk() -> *const c_char {
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char {
let block_traces = c_char_to_vec(block_traces);
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces).unwrap();
let proof_result: Result<Vec<u8>, String> = panic::catch_unwind(|| {
let block_traces = c_char_to_vec(block_traces);
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces)
.map_err(|e| format!("failed to deserialize block traces: {e:?}"))?;
let proof_result = panic::catch_unwind(|| {
let proof = PROVER
.get_mut()
.unwrap()
.expect("failed to get mutable reference to PROVER.")
.gen_chunk_proof(block_traces, None, OUTPUT_DIR.as_deref())
.unwrap();
.map_err(|e| format!("failed to generate proof: {e:?}"))?;
serde_json::to_vec(&proof).unwrap()
});
serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
})
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
proof_result.map_or(null(), vec_to_c_char)
let r = match proof_result {
Ok(proof_bytes) => ProofResult {
message: Some(proof_bytes),
error: None,
},
Err(err) => ProofResult {
message: None,
error: Some(err),
},
};
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
}
/// # Safety

View File

@@ -2,4 +2,5 @@
mod batch;
mod chunk;
mod types;
mod utils;

View File

@@ -0,0 +1,22 @@
use serde::{Deserialize, Serialize};
// Represents the result of a chunk proof checking operation.
// `ok` indicates whether the proof checking was successful.
// `error` provides additional details in case the check failed.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CheckChunkProofsResponse {
pub ok: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
}
// Encapsulates the result from generating a proof.
// `message` holds the generated proof in byte slice format.
// `error` provides additional details in case the proof generation failed.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ProofResult {
#[serde(skip_serializing_if = "Option::is_none")]
pub message: Option<Vec<u8>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
}
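On the Go side of the FFI boundary these JSON payloads have to be decoded. A hedged sketch of mirror types; the field names come from the serde definitions above, everything else is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// checkChunkProofsResponse mirrors CheckChunkProofsResponse above.
type checkChunkProofsResponse struct {
	OK    bool    `json:"ok"`
	Error *string `json:"error,omitempty"`
}

// proofResult mirrors ProofResult. serde_json encodes Vec<u8> as a JSON
// array of numbers, so it is mirrored with []uint rather than []byte
// (encoding/json would expect a base64 string for []byte).
type proofResult struct {
	Message []uint  `json:"message,omitempty"`
	Error   *string `json:"error,omitempty"`
}

func main() {
	raw := []byte(`{"ok":false,"error":"provided chunk proofs are empty."}`)
	var r checkChunkProofsResponse
	if err := json.Unmarshal(raw, &r); err != nil {
		panic(err)
	}
	fmt.Println(r.OK, *r.Error) // false provided chunk proofs are empty.
}
```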

View File

@@ -1,7 +1,7 @@
void init_batch_prover(char* params_dir, char* assets_dir);
void init_batch_verifier(char* params_dir, char* assets_dir);
char* get_batch_vk();
char check_chunk_proofs(char* chunk_proofs);
char* check_chunk_proofs(char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
char verify_batch_proof(char* proof);

View File

@@ -10,6 +10,16 @@ import (
"github.com/scroll-tech/go-ethereum/crypto"
)
// BatchMeta contains metadata of a batch.
type BatchMeta struct {
StartChunkIndex uint64
StartChunkHash string
EndChunkIndex uint64
EndChunkHash string
TotalL1CommitGas uint64
TotalL1CommitCalldataSize uint32
}
// BatchHeader contains batch header info to be committed.
type BatchHeader struct {
// Encoded in BatchHeaderV0Codec

View File

@@ -98,9 +98,10 @@ func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
if txData.Type == types.L1MessageTxType {
continue
}
size += 64 // 60 bytes BlockContext + 4 bytes payload length
size += 4 // 4 bytes payload length
size += w.getTxPayloadLength(txData)
}
size += 60 // 60 bytes BlockContext
return size
}
@@ -116,10 +117,13 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
txPayloadLength := w.getTxPayloadLength(txData)
total += CalldataNonZeroByteGas * txPayloadLength // an over-estimate: treat each byte as non-zero
total += CalldataNonZeroByteGas * 64 // 60 bytes BlockContext + 4 bytes payload length
total += CalldataNonZeroByteGas * 4 // 4 bytes payload length
total += GetKeccak256Gas(txPayloadLength) // l2 tx hash
}
// 60 bytes BlockContext calldata
total += CalldataNonZeroByteGas * 60
// sload
total += 2100 * numL1Messages // numL1Messages times cold sload in L1MessageQueue
@@ -130,17 +134,6 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
return total
}
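A quick consistency check on the refactor above, which only moves the 60-byte BlockContext from the per-transaction term to a per-block term (CalldataNonZeroByteGas is 16): for the two-L2-tx template block in the tests below, calldata size drops by 60 bytes (2×64 becomes 2×4 + 60, so 358 becomes 298) and commit gas drops by 16 × 60 = 960 (6966 becomes 6006), while a single-tx block such as wrappedBlock2 stays at 96 bytes since 1×64 = 1×4 + 60.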
// L2TxsNum calculates the number of l2 txs.
func (w *WrappedBlock) L2TxsNum() uint64 {
var count uint64
for _, txData := range w.Transactions {
if txData.Type != types.L1MessageTxType {
count++
}
}
return count
}
func (w *WrappedBlock) getTxPayloadLength(txData *types.TransactionData) uint64 {
if w.txPayloadLengthCache == nil {
w.txPayloadLengthCache = make(map[string]uint64)

View File

@@ -38,15 +38,15 @@ func TestChunkEncode(t *testing.T) {
wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
assert.Equal(t, uint64(0), wrappedBlock.NumL1Messages(0))
assert.Equal(t, uint64(358), wrappedBlock.EstimateL1CommitCalldataSize())
assert.Equal(t, uint64(2), wrappedBlock.L2TxsNum())
assert.Equal(t, uint64(298), wrappedBlock.EstimateL1CommitCalldataSize())
assert.Equal(t, uint64(2), wrappedBlock.NumL2Transactions())
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock,
},
}
assert.Equal(t, uint64(0), chunk.NumL1Messages(0))
assert.Equal(t, uint64(6966), chunk.EstimateL1CommitGas())
assert.Equal(t, uint64(6006), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString := hex.EncodeToString(bytes)
assert.NoError(t, err)
@@ -61,7 +61,7 @@ func TestChunkEncode(t *testing.T) {
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
assert.Equal(t, uint64(11), wrappedBlock2.NumL1Messages(0)) // 0..=9 skipped, 10 included
assert.Equal(t, uint64(96), wrappedBlock2.EstimateL1CommitCalldataSize())
assert.Equal(t, uint64(1), wrappedBlock2.L2TxsNum())
assert.Equal(t, uint64(1), wrappedBlock2.NumL2Transactions())
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,

View File

@@ -211,6 +211,7 @@ func (a *ProofMsg) PublicKey() (string, error) {
// TaskMsg is a wrapper type around db ProveTask type.
type TaskMsg struct {
UUID string `json:"uuid"`
ID string `json:"id"`
Type ProofType `json:"type,omitempty"`
BatchTaskDetail *BatchTaskDetail `json:"batch_task_detail,omitempty"`

27
common/utils/http.go Normal file
View File

@@ -0,0 +1,27 @@
package utils
import (
"net/http"
"time"
)
// StartHTTPServer starts an HTTP server on the given address and returns it once it is serving.
func StartHTTPServer(address string, handler http.Handler) (*http.Server, error) {
srv := &http.Server{
Handler: handler,
Addr: address,
ReadTimeout: time.Second * 3,
WriteTimeout: time.Second * 3,
IdleTimeout: time.Second * 12,
}
errCh := make(chan error, 1)
go func() {
errCh <- srv.ListenAndServe()
}()
select {
case err := <-errCh:
return nil, err
case <-time.After(time.Second):
}
return srv, nil
}
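A minimal usage sketch, assuming the `scroll-tech/common/utils` import path; the handler and address are illustrative. The helper surfaces a listen failure within one second, otherwise hands back the running server:

```go
package main

import (
	"fmt"
	"net/http"

	"scroll-tech/common/utils"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	srv, err := utils.StartHTTPServer("localhost:18080", mux)
	if err != nil {
		panic(err) // e.g. the port is already in use
	}
	defer srv.Close()
	// ... run tests against http://localhost:18080/healthz ...
}
```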

View File

@@ -2,6 +2,9 @@ package utils
import (
"context"
"crypto/rand"
"fmt"
"math/big"
"time"
"github.com/modern-go/reflect2"
@@ -50,3 +53,9 @@ func Loop(ctx context.Context, period time.Duration, f func()) {
func IsNil(i interface{}) bool {
return i == nil || reflect2.IsNil(i)
}
// RandomURL returns a localhost endpoint with a random port in the range 12000-16998.
func RandomURL() string {
id, _ := rand.Int(rand.Reader, big.NewInt(5000-1))
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}

View File

@@ -0,0 +1,55 @@
package version
import (
"strconv"
"strings"
)
// CheckScrollProverVersion checks the "scroll-prover" version; if it differs from the local one, it returns false
func CheckScrollProverVersion(proverVersion string) bool {
// note that the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so the split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {
return false
}
local := strings.Split(Version, "-")
if len(local) != 4 {
return false
}
// compare the `scroll_prover` version
return remote[2] == local[2]
}
// CheckScrollProverVersionTag checks the "scroll-prover" version's tag; if it is too old, it returns false
func CheckScrollProverVersionTag(proverVersion string) bool {
// note that the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so the split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {
return false
}
remoteTagNums := strings.Split(strings.TrimPrefix(remote[0], "v"), ".")
if len(remoteTagNums) != 3 {
return false
}
remoteTagMajor, err := strconv.Atoi(remoteTagNums[0])
if err != nil {
return false
}
remoteTagMinor, err := strconv.Atoi(remoteTagNums[1])
if err != nil {
return false
}
remoteTagPatch, err := strconv.Atoi(remoteTagNums[2])
if err != nil {
return false
}
if remoteTagMajor < 4 {
return false
}
if remoteTagMinor == 1 && remoteTagPatch < 98 {
return false
}
return true
}
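Illustrative inputs for the two checks, assuming the `scroll-tech/common/version` import path; the `tag-commit-scroll_prover-halo2` layout comes from the comments above, the concrete values are made up:

```go
package main

import (
	"fmt"

	"scroll-tech/common/version"
)

func main() {
	// Same scroll_prover segment ("000000") as the local Version => true.
	fmt.Println(version.CheckScrollProverVersion("v4.2.14-abcdef1-000000-000000"))
	// Tag major 3 is below the required 4 => false.
	fmt.Println(version.CheckScrollProverVersionTag("v3.9.99-abcdef1-000000-000000"))
}
```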

View File

@@ -3,11 +3,9 @@ package version
import (
"fmt"
"runtime/debug"
"strconv"
"strings"
)
var tag = "v4.1.115"
var tag = "v4.2.14"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -31,55 +29,3 @@ var ZkVersion = "000000-000000"
// Version denote the version of scroll protocol, including the l2geth, relayer, coordinator, prover, contracts and etc.
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZkVersion)
// CheckScrollProverVersion checks the "scroll-prover" version; if it differs from the local one, it returns false
func CheckScrollProverVersion(proverVersion string) bool {
// note that the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so the split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {
return false
}
local := strings.Split(Version, "-")
if len(local) != 4 {
return false
}
// compare the `scroll_prover` version
return remote[2] == local[2]
}
// CheckScrollProverVersionTag checks the "scroll-prover" version's tag; if it is too old, it returns false
func CheckScrollProverVersionTag(proverVersion string) bool {
// note that the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so the split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {
return false
}
remoteTagNums := strings.Split(strings.TrimPrefix(remote[0], "v"), ".")
if len(remoteTagNums) != 3 {
return false
}
remoteTagMajor, err := strconv.Atoi(remoteTagNums[0])
if err != nil {
return false
}
remoteTagMinor, err := strconv.Atoi(remoteTagNums[1])
if err != nil {
return false
}
remoteTagPatch, err := strconv.Atoi(remoteTagNums[2])
if err != nil {
return false
}
if remoteTagMajor != 4 {
return false
}
if remoteTagMinor != 1 {
return false
}
if remoteTagPatch < 98 {
return false
}
return true
}

View File

@@ -104,45 +104,6 @@ Mapping from L2 message hash to sent status.
|---|---|---|
| _0 | bool | undefined |
### l1MessageFailedTimes
```solidity
function l1MessageFailedTimes(bytes32) external view returns (uint256)
```
Mapping from L1 message hash to the number of failure times.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
### maxFailedExecutionTimes
```solidity
function maxFailedExecutionTimes() external view returns (uint256)
```
The maximum number of times each L1 message can fail on L2.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
### messageQueue
```solidity
@@ -329,22 +290,6 @@ Update fee vault contract.
|---|---|---|
| _newFeeVault | address | The address of new fee vault contract. |
### updateMaxFailedExecutionTimes
```solidity
function updateMaxFailedExecutionTimes(uint256 _newMaxFailedExecutionTimes) external nonpayable
```
Update max failed execution times.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newMaxFailedExecutionTimes | uint256 | The new max failed execution times. |
### updateRateLimiter
```solidity

View File

@@ -84,6 +84,9 @@ const config: HardhatUserConfig = {
etherscan: {
apiKey: process.env.ETHERSCAN_API_KEY,
},
mocha: {
timeout: 10000000,
},
dodoc: {
runOnCompile: true,
keepFileStructure: false,

View File

@@ -1,5 +1,6 @@
/* eslint-disable node/no-unpublished-import */
/* eslint-disable node/no-missing-import */
import { concat } from "ethers/lib/utils";
import { constants } from "ethers";
import { ethers } from "hardhat";
import { ScrollChain, L1MessageQueue } from "../typechain";
@@ -11,22 +12,27 @@ describe("ScrollChain", async () => {
beforeEach(async () => {
const [deployer] = await ethers.getSigners();
const ProxyAdmin = await ethers.getContractFactory("ProxyAdmin", deployer);
const admin = await ProxyAdmin.deploy();
await admin.deployed();
const TransparentUpgradeableProxy = await ethers.getContractFactory("TransparentUpgradeableProxy", deployer);
const L1MessageQueue = await ethers.getContractFactory("L1MessageQueue", deployer);
queue = await L1MessageQueue.deploy();
await queue.deployed();
const queueImpl = await L1MessageQueue.deploy();
await queueImpl.deployed();
const queueProxy = await TransparentUpgradeableProxy.deploy(queueImpl.address, admin.address, "0x");
await queueProxy.deployed();
queue = await ethers.getContractAt("L1MessageQueue", queueProxy.address, deployer);
const RollupVerifier = await ethers.getContractFactory("RollupVerifier", deployer);
const verifier = await RollupVerifier.deploy();
await verifier.deployed();
const ScrollChain = await ethers.getContractFactory("ScrollChain", deployer);
const chainImpl = await ScrollChain.deploy(0);
await chainImpl.deployed();
const chainProxy = await TransparentUpgradeableProxy.deploy(chainImpl.address, admin.address, "0x");
await chainProxy.deployed();
chain = await ethers.getContractAt("ScrollChain", chainProxy.address, deployer);
const ScrollChain = await ethers.getContractFactory("ScrollChain", {
signer: deployer,
libraries: { RollupVerifier: verifier.address },
});
chain = await ScrollChain.deploy(0);
await chain.deployed();
await chain.initialize(queue.address, constants.AddressZero, 44);
await chain.initialize(queue.address, constants.AddressZero, 100);
await chain.addSequencer(deployer.address);
await queue.initialize(
constants.AddressZero,
@@ -38,79 +44,54 @@ describe("ScrollChain", async () => {
});
// @note skip these benchmark tests
/*
it("should succeed", async () => {
await chain.importGenesisBatch({
blocks: [
{
blockHash: "0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c",
parentHash: constants.HashZero,
blockNumber: 0,
timestamp: 1639724192,
baseFee: 1000000000,
gasLimit: 940000000,
numTransactions: 0,
numL1Messages: 0,
},
],
prevStateRoot: constants.HashZero,
newStateRoot: "0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5",
withdrawTrieRoot: constants.HashZero,
batchIndex: 0,
parentBatchHash: constants.HashZero,
l2Transactions: [],
});
const parentBatchHash = await chain.lastFinalizedBatchHash();
it.skip("should succeed", async () => {
const batchHeader0 = new Uint8Array(89);
batchHeader0[25] = 1;
await chain.importGenesisBatch(batchHeader0, "0x0000000000000000000000000000000000000000000000000000000000000001");
const parentBatchHash = await chain.committedBatches(0);
console.log("genesis batch hash:", parentBatchHash);
console.log(`ChunkPerBatch`, `BlockPerChunk`, `TxPerBlock`, `BytesPerTx`, `TotalBytes`, `EstimateGas`);
for (let numChunks = 3; numChunks <= 6; ++numChunks) {
for (let numBlocks = 1; numBlocks <= 5; ++numBlocks) {
for (let numTx = 20; numTx <= Math.min(30, 100 / numBlocks); ++numTx) {
for (let txLength = 800; txLength <= 1000; txLength += 100) {
const txs: Array<Uint8Array> = [];
for (let i = 0; i < numTx; i++) {
const tx = new Uint8Array(4 + txLength);
let offset = 3;
for (let x = txLength; x > 0; x = Math.floor(x / 256)) {
tx[offset] = x % 256;
offset -= 1;
}
tx.fill(1, 4);
txs.push(tx);
}
const chunk = new Uint8Array(1 + 60 * numBlocks);
chunk[0] = numBlocks;
for (let i = 0; i < numBlocks; i++) {
chunk[1 + i * 60 + 57] = numTx;
}
const chunks: Array<Uint8Array> = [];
for (let i = 0; i < numChunks; i++) {
const txsInChunk: Array<Uint8Array> = [];
for (let j = 0; j < numBlocks; j++) {
txsInChunk.push(concat(txs));
}
chunks.push(concat([chunk, concat(txsInChunk)]));
}
for (let numTx = 1; numTx <= 25; ++numTx) {
for (let txLength = 100; txLength <= 1000; txLength += 100) {
const txs: Array<Uint8Array> = [];
for (let i = 0; i < numTx; i++) {
const tx = new Uint8Array(4 + txLength);
let offset = 3;
for (let x = txLength; x > 0; x = Math.floor(x / 256)) {
tx[offset] = x % 256;
offset -= 1;
const estimateGas = await chain.estimateGas.commitBatch(0, batchHeader0, chunks, "0x");
console.log(
`${numChunks}`,
`${numBlocks}`,
`${numTx}`,
`${txLength}`,
`${numChunks * numBlocks * numTx * (txLength + 1)}`,
`${estimateGas.toString()}`
);
}
tx.fill(1, 4);
txs.push(tx);
}
const batch = {
blocks: [
{
blockHash: "0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6",
parentHash: "0x92826bd3aad2ef70d8061dc4e25150b305d1233d9cd7579433a77d6eb01dae1c",
blockNumber: 1,
timestamp: numTx * 100000 + txLength,
baseFee: 0,
gasLimit: 0,
numTransactions: 0,
numL1Messages: 0,
},
],
prevStateRoot: "0x1b186a7a90ec3b41a2417062fe44dce8ce82ae76bfbb09eae786a4f1be1895f5",
newStateRoot: "0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6",
withdrawTrieRoot: "0xb5baa665b2664c3bfed7eb46e00ebc110ecf2ebd257854a9bf2b9dbc9b2c08f6",
batchIndex: 1,
parentBatchHash: parentBatchHash,
l2Transactions: concat(txs),
};
const estimateGas = await chain.estimateGas.commitBatch(batch);
const tx = await chain.commitBatch(batch, { gasLimit: estimateGas.mul(12).div(10) });
const receipt = await tx.wait();
console.log(
"Commit batch with l2TransactionsBytes:",
numTx * (txLength + 4),
"gasLimit:",
tx.gasLimit.toString(),
"estimateGas:",
estimateGas.toString(),
"gasUsed:",
receipt.gasUsed.toString()
);
}
}
});
*/
});

View File

@@ -25,18 +25,18 @@ async function main() {
const L2StandardERC20Impl = process.env.L2_SCROLL_STANDARD_ERC20_ADDR!;
const L2StandardERC20FactoryAddress = process.env.L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR!;
// if ((await L1StandardERC20Gateway.counterpart()) === constants.AddressZero) {
const tx = await L1StandardERC20Gateway.initialize(
L2StandardERC20GatewayAddress,
L1GatewayRouterAddress,
L1ScrollMessengerAddress,
L2StandardERC20Impl,
L2StandardERC20FactoryAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
if ((await L1StandardERC20Gateway.counterpart()) === constants.AddressZero) {
const tx = await L1StandardERC20Gateway.initialize(
L2StandardERC20GatewayAddress,
L1GatewayRouterAddress,
L1ScrollMessengerAddress,
L2StandardERC20Impl,
L2StandardERC20FactoryAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}
}
// We recommend this pattern to be able to use async/await everywhere

View File

@@ -23,16 +23,16 @@ async function main() {
const L1ScrollMessengerAddress = addressFile.get("L1ScrollMessenger.proxy");
const L2GatewayRouterAddress = process.env.L2_GATEWAY_ROUTER_PROXY_ADDR!;
// if ((await L1GatewayRouter.counterpart()) === constants.AddressZero) {
const tx = await L1GatewayRouter.initialize(
L1StandardERC20GatewayAddress,
L2GatewayRouterAddress,
L1ScrollMessengerAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
if ((await L1GatewayRouter.counterpart()) === constants.AddressZero) {
const tx = await L1GatewayRouter.initialize(
L1StandardERC20GatewayAddress,
L2GatewayRouterAddress,
L1ScrollMessengerAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}
}
// We recommend this pattern to be able to use async/await everywhere

View File

@@ -21,12 +21,12 @@ async function main() {
const ZKRollupAddress = addressFile.get("ZKRollup.proxy");
// if ((await L1ScrollMessenger.rollup()) === constants.AddressZero) {
const tx = await L1ScrollMessenger.initialize(ZKRollupAddress);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
if ((await L1ScrollMessenger.rollup()) === constants.AddressZero) {
const tx = await L1ScrollMessenger.initialize(ZKRollupAddress);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}
}
// We recommend this pattern to be able to use async/await everywhere

View File

@@ -24,17 +24,17 @@ async function main() {
const L2StandardERC20FactoryAddress = addressFile.get("ScrollStandardERC20Factory");
const L1StandardERC20GatewayAddress = process.env.L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR!;
// if ((await L2StandardERC20Gateway.counterpart()) === constants.AddressZero) {
const tx = await L2StandardERC20Gateway.initialize(
L1StandardERC20GatewayAddress,
L2GatewayRouterAddress,
L2ScrollMessengerAddress,
L2StandardERC20FactoryAddress
);
console.log("initialize L2StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
if ((await L2StandardERC20Gateway.counterpart()) === constants.AddressZero) {
const tx = await L2StandardERC20Gateway.initialize(
L1StandardERC20GatewayAddress,
L2GatewayRouterAddress,
L2ScrollMessengerAddress,
L2StandardERC20FactoryAddress
);
console.log("initialize L2StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}
}
// We recommend this pattern to be able to use async/await everywhere

View File

@@ -23,16 +23,16 @@ async function main() {
const L2ScrollMessengerAddress = addressFile.get("L2ScrollMessenger");
const L1GatewayRouterAddress = process.env.L1_GATEWAY_ROUTER_PROXY_ADDR!;
// if ((await L2GatewayRouter.counterpart()) === constants.AddressZero) {
const tx = await L2GatewayRouter.initialize(
L2StandardERC20GatewayAddress,
L1GatewayRouterAddress,
L2ScrollMessengerAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
if ((await L2GatewayRouter.counterpart()) === constants.AddressZero) {
const tx = await L2GatewayRouter.initialize(
L2StandardERC20GatewayAddress,
L1GatewayRouterAddress,
L2ScrollMessengerAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}
}
// We recommend this pattern to be able to use async/await everywhere

View File

@@ -66,7 +66,7 @@ abstract contract L1ERC20Gateway is IL1ERC20Gateway, IMessageDropCallback, Scrol
address _to,
uint256 _amount,
bytes calldata _data
) external payable override onlyCallByCounterpart nonReentrant {
) external payable virtual override onlyCallByCounterpart nonReentrant {
_beforeFinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
// @note can possible trigger reentrant call to this contract or messenger,

View File

@@ -2,10 +2,176 @@
pragma solidity =0.8.16;
import {L1CustomERC20Gateway} from "../L1CustomERC20Gateway.sol";
import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol";
import {IERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/IERC20Upgradeable.sol";
// solhint-disable no-empty-blocks
import {IFiatToken} from "../../../interfaces/IFiatToken.sol";
import {IUSDCBurnableSourceBridge} from "../../../interfaces/IUSDCBurnableSourceBridge.sol";
import {IL2ERC20Gateway} from "../../../L2/gateways/IL2ERC20Gateway.sol";
import {IL1ScrollMessenger} from "../../IL1ScrollMessenger.sol";
import {IL1ERC20Gateway} from "../IL1ERC20Gateway.sol";
contract L1USDCGateway is L1CustomERC20Gateway {
import {ScrollGatewayBase} from "../../../libraries/gateway/ScrollGatewayBase.sol";
import {L1ERC20Gateway} from "../L1ERC20Gateway.sol";
/// @title L1USDCGateway
/// @notice The `L1USDCGateway` contract is used to deposit `USDC` tokens on layer 1 and
/// finalize withdrawals of `USDC` from layer 2, before USDC becomes native on layer 2.
contract L1USDCGateway is L1ERC20Gateway, IUSDCBurnableSourceBridge {
/*************
* Constants *
*************/
/// @notice The address of the L1 USDC token.
// solhint-disable-next-line var-name-mixedcase
address public immutable l1USDC;
/// @notice The address of the L2 USDC token.
address public immutable l2USDC;
/*************
* Variables *
*************/
/// @notice The address of the caller from Circle.
address public circleCaller;
/// @notice The flag indicates whether USDC deposit is paused.
bool public depositPaused;
/// @notice The flag indicates whether USDC withdrawal is paused.
/// @dev This does not need to be set to `true` since we will set `L2USDCGateway.withdrawPaused` first.
/// This is kept just in case and will be set after all pending messages are relayed.
bool public withdrawPaused;
/// @notice The total amount of bridged USDC in this contract.
/// @dev Only deposited USDC will count. Accidentally transferred USDC will be ignored.
uint256 public totalBridgedUSDC;
/***************
* Constructor *
***************/
constructor(address _l1USDC, address _l2USDC) {
_disableInitializers();
l1USDC = _l1USDC;
l2USDC = _l2USDC;
}
/// @notice Initialize the storage of L1USDCGateway.
/// @param _counterpart The address of L2USDCGateway in L2.
/// @param _router The address of L1GatewayRouter.
/// @param _messenger The address of L1ScrollMessenger.
function initialize(
address _counterpart,
address _router,
address _messenger
) external initializer {
require(_router != address(0), "zero router address");
ScrollGatewayBase._initialize(_counterpart, _router, _messenger);
}
/*************************
* Public View Functions *
*************************/
/// @inheritdoc IL1ERC20Gateway
function getL2ERC20Address(address) public view override returns (address) {
return l2USDC;
}
/*******************************
* Public Restricted Functions *
*******************************/
/// @inheritdoc IUSDCBurnableSourceBridge
function burnAllLockedUSDC() external override {
require(msg.sender == circleCaller, "only circle caller");
// @note Only bridged USDC will be burned. We may refund the rest if possible.
uint256 _balance = totalBridgedUSDC;
totalBridgedUSDC = 0;
IFiatToken(l1USDC).burn(_balance);
}
/// @notice Update the Circle EOA address.
/// @param _caller The address to update.
function updateCircleCaller(address _caller) external onlyOwner {
circleCaller = _caller;
}
/// @notice Change the deposit pause status of this contract.
/// @param _paused The new status, `true` means paused and `false` means not paused.
function pauseDeposit(bool _paused) external onlyOwner {
depositPaused = _paused;
}
/// @notice Change the withdraw pause status of this contract.
/// @param _paused The new status, `true` means paused and `false` means not paused.
function pauseWithdraw(bool _paused) external onlyOwner {
withdrawPaused = _paused;
}
/**********************
* Internal Functions *
**********************/
/// @inheritdoc L1ERC20Gateway
function _beforeFinalizeWithdrawERC20(
address _l1Token,
address _l2Token,
address,
address,
uint256 _amount,
bytes calldata
) internal virtual override {
require(msg.value == 0, "nonzero msg.value");
require(_l1Token == l1USDC, "l1 token not USDC");
require(_l2Token == l2USDC, "l2 token not USDC");
require(!withdrawPaused, "withdraw paused");
totalBridgedUSDC -= _amount;
}
/// @inheritdoc L1ERC20Gateway
function _beforeDropMessage(
address,
address,
uint256 _amount
) internal virtual override {
require(msg.value == 0, "nonzero msg.value");
totalBridgedUSDC -= _amount;
}
/// @inheritdoc L1ERC20Gateway
function _deposit(
address _token,
address _to,
uint256 _amount,
bytes memory _data,
uint256 _gasLimit
) internal virtual override nonReentrant {
require(_amount > 0, "deposit zero amount");
require(_token == l1USDC, "only USDC is allowed");
require(!depositPaused, "deposit paused");
// 1. Transfer token into this contract.
address _from;
(_from, _amount, _data) = _transferERC20In(_token, _amount, _data);
require(_data.length == 0, "call is not allowed");
totalBridgedUSDC += _amount;
// 2. Generate message passed to L2USDCGateway.
bytes memory _message = abi.encodeCall(
IL2ERC20Gateway.finalizeDepositERC20,
(_token, l2USDC, _from, _to, _amount, _data)
);
// 3. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit, _from);
emit DepositERC20(_token, l2USDC, _from, _to, _amount, _data);
}
}

View File

@@ -0,0 +1,180 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol";
import {ITokenMessenger} from "../../../interfaces/ITokenMessenger.sol";
import {IL2ERC20Gateway} from "../../../L2/gateways/IL2ERC20Gateway.sol";
import {IL1ScrollMessenger} from "../../IL1ScrollMessenger.sol";
import {IL1ERC20Gateway} from "../IL1ERC20Gateway.sol";
import {CCTPGatewayBase} from "../../../libraries/gateway/CCTPGatewayBase.sol";
import {ScrollGatewayBase} from "../../../libraries/gateway/ScrollGatewayBase.sol";
import {L1ERC20Gateway} from "../L1ERC20Gateway.sol";
/// @title L1USDCGatewayCCTP
/// @notice The `L1USDCGatewayCCTP` contract is used to deposit `USDC` token on layer 1 and
/// finalize withdrawals of `USDC` from layer 2, after USDC becomes native on layer 2.
contract L1USDCGatewayCCTP is CCTPGatewayBase, L1ERC20Gateway {
/***************
* Constructor *
***************/
constructor(
address _l1USDC,
address _l2USDC,
uint32 _destinationDomain
) CCTPGatewayBase(_l1USDC, _l2USDC, _destinationDomain) {
_disableInitializers();
}
/// @notice Initialize the storage of L1USDCGatewayCCTP.
/// @param _counterpart The address of L2USDCGatewayCCTP in L2.
/// @param _router The address of L1GatewayRouter.
/// @param _messenger The address of L1ScrollMessenger.
/// @param _cctpMessenger The address of TokenMessenger in local domain.
/// @param _cctpTransmitter The address of MessageTransmitter in local domain.
function initialize(
address _counterpart,
address _router,
address _messenger,
address _cctpMessenger,
address _cctpTransmitter
) external initializer {
require(_router != address(0), "zero router address");
ScrollGatewayBase._initialize(_counterpart, _router, _messenger);
CCTPGatewayBase._initialize(_cctpMessenger, _cctpTransmitter);
}
/*************************
* Public View Functions *
*************************/
/// @inheritdoc IL1ERC20Gateway
function getL2ERC20Address(address) public view override returns (address) {
return l2USDC;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Relay the cross-chain message and claim USDC that has been bridged.
/// @dev The `_scrollMessage` is actually encoded calldata for `L1ScrollMessenger.relayMessageWithProof`.
///
/// @dev This helper function is intended to claim USDC in a single transaction.
/// Normally, a user should call `L1ScrollMessenger.relayMessageWithProof` first,
/// then `L1USDCGatewayCCTP.claimUSDC`.
///
/// @param _nonce The nonce of the message from CCTP.
/// @param _cctpMessage The message passed to MessageTransmitter contract in CCTP.
/// @param _cctpSignature The signature passed to MessageTransmitter contract in CCTP.
/// @param _scrollMessage The message passed to L1ScrollMessenger contract.
function relayAndClaimUSDC(
uint256 _nonce,
bytes calldata _cctpMessage,
bytes calldata _cctpSignature,
bytes calldata _scrollMessage
) external {
require(status[_nonce] == CCTPMessageStatus.None, "message relayed");
// call messenger to set `status[_nonce]` to `CCTPMessageStatus.Pending`.
(bool _success, ) = messenger.call(_scrollMessage);
require(_success, "call messenger failed");
claimUSDC(_nonce, _cctpMessage, _cctpSignature);
}
/// @inheritdoc IL1ERC20Gateway
/// @dev The function will not mint the USDC, users need to call `claimUSDC` after this function is done.
function finalizeWithdrawERC20(
address _l1Token,
address _l2Token,
address _from,
address _to,
uint256 _amount,
bytes memory _data
) external payable override onlyCallByCounterpart {
require(msg.value == 0, "nonzero msg.value");
require(_l1Token == l1USDC, "l1 token not USDC");
require(_l2Token == l2USDC, "l2 token not USDC");
uint256 _nonce;
(_nonce, _data) = abi.decode(_data, (uint256, bytes));
require(status[_nonce] == CCTPMessageStatus.None, "message relayed");
status[_nonce] = CCTPMessageStatus.Pending;
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}
/*******************************
* Public Restricted Functions *
*******************************/
/// @notice Update the CCTP contract addresses.
/// @param _messenger The address of TokenMessenger in local domain.
/// @param _transmitter The address of MessageTransmitter in local domain.
function updateCCTPContracts(address _messenger, address _transmitter) external onlyOwner {
cctpMessenger = _messenger;
cctpTransmitter = _transmitter;
}
/**********************
* Internal Functions *
**********************/
/// @inheritdoc L1ERC20Gateway
function _beforeFinalizeWithdrawERC20(
address,
address,
address,
address,
uint256,
bytes calldata
) internal virtual override {}
/// @inheritdoc L1ERC20Gateway
function _beforeDropMessage(
address,
address,
uint256
) internal virtual override {
require(msg.value == 0, "nonzero msg.value");
}
/// @inheritdoc L1ERC20Gateway
function _deposit(
address _token,
address _to,
uint256 _amount,
bytes memory _data,
uint256 _gasLimit
) internal virtual override nonReentrant {
require(_amount > 0, "deposit zero amount");
require(_token == l1USDC, "only USDC is allowed");
// 1. Extract real sender if this call is from L1GatewayRouter.
address _from;
(_from, _amount, _data) = _transferERC20In(_token, _amount, _data);
// 2. Burn token through CCTP TokenMessenger
uint256 _nonce = ITokenMessenger(cctpMessenger).depositForBurnWithCaller(
_amount,
destinationDomain,
bytes32(uint256(uint160(_to))),
address(this),
bytes32(uint256(uint160(counterpart)))
);
// 3. Generate message passed to L2USDCGatewayCCTP.
bytes memory _message = abi.encodeCall(
IL2ERC20Gateway.finalizeDepositERC20,
(_token, l2USDC, _from, _to, _amount, abi.encode(_nonce, _data))
);
// 4. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit);
emit DepositERC20(_token, l2USDC, _from, _to, _amount, _data);
}
}
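As the doc comments above note, withdrawing USDC through the CCTP gateway is a two-step claim: `L1ScrollMessenger.relayMessageWithProof` marks the nonce `Pending`, then `claimUSDC` mints. A minimal sketch of both paths, assuming the gateway address plus the CCTP message, attestation signature, and encoded Scroll message are fetched off-chain:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;

// Assumed subset of L1USDCGatewayCCTP, matching the functions shown above.
interface IL1USDCGatewayCCTPLike {
    function claimUSDC(
        uint256 nonce,
        bytes calldata cctpMessage,
        bytes calldata cctpSignature
    ) external;

    function relayAndClaimUSDC(
        uint256 nonce,
        bytes calldata cctpMessage,
        bytes calldata cctpSignature,
        bytes calldata scrollMessage
    ) external;
}

/// @dev Hypothetical helper contrasting the one-step and two-step claim flows.
contract UsdcClaimer {
    /// One-step: relay the Scroll message (sets status[nonce] = Pending) and mint in one tx.
    function claimInOneTx(
        address gateway,
        uint256 nonce,
        bytes calldata cctpMessage,
        bytes calldata cctpSignature,
        bytes calldata scrollMessage // encoded calldata for L1ScrollMessenger.relayMessageWithProof
    ) external {
        IL1USDCGatewayCCTPLike(gateway).relayAndClaimUSDC(nonce, cctpMessage, cctpSignature, scrollMessage);
    }

    /// Two-step: once relayMessageWithProof has already set the nonce to Pending, just claim.
    function claimAfterRelay(
        address gateway,
        uint256 nonce,
        bytes calldata cctpMessage,
        bytes calldata cctpSignature
    ) external {
        IL1USDCGatewayCCTPLike(gateway).claimUSDC(nonce, cctpMessage, cctpSignature);
    }
}
```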

View File

@@ -36,10 +36,10 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
/// @param newVerifier The address of new rollup verifier.
event UpdateVerifier(address indexed oldVerifier, address indexed newVerifier);
/// @notice Emitted when the value of `maxNumL2TxInChunk` is updated.
/// @param oldMaxNumL2TxInChunk The old value of `maxNumL2TxInChunk`.
/// @param newMaxNumL2TxInChunk The new value of `maxNumL2TxInChunk`.
event UpdateMaxNumL2TxInChunk(uint256 oldMaxNumL2TxInChunk, uint256 newMaxNumL2TxInChunk);
/// @notice Emitted when the value of `maxNumTxInChunk` is updated.
/// @param oldMaxNumTxInChunk The old value of `maxNumTxInChunk`.
/// @param newMaxNumTxInChunk The new value of `maxNumTxInChunk`.
event UpdateMaxNumTxInChunk(uint256 oldMaxNumTxInChunk, uint256 newMaxNumTxInChunk);
/*************
* Constants *
@@ -53,7 +53,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
*************/
/// @notice The maximum number of transactions allowed in each chunk.
uint256 public maxNumL2TxInChunk;
uint256 public maxNumTxInChunk;
/// @notice The address of L1MessageQueue.
address public messageQueue;
@@ -107,16 +107,16 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
function initialize(
address _messageQueue,
address _verifier,
uint256 _maxNumL2TxInChunk
uint256 _maxNumTxInChunk
) public initializer {
OwnableUpgradeable.__Ownable_init();
messageQueue = _messageQueue;
verifier = _verifier;
maxNumL2TxInChunk = _maxNumL2TxInChunk;
maxNumTxInChunk = _maxNumTxInChunk;
emit UpdateVerifier(address(0), _verifier);
emit UpdateMaxNumL2TxInChunk(0, _maxNumL2TxInChunk);
emit UpdateMaxNumTxInChunk(0, _maxNumTxInChunk);
}
/*************************
@@ -398,13 +398,13 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
emit UpdateVerifier(_oldVerifier, _newVerifier);
}
/// @notice Update the value of `maxNumL2TxInChunk`.
/// @param _maxNumL2TxInChunk The new value of `maxNumL2TxInChunk`.
function updateMaxNumL2TxInChunk(uint256 _maxNumL2TxInChunk) external onlyOwner {
uint256 _oldMaxNumL2TxInChunk = maxNumL2TxInChunk;
maxNumL2TxInChunk = _maxNumL2TxInChunk;
/// @notice Update the value of `maxNumTxInChunk`.
/// @param _maxNumTxInChunk The new value of `maxNumTxInChunk`.
function updateMaxNumTxInChunk(uint256 _maxNumTxInChunk) external onlyOwner {
uint256 _oldMaxNumTxInChunk = maxNumTxInChunk;
maxNumTxInChunk = _maxNumTxInChunk;
emit UpdateMaxNumL2TxInChunk(_oldMaxNumL2TxInChunk, _maxNumL2TxInChunk);
emit UpdateMaxNumTxInChunk(_oldMaxNumTxInChunk, _maxNumTxInChunk);
}
/// @notice Pause the contract
@@ -462,19 +462,26 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
uint256 _numBlocks = ChunkCodec.validateChunkLength(chunkPtr, _chunk.length);
// concatenate block contexts
uint256 _totalTransactionsInChunk;
for (uint256 i = 0; i < _numBlocks; i++) {
dataPtr = ChunkCodec.copyBlockContext(chunkPtr, dataPtr, i);
uint256 _numTransactionsInBlock = ChunkCodec.numTransactions(blockPtr);
unchecked {
_totalTransactionsInChunk += _numTransactionsInBlock;
blockPtr += ChunkCodec.BLOCK_CONTEXT_LENGTH;
// concatenate block contexts, use scope to avoid stack too deep
{
uint256 _totalTransactionsInChunk;
for (uint256 i = 0; i < _numBlocks; i++) {
dataPtr = ChunkCodec.copyBlockContext(chunkPtr, dataPtr, i);
uint256 _numTransactionsInBlock = ChunkCodec.numTransactions(blockPtr);
unchecked {
_totalTransactionsInChunk += _numTransactionsInBlock;
blockPtr += ChunkCodec.BLOCK_CONTEXT_LENGTH;
}
}
assembly {
mstore(0x40, add(dataPtr, mul(_totalTransactionsInChunk, 0x20))) // reserve memory for tx hashes
}
}
// Used to compute the actual number of transactions in the chunk.
uint256 txHashStartDataPtr;
assembly {
mstore(0x40, add(dataPtr, mul(_totalTransactionsInChunk, 0x20))) // reserve memory for tx hashes
txHashStartDataPtr := dataPtr
blockPtr := add(chunkPtr, 1) // reset block ptr
}
@@ -513,11 +520,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
}
}
// check the number of L2 transactions in the chunk
require(
_totalTransactionsInChunk - _totalNumL1MessagesInChunk <= maxNumL2TxInChunk,
"too many L2 txs in one chunk"
);
// check the actual number of transactions in the chunk
require((dataPtr - txHashStartDataPtr) / 32 <= maxNumTxInChunk, "too many txs in one chunk");
// check chunk has correct length
require(l2TxPtr - chunkPtr == _chunk.length, "incomplete l2 transaction data");
@@ -550,9 +554,10 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
unchecked {
uint256 _bitmap;
uint256 rem;
for (uint256 i = 0; i < _numL1Messages; i++) {
uint256 quo = _totalL1MessagesPoppedInBatch >> 8;
uint256 rem = _totalL1MessagesPoppedInBatch & 0xff;
rem = _totalL1MessagesPoppedInBatch & 0xff;
// load bitmap every 256 bits
if (i == 0 || rem == 0) {
@@ -574,7 +579,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
}
// check last L1 message is not skipped, _totalL1MessagesPoppedInBatch must be > 0
uint256 rem = (_totalL1MessagesPoppedInBatch - 1) & 0xff;
rem = (_totalL1MessagesPoppedInBatch - 1) & 0xff;
require(((_bitmap >> rem) & 1) == 0, "cannot skip last L1 message");
}
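The skip bitmap packs one bit per popped L1 message, 256 per word; the hunk above loads a fresh word whenever `rem` wraps around to zero. A self-contained sketch of the per-message check, with an in-memory array standing in for the calldata bitmap:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;

/// @dev Illustrative only: mirrors the skip-bitmap indexing used in ScrollChain.
library SkipBitmap {
    /// @param bitmaps One 256-bit word per 256 L1 messages, in popping order.
    /// @param index   Global index of the message within the batch.
    /// @return skipped True if the bit for `index` is set, i.e. the message is skipped.
    function isSkipped(uint256[] memory bitmaps, uint256 index) internal pure returns (bool skipped) {
        uint256 quo = index >> 8;   // which 256-bit word holds this message's bit
        uint256 rem = index & 0xff; // bit position inside that word
        skipped = ((bitmaps[quo] >> rem) & 1) == 1;
    }
}
```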

View File

@@ -2,10 +2,12 @@
pragma solidity =0.8.16;
import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol";
import {IERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/IERC20Upgradeable.sol";
import {SafeERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/utils/SafeERC20Upgradeable.sol";
import {IFiatToken} from "../../../interfaces/IFiatToken.sol";
import {IUSDCDestinationBridge} from "../../../interfaces/IUSDCDestinationBridge.sol";
import {IL1ERC20Gateway} from "../../../L1/gateways/IL1ERC20Gateway.sol";
import {IL2ScrollMessenger} from "../../IL2ScrollMessenger.sol";
import {IL2ERC20Gateway} from "../IL2ERC20Gateway.sol";
@@ -16,7 +18,7 @@ import {L2ERC20Gateway} from "../L2ERC20Gateway.sol";
/// @title L2USDCGateway
/// @notice The `L2USDCGateway` contract is used to withdraw `USDC` on layer 2 and
/// finalize deposits of `USDC` from layer 1.
contract L2USDCGateway is L2ERC20Gateway {
contract L2USDCGateway is L2ERC20Gateway, IUSDCDestinationBridge {
using SafeERC20Upgradeable for IERC20Upgradeable;
/*************
@@ -33,8 +35,15 @@ contract L2USDCGateway is L2ERC20Gateway {
* Variables *
*************/
/// @notice The address of the caller from Circle.
address public circleCaller;
/// @notice The flag indicates whether USDC deposit is paused.
/// @dev This does not need to be set to `true` in normal operation, since we set `L1USDCGateway.depositPaused` first.
/// It is kept just in case and will be set after all pending messages are relayed.
bool public depositPaused;
/// @notice The flag indicates whether USDC withdrawal is paused.
bool public withdrawPaused;
/***************
@@ -91,7 +100,8 @@ contract L2USDCGateway is L2ERC20Gateway {
require(IFiatToken(_l2Token).mint(_to, _amount), "mint USDC failed");
_doCallback(_to, _data);
// disable call for USDC
// _doCallback(_to, _data);
emit FinalizeDepositERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}
@@ -100,6 +110,19 @@ contract L2USDCGateway is L2ERC20Gateway {
* Public Restricted Functions *
*******************************/
/// @inheritdoc IUSDCDestinationBridge
function transferUSDCRoles(address _owner) external {
require(msg.sender == circleCaller, "only circle caller");
OwnableUpgradeable(l2USDC).transferOwnership(_owner);
}
/// @notice Update the Circle EOA address.
/// @param _caller The new Circle caller address.
function updateCircleCaller(address _caller) external onlyOwner {
circleCaller = _caller;
}
/// @notice Change the deposit pause status of this contract.
/// @param _paused The new status, `true` means paused and `false` means not paused.
function pauseDeposit(bool _paused) external onlyOwner {
@@ -133,10 +156,11 @@ contract L2USDCGateway is L2ERC20Gateway {
if (router == msg.sender) {
(_from, _data) = abi.decode(_data, (address, bytes));
}
require(_data.length == 0, "call is not allowed");
// 2. Transfer token into this contract.
IERC20Upgradeable(_token).safeTransferFrom(_from, address(this), _amount);
require(IFiatToken(_token).burn(_amount), "burn USDC failed");
IFiatToken(_token).burn(_amount);
// 3. Generate message passed to L1USDCGateway.
address _l1USDC = l1USDC;

View File

@@ -0,0 +1,156 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol";
import {IERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/IERC20Upgradeable.sol";
import {SafeERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/utils/SafeERC20Upgradeable.sol";
import {ITokenMessenger} from "../../../interfaces/ITokenMessenger.sol";
import {IL1ERC20Gateway} from "../../../L1/gateways/IL1ERC20Gateway.sol";
import {IL2ScrollMessenger} from "../../IL2ScrollMessenger.sol";
import {IL2ERC20Gateway} from "../IL2ERC20Gateway.sol";
import {CCTPGatewayBase} from "../../../libraries/gateway/CCTPGatewayBase.sol";
import {ScrollGatewayBase} from "../../../libraries/gateway/ScrollGatewayBase.sol";
import {L2ERC20Gateway} from "../L2ERC20Gateway.sol";
/// @title L2USDCGatewayCCTP
/// @notice The `L2USDCGatewayCCTP` contract is used to withdraw `USDC` on layer 2 and
/// finalize deposits of `USDC` from layer 1.
contract L2USDCGatewayCCTP is CCTPGatewayBase, L2ERC20Gateway {
using SafeERC20Upgradeable for IERC20Upgradeable;
/***************
* Constructor *
***************/
constructor(
address _l1USDC,
address _l2USDC,
uint32 _destinationDomain
) CCTPGatewayBase(_l1USDC, _l2USDC, _destinationDomain) {
_disableInitializers();
}
/// @notice Initialize the storage of L2USDCGatewayCCTP.
/// @param _counterpart The address of L1USDCGatewayCCTP in L1.
/// @param _router The address of L2GatewayRouter.
/// @param _messenger The address of L2ScrollMessenger.
/// @param _cctpMessenger The address of TokenMessenger in local domain.
/// @param _cctpTransmitter The address of MessageTransmitter in local domain.
function initialize(
address _counterpart,
address _router,
address _messenger,
address _cctpMessenger,
address _cctpTransmitter
) external initializer {
require(_router != address(0), "zero router address");
ScrollGatewayBase._initialize(_counterpart, _router, _messenger);
CCTPGatewayBase._initialize(_cctpMessenger, _cctpTransmitter);
}
/*************************
* Public View Functions *
*************************/
/// @inheritdoc IL2ERC20Gateway
function getL1ERC20Address(address) external view override returns (address) {
return l1USDC;
}
/// @inheritdoc IL2ERC20Gateway
function getL2ERC20Address(address) public view override returns (address) {
return l2USDC;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc IL2ERC20Gateway
/// @dev The function will not mint the USDC, users need to call `claimUSDC` after this function is done.
function finalizeDepositERC20(
address _l1Token,
address _l2Token,
address _from,
address _to,
uint256 _amount,
bytes memory _data
) external payable override onlyCallByCounterpart {
require(msg.value == 0, "nonzero msg.value");
require(_l1Token == l1USDC, "l1 token not USDC");
require(_l2Token == l2USDC, "l2 token not USDC");
uint256 _nonce;
(_nonce, _data) = abi.decode(_data, (uint256, bytes));
require(status[_nonce] == CCTPMessageStatus.None, "message relayed");
status[_nonce] = CCTPMessageStatus.Pending;
emit FinalizeDepositERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}
/*******************************
* Public Restricted Functions *
*******************************/
/// @notice Update the CCTP contract addresses.
/// @param _messenger The address of TokenMessenger in local domain.
/// @param _transmitter The address of MessageTransmitter in local domain.
function updateCCTPContracts(address _messenger, address _transmitter) external onlyOwner {
cctpMessenger = _messenger;
cctpTransmitter = _transmitter;
}
/**********************
* Internal Functions *
**********************/
/// @inheritdoc L2ERC20Gateway
function _withdraw(
address _token,
address _to,
uint256 _amount,
bytes memory _data,
uint256 _gasLimit
) internal virtual override {
require(_amount > 0, "withdraw zero amount");
require(_token == l2USDC, "only USDC is allowed");
// 1. Extract real sender if this call is from L1GatewayRouter.
address _from = msg.sender;
if (router == msg.sender) {
(_from, _data) = abi.decode(_data, (address, bytes));
}
// 2. Transfer token into this contract.
IERC20Upgradeable(_token).safeTransferFrom(_from, address(this), _amount);
// 3. Burn token through CCTP TokenMessenger
uint256 _nonce = ITokenMessenger(cctpMessenger).depositForBurnWithCaller(
_amount,
destinationDomain,
bytes32(uint256(uint160(_to))),
address(this),
bytes32(uint256(uint160(counterpart)))
);
// 4. Generate message passed to L1USDCGateway.
address _l1USDC = l1USDC;
bytes memory _message = abi.encodeWithSelector(
IL1ERC20Gateway.finalizeWithdrawERC20.selector,
_l1USDC,
_token,
_from,
_to,
_amount,
abi.encode(_nonce, _data)
);
// 5. Send message to L2ScrollMessenger.
IL2ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit);
emit WithdrawERC20(_l1USDC, _token, _from, _to, _amount, _data);
}
}

View File

@@ -1,6 +1,6 @@
A library for interacting with Scroll contracts.
This library includes contracts and interfaces needed to interact with the Scroll Smart Contracts deployed on both Layer 1 and Layer 2. This includes depositing and withdrawing ETH, ERC20 tokens, and NFTs, or sending arbitrary messages.
This library includes contracts and interfaces needed to interact with the Scroll Smart Contracts deployed on both Layer 1 and Layer 2. This includes depositing and withdrawing ETH, ERC20 tokens, and NFTs, or sending arbitrary messages.
# Overview
@@ -21,10 +21,11 @@ pragma solidity 0.8.20;
import "@scroll-tech/contracts/L1/gateways/IL1ETHGateway.sol";
contract MyContract {
function bridgeETH(address scrollBridge, uint gasLimit) public payable {
IL1ETHGateway(scrollBridge).depositETH(msg.sender, msg.value, gasLimit);
}
function bridgeETH(address scrollBridge, uint256 gasLimit) public payable {
IL1ETHGateway(scrollBridge).depositETH(msg.sender, msg.value, gasLimit);
}
}
```
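For ERC20 tokens the flow is similar. A sketch assuming the `IL1ERC20Gateway.depositERC20(token, to, amount, gasLimit)` overload used by the tests in this repository; check the Bridge Documentation for the canonical entry points:

```solidity
pragma solidity 0.8.20;

import "@scroll-tech/contracts/L1/gateways/IL1ERC20Gateway.sol";

contract MyERC20Bridge {
    function bridgeERC20(
        address scrollGateway,
        address token,
        uint256 amount,
        uint256 gasLimit
    ) public payable {
        // Tokens must be approved to the gateway beforehand; msg.value pays the L2 execution fee.
        IL1ERC20Gateway(scrollGateway).depositERC20{value: msg.value}(token, msg.sender, amount, gasLimit);
    }
}
```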
Visit the Bridge Documentation for the API reference, an architecture overview, and guides with code examples.

View File

@@ -18,5 +18,5 @@ interface IFiatToken {
* amount is less than or equal to the minter's account balance
* @param _amount uint256 the amount of tokens to be burned
*/
function burn(uint256 _amount) external returns (bool);
function burn(uint256 _amount) external;
}

View File

@@ -0,0 +1,16 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;
interface IMessageTransmitter {
function usedNonces(bytes32 _sourceAndNonce) external view returns (uint256);
/**
* @notice Receives an incoming message, validating the header and passing
* the body to application-specific handler.
* @param message The message raw bytes
* @param signature The message signature
* @return success bool, true if successful
*/
function receiveMessage(bytes calldata message, bytes calldata signature) external returns (bool success);
}

View File

@@ -0,0 +1,63 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;
interface ITokenMessenger {
/**
* @notice Deposits and burns tokens from sender to be minted on destination domain. The mint
* on the destination domain must be called by `destinationCaller`.
* WARNING: if the `destinationCaller` does not represent a valid address as bytes32, then it will not be possible
* to broadcast the message on the destination domain. This is an advanced feature, and the standard
* depositForBurn() should be preferred for use cases where a specific destination caller is not required.
* Emits a `DepositForBurn` event.
* @dev reverts if:
* - given destinationCaller is zero address
* - given burnToken is not supported
* - given destinationDomain has no TokenMessenger registered
* - transferFrom() reverts. For example, if sender's burnToken balance or approved allowance
* to this contract is less than `amount`.
* - burn() reverts. For example, if `amount` is 0.
* - MessageTransmitter returns false or reverts.
* @param amount amount of tokens to burn
* @param destinationDomain destination domain
* @param mintRecipient address of mint recipient on destination domain
* @param burnToken address of contract to burn deposited tokens, on local domain
* @param destinationCaller caller on the destination domain, as bytes32
* @return nonce unique nonce reserved by message
*/
function depositForBurnWithCaller(
uint256 amount,
uint32 destinationDomain,
bytes32 mintRecipient,
address burnToken,
bytes32 destinationCaller
) external returns (uint64 nonce);
/**
* @notice Replace a BurnMessage to change the mint recipient and/or
* destination caller. Allows the sender of a previous BurnMessage
* (created by depositForBurn or depositForBurnWithCaller)
* to send a new BurnMessage to replace the original.
* The new BurnMessage will reuse the amount and burn token of the original,
* without requiring a new deposit.
* @dev The new message will reuse the original message's nonce. For a
* given nonce, all replacement message(s) and the original message are
* valid to broadcast on the destination domain, until the first message
* at the nonce confirms, at which point all others are invalidated.
* Note: The msg.sender of the replaced message must be the same as the
* msg.sender of the original message.
* @param originalMessage original message bytes (to replace)
* @param originalAttestation original attestation bytes
* @param newDestinationCaller the new destination caller, which may be the
* same as the original destination caller, a new destination caller, or an empty
* destination caller (bytes32(0), indicating that any destination caller is valid.)
* @param newMintRecipient the new mint recipient, which may be the same as the
* original mint recipient, or different.
*/
function replaceDepositForBurn(
bytes calldata originalMessage,
bytes calldata originalAttestation,
bytes32 newDestinationCaller,
bytes32 newMintRecipient
) external;
}
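Both USDC CCTP gateways in this diff call `depositForBurnWithCaller` with addresses widened to `bytes32`. A minimal sketch of that call pattern (the import path and the approval step are assumptions; the interface itself is the one above):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;

import {ITokenMessenger} from "./ITokenMessenger.sol"; // assumed local path

/// @dev Illustrative wrapper around depositForBurnWithCaller, as the gateways use it.
contract BurnCaller {
    function burnFor(
        address tokenMessenger,   // CCTP TokenMessenger on the local domain
        address usdc,             // burn token; must already be approved to tokenMessenger
        uint32 destinationDomain, // CCTP domain id of the destination chain
        address mintRecipient,
        address destinationCaller,
        uint256 amount
    ) external returns (uint64 nonce) {
        // CCTP addresses recipients and callers as bytes32: left-pad the 20-byte address.
        nonce = ITokenMessenger(tokenMessenger).depositForBurnWithCaller(
            amount,
            destinationDomain,
            bytes32(uint256(uint160(mintRecipient))),
            usdc,
            bytes32(uint256(uint160(destinationCaller)))
        );
    }
}
```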

View File

@@ -0,0 +1,12 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;
// Implement this on the source chain (Ethereum).
interface IUSDCBurnableSourceBridge {
/**
* @notice Called by Circle, this executes a burn on the source
* chain.
*/
function burnAllLockedUSDC() external;
}

View File

@@ -0,0 +1,11 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;
// Implement this on the destination chain (Scroll).
interface IUSDCDestinationBridge {
/**
* @notice Called by Circle, this transfers FiatToken roles to the designated owner.
*/
function transferUSDCRoles(address owner) external;
}

View File

@@ -0,0 +1,95 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import {IMessageTransmitter} from "../../interfaces/IMessageTransmitter.sol";
import {ScrollGatewayBase} from "./ScrollGatewayBase.sol";
abstract contract CCTPGatewayBase is ScrollGatewayBase {
/*********
* Enums *
*********/
enum CCTPMessageStatus {
None,
Pending,
Relayed
}
/*************
* Constants *
*************/
/// @notice The address of the USDC token on L1.
address public immutable l1USDC;
/// @notice The address of the USDC token on L2.
address public immutable l2USDC;
/// @notice The CCTP destination domain for layer 2.
uint32 public immutable destinationDomain;
/*************
* Variables *
*************/
/// @notice The address of TokenMessenger in local domain.
address public cctpMessenger;
/// @notice The address of MessageTransmitter in local domain.
address public cctpTransmitter;
/// @notice Mapping from destination domain CCTP nonce to status.
mapping(uint256 => CCTPMessageStatus) public status;
/***************
* Constructor *
***************/
constructor(
address _l1USDC,
address _l2USDC,
uint32 _destinationDomain
) {
l1USDC = _l1USDC;
l2USDC = _l2USDC;
destinationDomain = _destinationDomain;
}
function _initialize(address _cctpMessenger, address _cctpTransmitter) internal {
cctpMessenger = _cctpMessenger;
cctpTransmitter = _cctpTransmitter;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Claim USDC that has been bridged cross-chain.
/// @param _nonce The nonce of the message from CCTP.
/// @param _cctpMessage The message passed to MessageTransmitter contract in CCTP.
/// @param _cctpSignature The signature passed to MessageTransmitter contract in CCTP.
function claimUSDC(
uint256 _nonce,
bytes calldata _cctpMessage,
bytes calldata _cctpSignature
) public {
// Check that `_nonce` matches the nonce encoded in `_cctpMessage`.
// According to the encoding of `_cctpMessage`, the nonce is in bytes 12 to 20 (a uint64).
// See here: https://github.com/circlefin/evm-cctp-contracts/blob/master/src/messages/Message.sol#L29
uint256 _expectedMessageNonce;
assembly {
_expectedMessageNonce := and(shr(96, calldataload(_cctpMessage.offset)), 0xffffffffffffffff)
}
require(_expectedMessageNonce == _nonce, "nonce mismatch");
require(status[_nonce] == CCTPMessageStatus.Pending, "message not relayed");
// call transmitter to mint USDC
bool _success = IMessageTransmitter(cctpTransmitter).receiveMessage(_cctpMessage, _cctpSignature);
require(_success, "call transmitter failed");
status[_nonce] = CCTPMessageStatus.Relayed;
}
}
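The assembly in `claimUSDC` reads the 8-byte nonce from bytes 12..20 of the CCTP message header. An equivalent non-assembly sketch of the same byte arithmetic, for readers verifying the offsets:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;

/// @dev Illustrative only: a high-level equivalent of the nonce extraction in claimUSDC.
library CCTPMessageNonce {
    /// CCTP header layout: version (bytes 0..4), sourceDomain (4..8),
    /// destinationDomain (8..12), nonce (12..20, a big-endian uint64).
    function nonceOf(bytes calldata message) internal pure returns (uint64) {
        require(message.length >= 20, "message too short");
        return uint64(bytes8(message[12:20]));
    }
}
```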

View File

@@ -0,0 +1,528 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import {MockERC20} from "solmate/test/utils/mocks/MockERC20.sol";
import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol";
import {L1GatewayRouter} from "../L1/gateways/L1GatewayRouter.sol";
import {IL1ERC20Gateway, L1USDCGateway} from "../L1/gateways/usdc/L1USDCGateway.sol";
import {IL1ScrollMessenger} from "../L1/IL1ScrollMessenger.sol";
import {IL2ERC20Gateway, L2USDCGateway} from "../L2/gateways/usdc/L2USDCGateway.sol";
import {AddressAliasHelper} from "../libraries/common/AddressAliasHelper.sol";
import {L1GatewayTestBase} from "./L1GatewayTestBase.t.sol";
import {MockScrollMessenger} from "./mocks/MockScrollMessenger.sol";
import {MockGatewayRecipient} from "./mocks/MockGatewayRecipient.sol";
contract L1USDCGatewayTest is L1GatewayTestBase {
// from L1USDCGateway
event FinalizeWithdrawERC20(
address indexed _l1Token,
address indexed _l2Token,
address indexed _from,
address _to,
uint256 _amount,
bytes _data
);
event DepositERC20(
address indexed _l1Token,
address indexed _l2Token,
address indexed _from,
address _to,
uint256 _amount,
bytes _data
);
MockERC20 private l1USDC;
MockERC20 private l2USDC;
L1USDCGateway private gateway;
L1GatewayRouter private router;
L2USDCGateway private counterpartGateway;
function setUp() public {
setUpBase();
// Deploy tokens
l1USDC = new MockERC20("USDC", "USDC", 6);
l2USDC = new MockERC20("USDC", "USDC", 6);
// Deploy L1 contracts
gateway = _deployGateway();
router = L1GatewayRouter(address(new ERC1967Proxy(address(new L1GatewayRouter()), new bytes(0))));
// Deploy L2 contracts
counterpartGateway = new L2USDCGateway(address(l1USDC), address(l2USDC));
// Initialize L1 contracts
gateway.initialize(address(counterpartGateway), address(router), address(l1Messenger));
router.initialize(address(0), address(gateway));
// Prepare token balances
l1USDC.mint(address(this), type(uint128).max);
l1USDC.approve(address(gateway), type(uint256).max);
l1USDC.approve(address(router), type(uint256).max);
}
function testInitialized() public {
assertEq(address(counterpartGateway), gateway.counterpart());
assertEq(address(router), gateway.router());
assertEq(address(l1Messenger), gateway.messenger());
assertEq(address(l1USDC), gateway.l1USDC());
assertEq(address(l2USDC), gateway.l2USDC());
assertEq(address(l2USDC), gateway.getL2ERC20Address(address(l1USDC)));
assertEq(0, gateway.totalBridgedUSDC());
hevm.expectRevert("Initializable: contract is already initialized");
gateway.initialize(address(counterpartGateway), address(router), address(l1Messenger));
}
function testDepositPaused() public {
// non-owner call pause, should revert
hevm.startPrank(address(1));
hevm.expectRevert("Ownable: caller is not the owner");
gateway.pauseDeposit(false);
hevm.expectRevert("Ownable: caller is not the owner");
gateway.pauseDeposit(true);
hevm.stopPrank();
// pause deposit
gateway.pauseDeposit(true);
// deposit paused, should revert
hevm.expectRevert("deposit paused");
gateway.depositERC20(address(l1USDC), 1, 0);
hevm.expectRevert("deposit paused");
gateway.depositERC20(address(l1USDC), address(this), 1, 0);
hevm.expectRevert("deposit paused");
gateway.depositERC20AndCall(address(l1USDC), address(this), 1, new bytes(0), 0);
}
function testPauseWithdraw() public {
// non-owner call pause, should revert
hevm.startPrank(address(1));
hevm.expectRevert("Ownable: caller is not the owner");
gateway.pauseWithdraw(false);
hevm.expectRevert("Ownable: caller is not the owner");
gateway.pauseWithdraw(true);
hevm.stopPrank();
}
function testDepositERC20(
uint256 amount,
uint256 gasLimit,
uint256 feePerGas
) public {
_depositERC20(false, amount, gasLimit, feePerGas);
}
function testDepositERC20WithRecipient(
uint256 amount,
address recipient,
uint256 gasLimit,
uint256 feePerGas
) public {
_depositERC20WithRecipient(false, amount, recipient, gasLimit, feePerGas);
}
function testRouterDepositERC20(
uint256 amount,
uint256 gasLimit,
uint256 feePerGas
) public {
_depositERC20(true, amount, gasLimit, feePerGas);
}
function testRouterDepositERC20WithRecipient(
uint256 amount,
address recipient,
uint256 gasLimit,
uint256 feePerGas
) public {
_depositERC20WithRecipient(true, amount, recipient, gasLimit, feePerGas);
}
function testFinalizeWithdrawERC20FailedMocking(
address sender,
address recipient,
uint256 amount,
bytes memory dataToCall
) public {
amount = bound(amount, 1, 100000);
// revert when caller is not messenger
hevm.expectRevert("only messenger can call");
gateway.finalizeWithdrawERC20(address(l1USDC), address(l2USDC), sender, recipient, amount, dataToCall);
MockScrollMessenger mockMessenger = new MockScrollMessenger();
gateway = _deployGateway();
gateway.initialize(address(counterpartGateway), address(router), address(mockMessenger));
// only call by counterpart
hevm.expectRevert("only call by counterpart");
mockMessenger.callTarget(
address(gateway),
abi.encodeWithSelector(
gateway.finalizeWithdrawERC20.selector,
address(l1USDC),
address(l2USDC),
sender,
recipient,
amount,
dataToCall
)
);
mockMessenger.setXDomainMessageSender(address(counterpartGateway));
// nonzero msg.value
hevm.expectRevert("nonzero msg.value");
mockMessenger.callTarget{value: 1}(
address(gateway),
abi.encodeWithSelector(
gateway.finalizeWithdrawERC20.selector,
address(l1USDC),
address(l2USDC),
sender,
recipient,
amount,
dataToCall
)
);
// l1 token not USDC
hevm.expectRevert("l1 token not USDC");
mockMessenger.callTarget(
address(gateway),
abi.encodeWithSelector(
gateway.finalizeWithdrawERC20.selector,
address(l2USDC),
address(l2USDC),
sender,
recipient,
amount,
dataToCall
)
);
// l2 token not USDC
hevm.expectRevert("l2 token not USDC");
mockMessenger.callTarget(
address(gateway),
abi.encodeWithSelector(
gateway.finalizeWithdrawERC20.selector,
address(l1USDC),
address(l1USDC),
sender,
recipient,
amount,
dataToCall
)
);
// withdraw paused
gateway.pauseWithdraw(true);
hevm.expectRevert("withdraw paused");
mockMessenger.callTarget(
address(gateway),
abi.encodeWithSelector(
gateway.finalizeWithdrawERC20.selector,
address(l1USDC),
address(l2USDC),
sender,
recipient,
amount,
dataToCall
)
);
}
function testFinalizeWithdrawERC20Failed(
address sender,
address recipient,
uint256 amount,
bytes memory dataToCall
) public {
// blacklist some addresses
hevm.assume(recipient != address(0));
hevm.assume(recipient != address(gateway));
amount = bound(amount, 1, l1USDC.balanceOf(address(this)));
// deposit some USDC to gateway
gateway.depositERC20(address(l1USDC), amount, 0);
// do finalize withdraw usdc
bytes memory message = abi.encodeWithSelector(
IL1ERC20Gateway.finalizeWithdrawERC20.selector,
address(l1USDC),
address(l2USDC),
sender,
recipient,
amount,
dataToCall
);
bytes memory xDomainCalldata = abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
address(uint160(address(counterpartGateway)) + 1),
address(gateway),
0,
0,
message
);
prepareL2MessageRoot(keccak256(xDomainCalldata));
IL1ScrollMessenger.L2MessageProof memory proof;
proof.batchIndex = rollup.lastFinalizedBatchIndex();
// counterpart is not L2USDCGateway
// emit FailedRelayedMessage from L1ScrollMessenger
hevm.expectEmit(true, false, false, true);
emit FailedRelayedMessage(keccak256(xDomainCalldata));
uint256 gatewayBalance = l1USDC.balanceOf(address(gateway));
uint256 recipientBalance = l1USDC.balanceOf(recipient);
assertBoolEq(false, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
l1Messenger.relayMessageWithProof(
address(uint160(address(counterpartGateway)) + 1),
address(gateway),
0,
0,
message,
proof
);
assertEq(gatewayBalance, l1USDC.balanceOf(address(gateway)));
assertEq(recipientBalance, l1USDC.balanceOf(recipient));
assertBoolEq(false, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
}
function testFinalizeWithdrawERC20(
address sender,
uint256 amount,
bytes memory dataToCall
) public {
MockGatewayRecipient recipient = new MockGatewayRecipient();
amount = bound(amount, 1, l1USDC.balanceOf(address(this)));
// deposit some USDC to gateway
gateway.depositERC20(address(l1USDC), amount, 0);
// do finalize withdraw usdc
bytes memory message = abi.encodeWithSelector(
IL1ERC20Gateway.finalizeWithdrawERC20.selector,
address(l1USDC),
address(l2USDC),
sender,
address(recipient),
amount,
dataToCall
);
bytes memory xDomainCalldata = abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
address(counterpartGateway),
address(gateway),
0,
0,
message
);
prepareL2MessageRoot(keccak256(xDomainCalldata));
IL1ScrollMessenger.L2MessageProof memory proof;
proof.batchIndex = rollup.lastFinalizedBatchIndex();
// emit FinalizeWithdrawERC20 from L1USDCGateway
{
hevm.expectEmit(true, true, true, true);
emit FinalizeWithdrawERC20(
address(l1USDC),
address(l2USDC),
sender,
address(recipient),
amount,
dataToCall
);
}
// emit RelayedMessage from L1ScrollMessenger
{
hevm.expectEmit(true, false, false, true);
emit RelayedMessage(keccak256(xDomainCalldata));
}
uint256 gatewayBalance = l1USDC.balanceOf(address(gateway));
uint256 totalBridgedUSDCBefore = gateway.totalBridgedUSDC();
uint256 recipientBalance = l1USDC.balanceOf(address(recipient));
assertBoolEq(false, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
l1Messenger.relayMessageWithProof(address(counterpartGateway), address(gateway), 0, 0, message, proof);
assertEq(gatewayBalance - amount, l1USDC.balanceOf(address(gateway)));
assertEq(totalBridgedUSDCBefore - amount, gateway.totalBridgedUSDC());
assertEq(recipientBalance + amount, l1USDC.balanceOf(address(recipient)));
assertBoolEq(true, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
}
function _depositERC20(
bool useRouter,
uint256 amount,
uint256 gasLimit,
uint256 feePerGas
) private {
amount = bound(amount, 0, l1USDC.balanceOf(address(this)));
gasLimit = bound(gasLimit, 0, 1000000);
feePerGas = bound(feePerGas, 0, 1000);
gasOracle.setL2BaseFee(feePerGas);
uint256 feeToPay = feePerGas * gasLimit;
bytes memory message = abi.encodeWithSelector(
IL2ERC20Gateway.finalizeDepositERC20.selector,
address(l1USDC),
address(l2USDC),
address(this),
address(this),
amount,
new bytes(0)
);
bytes memory xDomainCalldata = abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
address(gateway),
address(counterpartGateway),
0,
0,
message
);
if (amount == 0) {
hevm.expectRevert("deposit zero amount");
if (useRouter) {
router.depositERC20{value: feeToPay + extraValue}(address(l1USDC), amount, gasLimit);
} else {
gateway.depositERC20{value: feeToPay + extraValue}(address(l1USDC), amount, gasLimit);
}
} else {
// token is not l1USDC
hevm.expectRevert("only USDC is allowed");
gateway.depositERC20(address(l2USDC), amount, gasLimit);
// emit QueueTransaction from L1MessageQueue
{
hevm.expectEmit(true, true, false, true);
address sender = AddressAliasHelper.applyL1ToL2Alias(address(l1Messenger));
emit QueueTransaction(sender, address(l2Messenger), 0, 0, gasLimit, xDomainCalldata);
}
// emit SentMessage from L1ScrollMessenger
{
hevm.expectEmit(true, true, false, true);
emit SentMessage(address(gateway), address(counterpartGateway), 0, 0, gasLimit, message);
}
// emit DepositERC20 from L1USDCGateway
hevm.expectEmit(true, true, true, true);
emit DepositERC20(address(l1USDC), address(l2USDC), address(this), address(this), amount, new bytes(0));
uint256 gatewayBalance = l1USDC.balanceOf(address(gateway));
uint256 totalBridgedUSDCBefore = gateway.totalBridgedUSDC();
uint256 feeVaultBalance = address(feeVault).balance;
assertBoolEq(false, l1Messenger.isL1MessageSent(keccak256(xDomainCalldata)));
if (useRouter) {
router.depositERC20{value: feeToPay + extraValue}(address(l1USDC), amount, gasLimit);
} else {
gateway.depositERC20{value: feeToPay + extraValue}(address(l1USDC), amount, gasLimit);
}
assertEq(amount + gatewayBalance, l1USDC.balanceOf(address(gateway)));
assertEq(amount + totalBridgedUSDCBefore, gateway.totalBridgedUSDC());
assertEq(feeToPay + feeVaultBalance, address(feeVault).balance);
assertBoolEq(true, l1Messenger.isL1MessageSent(keccak256(xDomainCalldata)));
}
}
function _depositERC20WithRecipient(
bool useRouter,
uint256 amount,
address recipient,
uint256 gasLimit,
uint256 feePerGas
) private {
amount = bound(amount, 0, l1USDC.balanceOf(address(this)));
gasLimit = bound(gasLimit, 0, 1000000);
feePerGas = bound(feePerGas, 0, 1000);
gasOracle.setL2BaseFee(feePerGas);
uint256 feeToPay = feePerGas * gasLimit;
bytes memory message = abi.encodeWithSelector(
IL2ERC20Gateway.finalizeDepositERC20.selector,
address(l1USDC),
address(l2USDC),
address(this),
recipient,
amount,
new bytes(0)
);
bytes memory xDomainCalldata = abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
address(gateway),
address(counterpartGateway),
0,
0,
message
);
if (amount == 0) {
hevm.expectRevert("deposit zero amount");
if (useRouter) {
router.depositERC20{value: feeToPay + extraValue}(address(l1USDC), recipient, amount, gasLimit);
} else {
gateway.depositERC20{value: feeToPay + extraValue}(address(l1USDC), recipient, amount, gasLimit);
}
} else {
// token is not l1USDC
hevm.expectRevert("only USDC is allowed");
gateway.depositERC20(address(l2USDC), recipient, amount, gasLimit);
// emit QueueTransaction from L1MessageQueue
{
hevm.expectEmit(true, true, false, true);
address sender = AddressAliasHelper.applyL1ToL2Alias(address(l1Messenger));
emit QueueTransaction(sender, address(l2Messenger), 0, 0, gasLimit, xDomainCalldata);
}
// emit SentMessage from L1ScrollMessenger
{
hevm.expectEmit(true, true, false, true);
emit SentMessage(address(gateway), address(counterpartGateway), 0, 0, gasLimit, message);
}
// emit DepositERC20 from L1USDCGateway
hevm.expectEmit(true, true, true, true);
emit DepositERC20(address(l1USDC), address(l2USDC), address(this), recipient, amount, new bytes(0));
uint256 gatewayBalance = l1USDC.balanceOf(address(gateway));
uint256 totalBridgedUSDCBefore = gateway.totalBridgedUSDC();
uint256 feeVaultBalance = address(feeVault).balance;
assertBoolEq(false, l1Messenger.isL1MessageSent(keccak256(xDomainCalldata)));
if (useRouter) {
router.depositERC20{value: feeToPay + extraValue}(address(l1USDC), recipient, amount, gasLimit);
} else {
gateway.depositERC20{value: feeToPay + extraValue}(address(l1USDC), recipient, amount, gasLimit);
}
assertEq(amount + gatewayBalance, l1USDC.balanceOf(address(gateway)));
assertEq(amount + totalBridgedUSDCBefore, gateway.totalBridgedUSDC());
assertEq(feeToPay + feeVaultBalance, address(feeVault).balance);
assertBoolEq(true, l1Messenger.isL1MessageSent(keccak256(xDomainCalldata)));
}
}
function _deployGateway() internal returns (L1USDCGateway) {
return
L1USDCGateway(
payable(new ERC1967Proxy(address(new L1USDCGateway(address(l1USDC), address(l2USDC))), new bytes(0)))
);
}
}

View File

@@ -56,7 +56,7 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
router = L2GatewayRouter(address(new ERC1967Proxy(address(new L2GatewayRouter()), new bytes(0))));
// Deploy L1 contracts
counterpartGateway = new L1USDCGateway();
counterpartGateway = new L1USDCGateway(address(l1USDC), address(l2USDC));
// Initialize L2 contracts
gateway.initialize(address(counterpartGateway), address(router), address(l2Messenger));
@@ -128,16 +128,6 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
_withdrawERC20WithRecipient(false, amount, recipient, gasLimit, feePerGas);
}
function testWithdrawERC20WithRecipientAndCalldata(
uint256 amount,
address recipient,
bytes memory dataToCall,
uint256 gasLimit,
uint256 feePerGas
) public {
_withdrawERC20WithRecipientAndCalldata(false, amount, recipient, dataToCall, gasLimit, feePerGas);
}
function testRouterWithdrawERC20(
uint256 amount,
uint256 gasLimit,
@@ -155,16 +145,6 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
_withdrawERC20WithRecipient(true, amount, recipient, gasLimit, feePerGas);
}
function testRouterWithdrawERC20WithRecipientAndCalldata(
uint256 amount,
address recipient,
bytes memory dataToCall,
uint256 gasLimit,
uint256 feePerGas
) public {
_withdrawERC20WithRecipientAndCalldata(true, amount, recipient, dataToCall, gasLimit, feePerGas);
}
function testFinalizeDepositERC20FailedMocking(
address sender,
address recipient,
@@ -356,7 +336,6 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
) private {
amount = bound(amount, 0, l2USDC.balanceOf(address(this)));
gasLimit = bound(gasLimit, 21000, 1000000);
feePerGas = bound(feePerGas, 0, 1000);
feePerGas = 0;
setL1BaseFee(feePerGas);
@@ -433,7 +412,6 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
) private {
amount = bound(amount, 0, l2USDC.balanceOf(address(this)));
gasLimit = bound(gasLimit, 21000, 1000000);
feePerGas = bound(feePerGas, 0, 1000);
feePerGas = 0;
setL1BaseFee(feePerGas);
@@ -501,85 +479,6 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
}
}
function _withdrawERC20WithRecipientAndCalldata(
bool useRouter,
uint256 amount,
address recipient,
bytes memory dataToCall,
uint256 gasLimit,
uint256 feePerGas
) private {
amount = bound(amount, 0, l2USDC.balanceOf(address(this)));
gasLimit = bound(gasLimit, 21000, 1000000);
feePerGas = bound(feePerGas, 0, 1000);
// we don't charge fee now.
feePerGas = 0;
setL1BaseFee(feePerGas);
uint256 feeToPay = feePerGas * gasLimit;
bytes memory message = abi.encodeWithSelector(
IL1ERC20Gateway.finalizeWithdrawERC20.selector,
address(l1USDC),
address(l2USDC),
address(this),
recipient,
amount,
dataToCall
);
bytes memory xDomainCalldata = abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
address(gateway),
address(counterpartGateway),
0,
0,
message
);
if (amount == 0) {
hevm.expectRevert("withdraw zero amount");
if (useRouter) {
router.withdrawERC20AndCall{value: feeToPay}(address(l2USDC), recipient, amount, dataToCall, gasLimit);
} else {
gateway.withdrawERC20AndCall{value: feeToPay}(address(l2USDC), recipient, amount, dataToCall, gasLimit);
}
} else {
// token is not l1USDC
hevm.expectRevert("only USDC is allowed");
gateway.withdrawERC20AndCall(address(l1USDC), recipient, amount, dataToCall, gasLimit);
// emit AppendMessage from L2MessageQueue
{
hevm.expectEmit(false, false, false, true);
emit AppendMessage(0, keccak256(xDomainCalldata));
}
// emit SentMessage from L2ScrollMessenger
{
hevm.expectEmit(true, true, false, true);
emit SentMessage(address(gateway), address(counterpartGateway), 0, 0, gasLimit, message);
}
// emit WithdrawERC20 from L2USDCGateway
hevm.expectEmit(true, true, true, true);
emit WithdrawERC20(address(l1USDC), address(l2USDC), address(this), recipient, amount, dataToCall);
uint256 senderBalance = l2USDC.balanceOf(address(this));
uint256 gatewayBalance = l2USDC.balanceOf(address(gateway));
uint256 feeVaultBalance = address(feeVault).balance;
assertBoolEq(false, l2Messenger.isL2MessageSent(keccak256(xDomainCalldata)));
if (useRouter) {
router.withdrawERC20AndCall{value: feeToPay}(address(l2USDC), recipient, amount, dataToCall, gasLimit);
} else {
gateway.withdrawERC20AndCall{value: feeToPay}(address(l2USDC), recipient, amount, dataToCall, gasLimit);
}
assertEq(senderBalance - amount, l2USDC.balanceOf(address(this)));
assertEq(gatewayBalance, l2USDC.balanceOf(address(gateway)));
assertEq(feeToPay + feeVaultBalance, address(feeVault).balance);
assertBoolEq(true, l2Messenger.isL2MessageSent(keccak256(xDomainCalldata)));
}
}
function _deployGateway() internal returns (L2USDCGateway) {
return
L2USDCGateway(

View File

@@ -410,7 +410,7 @@ contract L2WETHGatewayTest is L2GatewayTestBase {
uint256 gasLimit,
uint256 feePerGas
) private {
amount = bound(amount, 0, l1weth.balanceOf(address(this)));
amount = bound(amount, 0, l2weth.balanceOf(address(this)));
gasLimit = bound(gasLimit, 21000, 1000000);
feePerGas = 0;
@@ -485,7 +485,7 @@ contract L2WETHGatewayTest is L2GatewayTestBase {
uint256 gasLimit,
uint256 feePerGas
) private {
amount = bound(amount, 0, l1weth.balanceOf(address(this)));
amount = bound(amount, 0, l2weth.balanceOf(address(this)));
gasLimit = bound(gasLimit, 21000, 1000000);
feePerGas = 0;

View File

@@ -19,7 +19,7 @@ contract ScrollChainTest is DSTestPlus {
event UpdateSequencer(address indexed account, bool status);
event UpdateProver(address indexed account, bool status);
event UpdateVerifier(address indexed oldVerifier, address indexed newVerifier);
event UpdateMaxNumL2TxInChunk(uint256 oldMaxNumL2TxInChunk, uint256 newMaxNumL2TxInChunk);
event UpdateMaxNumTxInChunk(uint256 oldMaxNumTxInChunk, uint256 newMaxNumTxInChunk);
event CommitBatch(uint256 indexed batchIndex, bytes32 indexed batchHash);
event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
@@ -429,6 +429,15 @@ contract ScrollChainTest is DSTestPlus {
mstore(add(bitmap, add(0x20, 32)), 42) // bitmap1
}
// too many txs in one chunk, revert
rollup.updateMaxNumTxInChunk(2); // 3 - 1
hevm.expectRevert("too many txs in one chunk");
rollup.commitBatch(0, batchHeader1, chunks, bitmap); // first chunk with too many txs
rollup.updateMaxNumTxInChunk(185); // 5+10+300 - 2 - 127
hevm.expectRevert("too many txs in one chunk");
rollup.commitBatch(0, batchHeader1, chunks, bitmap); // second chunk with too many txs
rollup.updateMaxNumTxInChunk(186);
hevm.expectEmit(true, true, false, true);
emit CommitBatch(2, bytes32(0x03a9cdcb9d582251acf60937db006ec99f3505fd4751b7c1f92c9a8ef413e873));
rollup.commitBatch(0, batchHeader1, chunks, bitmap);
@@ -631,20 +640,20 @@ contract ScrollChainTest is DSTestPlus {
assertEq(rollup.verifier(), _newVerifier);
}
function testUpdateMaxNumL2TxInChunk(uint256 _maxNumL2TxInChunk) public {
function testUpdateMaxNumTxInChunk(uint256 _maxNumTxInChunk) public {
// set by non-owner, should revert
hevm.startPrank(address(1));
hevm.expectRevert("Ownable: caller is not the owner");
rollup.updateMaxNumL2TxInChunk(_maxNumL2TxInChunk);
rollup.updateMaxNumTxInChunk(_maxNumTxInChunk);
hevm.stopPrank();
// change to random operator
hevm.expectEmit(false, false, false, true);
emit UpdateMaxNumL2TxInChunk(100, _maxNumL2TxInChunk);
emit UpdateMaxNumTxInChunk(100, _maxNumTxInChunk);
assertEq(rollup.maxNumL2TxInChunk(), 100);
rollup.updateMaxNumL2TxInChunk(_maxNumL2TxInChunk);
assertEq(rollup.maxNumL2TxInChunk(), _maxNumL2TxInChunk);
assertEq(rollup.maxNumTxInChunk(), 100);
rollup.updateMaxNumTxInChunk(_maxNumTxInChunk);
assertEq(rollup.maxNumTxInChunk(), _maxNumTxInChunk);
}
function testImportGenesisBlock() public {

View File

@@ -42,12 +42,15 @@ require (
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/text v0.12.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
)
require github.com/prometheus/client_golang v1.14.0
require (
github.com/google/uuid v1.3.0
github.com/prometheus/client_golang v1.14.0
)
require (
github.com/beorn7/perks v1.0.1 // indirect

View File

@@ -73,6 +73,8 @@ github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
@@ -212,8 +214,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

View File

@@ -163,16 +163,14 @@ func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout promethe
"prover public key", assignedProverTask.ProverPublicKey, "prover name", assignedProverTask.ProverName, "task type", assignedProverTask.TaskType)
err := c.db.Transaction(func(tx *gorm.DB) error {
// update prover task proving status as ProverProofInvalid
if err := c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverProofInvalid, tx); err != nil {
log.Error("update prover task proving status failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
if err := c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, assignedProverTask.UUID, types.ProverProofInvalid, tx); err != nil {
log.Error("update prover task proving status failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
// update prover task failure type
if err := c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverTaskFailureTypeTimeout, tx); err != nil {
log.Error("update prover task failure type failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
if err := c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, assignedProverTask.UUID, types.ProverTaskFailureTypeTimeout, tx); err != nil {
log.Error("update prover task failure type failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}

View File

@@ -128,13 +128,13 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
// Store session info.
if err = bp.proverTaskOrm.SetProverTask(ctx, &proverTask); err != nil {
if err = bp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
bp.recoverProvingStatus(ctx, batchTask)
log.Error("db set session info fail", "task hash", batchTask.Hash, "prover name", proverName.(string), "prover pubKey", publicKey.(string), "err", err)
log.Error("insert batch prover task info fail", "taskID", batchTask.Hash, "publicKey", publicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx, batchTask.Hash)
taskMsg, err := bp.formatProverTask(ctx, &proverTask)
if err != nil {
bp.recoverProvingStatus(ctx, batchTask)
log.Error("format prover task failure", "hash", batchTask.Hash, "err", err)
@@ -146,11 +146,11 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return taskMsg, nil
}
func (bp *BatchProverTask) formatProverTask(ctx context.Context, taskID string) (*coordinatorType.GetTaskSchema, error) {
func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
// get chunk from db
chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, taskID)
chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, task.TaskID)
if err != nil {
err = fmt.Errorf("failed to get chunk proofs for batch task id:%s err:%w ", taskID, err)
err = fmt.Errorf("failed to get chunk proofs for batch task id:%s err:%w ", task.TaskID, err)
return nil, err
}
@@ -159,7 +159,7 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, taskID string)
for _, chunk := range chunks {
var proof message.ChunkProof
if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, taskID, chunk.Hash)
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, task.TaskID, chunk.Hash)
}
chunkProofs = append(chunkProofs, &proof)
@@ -181,11 +181,12 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, taskID string)
chunkProofsBytes, err := json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal chunk proofs, taskID:%s err:%w", taskID, err)
return nil, fmt.Errorf("failed to marshal chunk proofs, taskID:%s err:%w", task.TaskID, err)
}
taskMsg := &coordinatorType.GetTaskSchema{
TaskID: taskID,
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBatch),
TaskData: string(chunkProofsBytes),
}

View File

@@ -132,13 +132,14 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Here we need to use UTC time; see scroll/common/databased/db.go for why.
AssignedAt: utils.NowUTC(),
}
if err = cp.proverTaskOrm.SetProverTask(ctx, &proverTask); err != nil {
if err = cp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
cp.recoverProvingStatus(ctx, chunkTask)
log.Error("db set session info fail", "task hash", chunkTask.Hash, "prover name", proverName.(string), "prover pubKey", publicKey.(string), "err", err)
log.Error("insert chunk prover task fail", "taskID", chunkTask.Hash, "publicKey", publicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := cp.formatProverTask(ctx, chunkTask.Hash)
taskMsg, err := cp.formatProverTask(ctx, &proverTask)
if err != nil {
cp.recoverProvingStatus(ctx, chunkTask)
log.Error("format prover task failure", "hash", chunkTask.Hash, "err", err)
@@ -150,11 +151,11 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return taskMsg, nil
}
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, hash string) (*coordinatorType.GetTaskSchema, error) {
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
// Get block hashes.
wrappedBlocks, wrappedErr := cp.blockOrm.GetL2BlocksByChunkHash(ctx, hash)
wrappedBlocks, wrappedErr := cp.blockOrm.GetL2BlocksByChunkHash(ctx, task.TaskID)
if wrappedErr != nil || len(wrappedBlocks) == 0 {
return nil, fmt.Errorf("failed to fetch wrapped blocks, batch hash:%s err:%w", hash, wrappedErr)
return nil, fmt.Errorf("failed to fetch wrapped blocks, chunk hash:%s err:%w", task.TaskID, wrappedErr)
}
blockHashes := make([]common.Hash, len(wrappedBlocks))
@@ -167,11 +168,12 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, hash string) (*
}
blockHashesBytes, err := json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal block hashes hash:%s, err:%w", hash, err)
return nil, fmt.Errorf("failed to marshal block hashes hash:%s, err:%w", task.TaskID, err)
}
proverTaskSchema := &coordinatorType.GetTaskSchema{
TaskID: hash,
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeChunk),
TaskData: string(blockHashesBytes),
}

View File

@@ -57,10 +57,6 @@ func (b *BaseProverTask) checkAttemptsExceeded(hash string, taskType message.Pro
log.Error("failed to update batch proving_status as failed", "msg.ID", hash, "error", err)
}
}
// Update the prover task status so the timeout checker no longer checks it.
if err := b.proverTaskOrm.UpdateAllProverTaskProvingStatusOfTaskID(b.ctx, message.ProofType(proverTasks[0].TaskType), hash, types.ProverProofInvalid, tx); err != nil {
log.Error("failed to update prover task proving_status as failed", "msg.ID", hash, "error", err)
}
return nil
})
if transErr == nil {

View File

@@ -135,10 +135,21 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
return fmt.Errorf("get ProverVersion from context failed")
}
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndProver(ctx, proofMsg.Type, proofMsg.ID, pk, pv)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
var proverTask *orm.ProverTask
var err error
if proofParameter.UUID != "" {
proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx, proofParameter.UUID, pk)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "uuid", proofParameter.UUID, "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
}
} else {
// TODO: remove this logic once all provers have upgraded.
proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx, proofMsg.Type, proofMsg.ID, pk, pv)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
}
}
proofTime := time.Since(proverTask.CreatedAt)
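
The uuid-or-taskID branching above is a rolling-upgrade pattern: upgraded provers echo back the per-assignment UUID, while legacy provers only send the task ID. A stripped-down sketch of the same fallback, with in-memory stand-ins for the two ORM lookups:

package main

import (
    "errors"
    "fmt"
)

type proverTask struct{ UUID, TaskID string }

var errNotFound = errors.New("prover task not found")

// byUUID and byTaskID stand in for the two ORM lookups; the real code queries Postgres.
func byUUID(uuid string) (*proverTask, error) {
    if uuid == "uuid-1" {
        return &proverTask{UUID: uuid, TaskID: "task-1"}, nil
    }
    return nil, errNotFound
}

func byTaskID(taskID string) (*proverTask, error) {
    if taskID == "task-1" {
        return &proverTask{TaskID: taskID}, nil
    }
    return nil, errNotFound
}

// lookup prefers the new UUID path and falls back to the legacy task-ID path
// for provers that have not upgraded yet.
func lookup(uuid, taskID string) (*proverTask, error) {
    if uuid != "" {
        return byUUID(uuid)
    }
    return byTaskID(taskID)
}

func main() {
    legacy, _ := lookup("", "task-1") // old prover: request carries no uuid
    fmt.Println(legacy.TaskID)
    upgraded, _ := lookup("uuid-1", "task-1") // upgraded prover
    fmt.Println(upgraded.UUID)
}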
@@ -163,7 +174,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
if verifyErr != nil || !success {
m.verifierFailureTotal.WithLabelValues(pv).Inc()
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
m.proofRecover(ctx, proverTask, proofMsg)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
@@ -179,9 +190,9 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec)
if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg, proofTimeSec); err != nil {
if err := m.closeProofTask(ctx, proverTask, proofMsg, proofTimeSec); err != nil {
m.proofSubmitFailure.Inc()
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
m.proofRecover(ctx, proverTask, proofMsg)
return ErrCoordinatorInternalFailure
}
@@ -239,14 +250,14 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
// Verify if the proving task has already been assigned to another prover.
// Upon receiving an error message, it's possible the proving status has been reset by another prover
// and the task has been reassigned. In this case, the coordinator should avoid resetting the proving status.
m.processProverErr(ctx, proofMsg.ID, pk, proofMsg.Type)
m.processProverErr(ctx, proverTask)
m.validateFailureProverTaskStatusNotOk.Inc()
log.Info("proof generated by prover failed",
"taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName,
"proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType,
"failureMessage", "failureMessage", failureMsg)
"failureMessage", failureMsg)
return ErrValidatorFailureProofMsgStatusNotOk
}
@@ -259,7 +270,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
}
// store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, pk, proofMsg); updateTaskProofErr != nil {
if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofMsg); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
}
@@ -274,28 +285,28 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
return nil
}
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
log.Info("proof recover update proof status", "hash", hash, "proverPublicKey", pubKey,
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg) {
log.Info("proof recover update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
"taskType", proofMsg.Type.String(), "status", types.ProvingTaskUnassigned.String())
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskUnassigned, 0); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", hash, "pubKey", pubKey, "error", err)
if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProvingTaskUnassigned, 0); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", proverTask.TaskID, "pubKey", proverTask.ProverPublicKey, "error", err)
}
}
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg, proofTimeSec uint64) error {
log.Info("proof close task update proof status", "hash", hash, "proverPublicKey", pubKey,
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg, proofTimeSec uint64) error {
log.Info("proof close task update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
"taskType", proofMsg.Type.String(), "status", types.ProvingTaskVerified.String())
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskVerified, proofTimeSec); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "proverPublicKey", pubKey, "error", err)
if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProvingTaskVerified, proofTimeSec); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "error", err)
return err
}
return nil
}
// updateProofStatus updates the chunk/batch task and the prover task status.
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string, proverPublicKey string, proofMsg *message.ProofMsg, status types.ProvingStatus, proofTimeSec uint64) error {
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg, status types.ProvingStatus, proofTimeSec uint64) error {
var proverTaskStatus types.ProverProveStatus
switch status {
case types.ProvingTaskFailed, types.ProvingTaskUnassigned:
@@ -305,13 +316,13 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
}
err := m.db.Transaction(func(tx *gorm.DB) error {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsg.Type, hash, proverPublicKey, proverTaskStatus, tx); updateErr != nil {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proverTask.UUID, proverTaskStatus, tx); updateErr != nil {
return updateErr
}
// If the chunk/batch proof has already been verified, a failed status must not overwrite its proving status.
if m.checkIsTaskSuccess(ctx, hash, proofMsg.Type) {
log.Info("update proof status skip because this chunk / batch has been verified", "hash", hash, "public key", proverPublicKey)
if m.checkIsTaskSuccess(ctx, proverTask.TaskID, proofMsg.Type) {
log.Info("update proof status skip because this chunk / batch has been verified", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey)
return nil
}
@@ -324,20 +335,20 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
storeProofErr = m.batchOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.BatchProof, proofTimeSec, tx)
}
if storeProofErr != nil {
log.Error("failed to store chunk/batch proof into db", "hash", hash, "public key", proverPublicKey, "error", storeProofErr)
log.Error("failed to store chunk/batch proof into db", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey, "error", storeProofErr)
return storeProofErr
}
}
switch proofMsg.Type {
case message.ProofTypeChunk:
if err := m.chunkOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", hash, "error", err)
if err := m.chunkOrm.UpdateProvingStatus(ctx, proverTask.TaskID, status, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "hash", proverTask.TaskID, "error", err)
return err
}
case message.ProofTypeBatch:
if err := m.batchOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
log.Error("failed to update batch proving_status as failed", "msg.ID", hash, "error", err)
if err := m.batchOrm.UpdateProvingStatus(ctx, proverTask.TaskID, status, tx); err != nil {
log.Error("failed to update batch proving_status as failed", "hash", proverTask.TaskID, "error", err)
return err
}
}
@@ -349,7 +360,7 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
}
if status == types.ProvingTaskVerified && proofMsg.Type == message.ProofTypeChunk {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, hash); checkReadyErr != nil {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, proverTask.TaskID); checkReadyErr != nil {
log.Error("failed to check are all chunk proofs ready", "error", checkReadyErr)
return checkReadyErr
}
@@ -378,14 +389,15 @@ func (m *ProofReceiverLogic) checkIsTaskSuccess(ctx context.Context, hash string
return provingStatus == types.ProvingTaskVerified
}
func (m *ProofReceiverLogic) processProverErr(ctx context.Context, taskID, pk string, taskType message.ProofType) {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, taskType, taskID, pk, types.ProverProofInvalid); updateErr != nil {
log.Error("update prover task proving status failure", "taskID", taskID, "proverPublicKey", pk, "taskType", taskType, "error", updateErr)
func (m *ProofReceiverLogic) processProverErr(ctx context.Context, proverTask *orm.ProverTask) {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proverTask.UUID, types.ProverProofInvalid); updateErr != nil {
log.Error("update prover task proving status failure", "uuid", proverTask.UUID, "taskID", proverTask.TaskID, "proverPublicKey",
proverTask.ProverPublicKey, "taskType", message.ProofType(proverTask.TaskType).String(), "error", updateErr)
}
proverTasks, err := m.proverTaskOrm.GetAssignedTaskOfOtherProvers(ctx, taskType, taskID, pk)
proverTasks, err := m.proverTaskOrm.GetAssignedTaskOfOtherProvers(ctx, message.ProofType(proverTask.TaskType), proverTask.TaskID, proverTask.ProverPublicKey)
if err != nil {
log.Warn("checkIsAssignedToOtherProver failure", "taskID", taskID, "proverPublicKey", pk, "taskType", taskType, "error", err)
log.Warn("checkIsAssignedToOtherProver failure", "taskID", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "taskType", message.ProofType(proverTask.TaskType).String(), "error", err)
return
}
@@ -393,19 +405,19 @@ func (m *ProofReceiverLogic) processProverErr(ctx context.Context, taskID, pk st
return
}
switch taskType {
switch message.ProofType(proverTask.TaskType) {
case message.ProofTypeChunk:
if err := m.chunkOrm.UpdateProvingStatusFromProverError(ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to update chunk proving_status as failed", taskID, "proverPublicKey", pk, "taskType", taskType, "error", err)
if err := m.chunkOrm.UpdateProvingStatusFromProverError(ctx, proverTask.TaskID, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to update chunk proving_status as failed", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "taskType", message.ProofType(proverTask.TaskType).String(), "error", err)
}
case message.ProofTypeBatch:
if err := m.batchOrm.UpdateProvingStatusFromProverError(ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to update batch proving_status as failed", taskID, "proverPublicKey", pk, "taskType", taskType, "error", err)
if err := m.batchOrm.UpdateProvingStatusFromProverError(ctx, proverTask.TaskID, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to update batch proving_status as failed", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "taskType", message.ProofType(proverTask.TaskType).String(), "error", err)
}
}
}
func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, pk string, proofMsg *message.ProofMsg) error {
func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg) error {
// store the proof to prover task
var proofBytes []byte
var marshalErr error
@@ -419,5 +431,5 @@ func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, pk strin
if len(proofBytes) == 0 || marshalErr != nil {
return fmt.Errorf("updateProverTaskProof marshal proof error:%w", marshalErr)
}
return m.proverTaskOrm.UpdateProverTaskProof(ctx, proofMsg.Type, proofMsg.ID, pk, proofBytes)
return m.proverTaskOrm.UpdateProverTaskProof(ctx, proverTask.UUID, proofBytes)
}
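
updateProofStatus performs its prover-task, chunk/batch status, and proof writes inside a single db.Transaction so they commit or roll back together. A minimal sketch of that idiom; the model and the in-memory SQLite database are purely for demonstration:

package main

import (
    "fmt"

    "gorm.io/driver/sqlite"
    "gorm.io/gorm"
)

type task struct {
    ID     uint
    Status int
}

func main() {
    db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{})
    if err != nil {
        panic(err)
    }
    if err := db.AutoMigrate(&task{}); err != nil {
        panic(err)
    }
    db.Create(&task{Status: 0})

    // Returning an error from the closure rolls every update back;
    // returning nil commits them together.
    err = db.Transaction(func(tx *gorm.DB) error {
        if err := tx.Model(&task{}).Where("id = ?", 1).Update("status", 2).Error; err != nil {
            return err
        }
        // the chunk/batch proving-status updates would follow here
        return nil
    })
    fmt.Println("transaction error:", err)
}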

View File

@@ -234,7 +234,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
var totalL1CommitGas uint64
for _, block := range chunk.Blocks {
totalL2TxGas += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalL2TxNum += block.NumL2Transactions()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas += block.EstimateL1CommitGas()
}
@@ -306,15 +306,8 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
func (o *Chunk) UpdateProvingStatusFromProverError(ctx context.Context, hash string, status types.ProvingStatus) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
updateFields["prover_assigned_at"] = nil
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash).Where("proving_status", types.ProvingTaskAssigned)

View File

@@ -77,12 +77,13 @@ func TestProverTaskOrm(t *testing.T) {
AssignedAt: utils.NowUTC(),
}
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
err = proverTaskOrm.InsertProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
proverTasks, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), message.ProofTypeChunk, []string{"test-hash"})
assert.NoError(t, err)
assert.Equal(t, 1, len(proverTasks))
assert.Equal(t, proverTask.ProverName, proverTasks[0].ProverName)
assert.NotEqual(t, proverTask.UUID.String(), "00000000-0000-0000-0000-000000000000")
// test decimal reward, get reward
resultReward := proverTasks[0].Reward.BigInt()
@@ -91,12 +92,8 @@ func TestProverTaskOrm(t *testing.T) {
proverTask.ProvingStatus = int16(types.ProverProofValid)
proverTask.AssignedAt = utils.NowUTC()
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
proverTasks, err = proverTaskOrm.GetProverTasksByHashes(context.Background(), message.ProofTypeChunk, []string{"test-hash"})
assert.NoError(t, err)
assert.Equal(t, 1, len(proverTasks))
assert.Equal(t, proverTask.ProvingStatus, proverTasks[0].ProvingStatus)
err = proverTaskOrm.InsertProverTask(context.Background(), &proverTask)
assert.Error(t, err)
}
func TestProverTaskOrmUint256(t *testing.T) {
@@ -117,8 +114,9 @@ func TestProverTaskOrmUint256(t *testing.T) {
AssignedAt: utils.NowUTC(),
}
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
err = proverTaskOrm.InsertProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
assert.NotEqual(t, proverTask.UUID.String(), "00000000-0000-0000-0000-000000000000")
proverTasksUint256, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), message.ProofTypeChunk, []string{"test-hash"})
assert.NoError(t, err)
assert.Equal(t, 1, len(proverTasksUint256))

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"time"
"github.com/google/uuid"
"github.com/shopspring/decimal"
"gorm.io/gorm"
"gorm.io/gorm/clause"
@@ -18,7 +19,8 @@ import (
type ProverTask struct {
db *gorm.DB `gorm:"column:-"`
ID int64 `json:"id" gorm:"column:id"`
ID int64 `json:"id" gorm:"column:id"`
UUID uuid.UUID `json:"uuid" gorm:"column:uuid;type:uuid;default:gen_random_uuid()"`
// prover
ProverPublicKey string `json:"prover_public_key" gorm:"column:prover_public_key"`
@@ -114,14 +116,16 @@ func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, taskType messag
return proverTasks, nil
}
// GetProverTaskByTaskIDAndProver get prover task taskID and public key
func (o *ProverTask) GetProverTaskByTaskIDAndProver(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey, proverVersion string) (*ProverTask, error) {
// GetAssignedProverTaskByTaskIDAndProver gets the assigned prover task by task type, task ID, prover public key and prover version.
// TODO: deprecate this function once all provers have upgraded.
func (o *ProverTask) GetAssignedProverTaskByTaskIDAndProver(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey, proverVersion string) (*ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id", taskID)
db = db.Where("prover_public_key", proverPublicKey)
db = db.Where("prover_version", proverVersion)
db = db.Where("proving_status", types.ProverAssigned)
var proverTask ProverTask
err := db.First(&proverTask).Error
@@ -131,6 +135,21 @@ func (o *ProverTask) GetProverTaskByTaskIDAndProver(ctx context.Context, taskTyp
return &proverTask, nil
}
// GetProverTaskByUUIDAndPublicKey gets the prover task by UUID and public key.
func (o *ProverTask) GetProverTaskByUUIDAndPublicKey(ctx context.Context, uuid, publicKey string) (*ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("uuid", uuid)
db = db.Where("prover_public_key", publicKey)
var proverTask ProverTask
err := db.First(&proverTask).Error
if err != nil {
return nil, fmt.Errorf("ProverTask.GetProverTaskByUUID err:%w, uuid:%s publicKey:%s", err, uuid, publicKey)
}
return &proverTask, nil
}
// GetAssignedTaskOfOtherProvers gets the chunk/batch tasks assigned to other provers.
func (o *ProverTask) GetAssignedTaskOfOtherProvers(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey string) ([]ProverTask, error) {
db := o.db.WithContext(ctx)
@@ -199,96 +218,59 @@ func (o *ProverTask) TaskTimeoutMoreThanOnce(ctx context.Context, taskType messa
return false
}
// SetProverTask updates or inserts a ProverTask record.
func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask, dbTX ...*gorm.DB) error {
// InsertProverTask inserts a prover task record.
func (o *ProverTask) InsertProverTask(ctx context.Context, proverTask *ProverTask, dbTX ...*gorm.DB) error {
db := o.db.WithContext(ctx)
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.Clauses(clause.Returning{})
db = db.Model(&ProverTask{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}, {Name: "prover_version"}},
DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
})
if err := db.Create(&proverTask).Error; err != nil {
return fmt.Errorf("ProverTask.SetProverTask error: %w, prover task: %v", err, proverTask)
if err := db.Create(proverTask).Error; err != nil {
return fmt.Errorf("ProverTask.InsertProverTask error: %w, prover task: %v", err, proverTask)
}
return nil
}
// UpdateProverTaskProof updates the prover task's proof.
func (o *ProverTask) UpdateProverTaskProof(ctx context.Context, proofType message.ProofType, taskID string, pk string, proof []byte) error {
func (o *ProverTask) UpdateProverTaskProof(ctx context.Context, uuid uuid.UUID, proof []byte) error {
db := o.db
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", int(proofType), taskID, pk)
db = db.Where("uuid = ?", uuid)
if err := db.Update("proof", proof).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskProof error: %w, proof type: %v, taskID: %v, prover public key: %v", err, proofType.String(), taskID, pk)
return fmt.Errorf("ProverTask.UpdateProverTaskProof error: %w, uuid: %v", err, uuid)
}
return nil
}
// UpdateProverTaskProvingStatus updates the proving_status of a specific ProverTask record.
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.ProverProveStatus, dbTX ...*gorm.DB) error {
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, uuid uuid.UUID, status types.ProverProveStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", int(proofType), taskID, pk)
db = db.Where("uuid = ?", uuid)
if err := db.Update("proving_status", status).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskProvingStatus error: %w, proof type: %v, taskID: %v, prover public key: %v, status: %v", err, proofType.String(), taskID, pk, status.String())
}
return nil
}
// UpdateAllProverTaskProvingStatusOfTaskID updates all the proving_status of a specific task id.
func (o *ProverTask) UpdateAllProverTaskProvingStatusOfTaskID(ctx context.Context, proofType message.ProofType, taskID string, status types.ProverProveStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type = ? AND task_id = ?", int(proofType), taskID)
if err := db.Update("proving_status", status).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateAllProverTaskProvingStatusOfTaskID error: %w, proof type: %v, taskID: %v, status: %v", err, proofType.String(), taskID, status.String())
return fmt.Errorf("ProverTask.UpdateProverTaskProvingStatus error: %w, uuid:%s, status: %v", err, uuid, status.String())
}
return nil
}
// UpdateProverTaskFailureType updates the prover task's failure type.
func (o *ProverTask) UpdateProverTaskFailureType(ctx context.Context, proofType message.ProofType, taskID string, pk string, failureType types.ProverTaskFailureType, dbTX ...*gorm.DB) error {
func (o *ProverTask) UpdateProverTaskFailureType(ctx context.Context, uuid uuid.UUID, failureType types.ProverTaskFailureType, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_id", taskID).Where("prover_public_key", pk).Where("task_type", int(proofType))
db = db.Where("uuid", uuid)
if err := db.Update("failure_type", int(failureType)).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskFailureType error: %w, proof type: %v, taskID: %v, prover public key: %v, failure type: %v", err, proofType.String(), taskID, pk, failureType.String())
}
return nil
}
// UpdateAllProverTaskFailureTypeOfTaskID update the prover task failure type
func (o *ProverTask) UpdateAllProverTaskFailureTypeOfTaskID(ctx context.Context, proofType message.ProofType, taskID string, failureType types.ProverTaskFailureType, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_id", taskID).Where("task_type", int(proofType))
if err := db.Update("failure_type", int(failureType)).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateAllProverTaskFailureTypeOfTaskID error: %w, proof type: %v, taskID: %v, failure type: %v", err, proofType.String(), taskID, failureType.String())
return fmt.Errorf("ProverTask.UpdateProverTaskFailureType error: %w, uuid:%s, failure type: %v", err, uuid.String(), failureType.String())
}
return nil
}
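
All of these ORM methods share the variadic dbTX ...*gorm.DB parameter, which lets a caller thread its own transaction through while standalone callers simply omit it. A small sketch of the idiom, with a toy model and SQLite standing in for Postgres:

package main

import (
    "fmt"

    "gorm.io/driver/sqlite"
    "gorm.io/gorm"
)

type note struct {
    ID   uint
    Body string
}

// save shows the optional-transaction idiom: callers may pass an open
// *gorm.DB transaction, and standalone callers simply omit it.
func save(db *gorm.DB, n *note, dbTX ...*gorm.DB) error {
    h := db
    if len(dbTX) > 0 && dbTX[0] != nil {
        h = dbTX[0]
    }
    return h.Create(n).Error
}

func main() {
    db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{})
    if err != nil {
        panic(err)
    }
    if err := db.AutoMigrate(&note{}); err != nil {
        panic(err)
    }

    _ = save(db, &note{Body: "standalone"}) // uses the plain handle

    _ = db.Transaction(func(tx *gorm.DB) error {
        return save(db, &note{Body: "inside tx"}, tx) // joins the caller's transaction
    })

    var n int64
    db.Model(&note{}).Count(&n)
    fmt.Println("rows:", n) // 2
}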

View File

@@ -9,6 +9,7 @@ type GetTaskParameter struct {
// GetTaskSchema the schema data return to prover for get prover task
type GetTaskSchema struct {
UUID string `json:"uuid"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
TaskData string `json:"task_data"`
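
For reference, a sketch of what this schema serializes to on the wire with the new field; the struct below is a local copy with the same JSON tags, and all values are made up:

package main

import (
    "encoding/json"
    "fmt"
)

// getTaskSchema is a local copy of the coordinator type with the same JSON tags.
type getTaskSchema struct {
    UUID     string `json:"uuid"`
    TaskID   string `json:"task_id"`
    TaskType int    `json:"task_type"`
    TaskData string `json:"task_data"`
}

func main() {
    b, err := json.Marshal(getTaskSchema{
        UUID:     "2f6f9a43-0000-0000-0000-000000000001", // per-assignment id generated by Postgres
        TaskID:   "0x..chunk-or-batch-hash..",
        TaskType: 1,
        TaskData: `{"block_hashes":["0x.."]}`,
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b))
}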

View File

@@ -2,6 +2,8 @@ package types
// SubmitProofParameter the SubmitProof api request parameter
type SubmitProofParameter struct {
// TODO: make this field required once all provers have upgraded.
UUID string `form:"uuid" json:"uuid"`
TaskID string `form:"task_id" json:"task_id" binding:"required"`
TaskType int `form:"task_type" json:"task_type" binding:"required"`
Status int `form:"status" json:"status"`
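
A sketch of how these tags behave under gin's binding: task_id and task_type are rejected when absent, while uuid may be omitted by not-yet-upgraded provers. The route and handler below are hypothetical; only the tag semantics are the point.

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
    "strings"

    "github.com/gin-gonic/gin"
)

// submitProofParameter is a trimmed local copy carrying the same binding tags.
type submitProofParameter struct {
    UUID     string `form:"uuid" json:"uuid"` // optional until all provers send it
    TaskID   string `form:"task_id" json:"task_id" binding:"required"`
    TaskType int    `form:"task_type" json:"task_type" binding:"required"`
}

func main() {
    r := gin.New()
    r.POST("/submit_proof", func(c *gin.Context) {
        var p submitProofParameter
        // ShouldBind enforces the binding:"required" tags: a missing task_id
        // fails validation, while a missing uuid is accepted.
        if err := c.ShouldBind(&p); err != nil {
            c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
            return
        }
        c.JSON(http.StatusOK, p)
    })

    body := `{"task_id":"0xabc","task_type":1}` // legacy prover: no uuid
    req := httptest.NewRequest(http.MethodPost, "/submit_proof", strings.NewReader(body))
    req.Header.Set("Content-Type", "application/json")
    w := httptest.NewRecorder()
    r.ServeHTTP(w, req)
    fmt.Println(w.Code, w.Body.String())
}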

View File

@@ -63,7 +63,7 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, 8, int(cur))
assert.Equal(t, 11, int(cur))
}
func testMigrate(t *testing.T) {

View File

@@ -0,0 +1,16 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE prover_task ADD COLUMN uuid uuid DEFAULT gen_random_uuid() NOT NULL UNIQUE;
create index if not exists idx_uuid on prover_task (uuid) where deleted_at IS NULL;
ALTER TABLE prover_task DROP CONSTRAINT uk_tasktype_taskid_publickey_version;
drop index if exists uk_tasktype_taskid_publickey_version;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
create unique index if not exists uk_tasktype_taskid_publickey_version
on prover_task (task_type, task_id, prover_public_key, prover_version) where deleted_at IS NULL;
-- +goose StatementEnd
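
gen_random_uuid() is built into PostgreSQL 13 and later (older servers need the pgcrypto extension), so after this migration every insert receives a server-generated UUID. A rough sketch of observing that from Go; the DSN is a placeholder and the column list is trimmed, since the real prover_task table has more required columns:

package main

import (
    "database/sql"
    "fmt"

    _ "github.com/lib/pq" // Postgres driver, assumed for this sketch
)

func main() {
    db, err := sql.Open("postgres", "postgres://localhost/scroll?sslmode=disable") // placeholder DSN
    if err != nil {
        panic(err)
    }
    defer db.Close()

    // After the migration every insert gets a server-generated uuid; RETURNING
    // hands it straight back, which is what gorm's clause.Returning{} relies on.
    var uuid string
    err = db.QueryRow(
        `INSERT INTO prover_task (task_id, prover_public_key) VALUES ($1, $2) RETURNING uuid`,
        "test-hash", "pk").Scan(&uuid)
    fmt.Println(uuid, err)
}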

View File

@@ -0,0 +1,19 @@
-- +goose Up
-- +goose StatementBegin
create index if not exists idx_chunk_hash on chunk(hash, deleted_at) where deleted_at IS NULL;
create index if not exists idx_proving_status_end_block_number_index on chunk(index, end_block_number, proving_status, deleted_at) where deleted_at IS NULL;
create index if not exists idx_publickey_proving_status on prover_task(prover_public_key, proving_status, deleted_at, id) where deleted_at is null;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop index if exists idx_chunk_hash;
drop index if exists idx_proving_status_end_block_number_index;
drop index if exists idx_publickey_proving_status;
-- +goose StatementEnd
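
These partial indexes all carry the where deleted_at IS NULL predicate, which lines up with gorm's soft-delete behavior: a gorm.DeletedAt field makes ordinary queries append that same condition, so the planner can use the partial index. A quick way to inspect the generated SQL, using SQLite only for the demo:

package main

import (
    "fmt"

    "gorm.io/driver/sqlite"
    "gorm.io/gorm"
)

type proverTask struct {
    ID              uint
    ProverPublicKey string
    ProvingStatus   int16
    DeletedAt       gorm.DeletedAt // soft delete: queries filter on deleted_at
}

func main() {
    db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{})
    if err != nil {
        panic(err)
    }

    sql := db.ToSQL(func(tx *gorm.DB) *gorm.DB {
        var tasks []proverTask
        return tx.Where("prover_public_key = ? AND proving_status = ?", "pk", 1).Find(&tasks)
    })
    // Prints a WHERE clause that includes deleted_at IS NULL, the same
    // predicate the partial indexes above are defined on.
    fmt.Println(sql)
}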

View File

@@ -0,0 +1,17 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE batch
ADD COLUMN total_l1_commit_gas BIGINT NOT NULL DEFAULT 0,
ADD COLUMN total_l1_commit_calldata_size INTEGER NOT NULL DEFAULT 0;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE IF EXISTS batch
DROP COLUMN total_l1_commit_gas,
DROP COLUMN total_l1_commit_calldata_size;
-- +goose StatementEnd

View File

@@ -786,6 +786,7 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=

View File

@@ -5,6 +5,7 @@ go 1.19
require (
github.com/appleboy/gin-jwt/v2 v2.9.1
github.com/gin-gonic/gin v1.9.1
github.com/google/uuid v1.3.0
github.com/stretchr/testify v1.8.3
gorm.io/gorm v1.25.2
)
@@ -57,7 +58,7 @@ require (
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
golang.org/x/arch v0.4.0 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect
golang.org/x/tools v0.11.0 // indirect

View File

@@ -64,6 +64,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
@@ -174,8 +176,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

View File

@@ -47,10 +47,10 @@ func insertSomeProverTasks(t *testing.T, db *gorm.DB) {
assert.NoError(t, migrate.ResetDB(sqlDB))
ptdb := orm.NewProverTask(db)
err = ptdb.SetProverTask(context.Background(), &task1)
err = ptdb.InsertProverTask(context.Background(), &task1)
assert.NoError(t, err)
err = ptdb.SetProverTask(context.Background(), &task2)
err = ptdb.InsertProverTask(context.Background(), &task2)
assert.NoError(t, err)
}

View File

@@ -9,11 +9,13 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/database/migrate"
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/database/migrate"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
)
var (
@@ -66,30 +68,32 @@ func TestProverTaskOrm(t *testing.T) {
reward.SetString("18446744073709551616", 10) // 1 << 64, uint64 maximum 1<<64 -1
proverTask := ProverTask{
TaskType: int16(message.ProofTypeChunk),
TaskID: "test-hash",
ProverName: "prover-0",
ProverPublicKey: "0",
ProvingStatus: int16(types.ProverAssigned),
Reward: decimal.NewFromBigInt(reward, 0),
AssignedAt: utils.NowUTC(),
}
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
err = proverTaskOrm.InsertProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
getTask, err := proverTaskOrm.GetProverTasksByHash(context.Background(), "test-hash")
proverTasks, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), message.ProofTypeChunk, []string{"test-hash"})
assert.NoError(t, err)
assert.Equal(t, proverTask.ProverName, getTask.ProverName)
assert.Equal(t, 1, len(proverTasks))
assert.Equal(t, proverTask.ProverName, proverTasks[0].ProverName)
assert.NotEqual(t, proverTask.UUID.String(), "00000000-0000-0000-0000-000000000000")
// test decimal reward, get reward
resultReward := getTask.Reward.BigInt()
resultReward := proverTasks[0].Reward.BigInt()
assert.Equal(t, resultReward, reward)
assert.Equal(t, resultReward.String(), "18446744073709551616")
proverTask.ProvingStatus = int16(types.ProverProofValid)
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
getTask, err = proverTaskOrm.GetProverTasksByHash(context.Background(), "test-hash")
assert.NoError(t, err)
assert.Equal(t, proverTask.ProvingStatus, getTask.ProvingStatus)
proverTask.AssignedAt = utils.NowUTC()
err = proverTaskOrm.InsertProverTask(context.Background(), &proverTask)
assert.Error(t, err)
}
func TestProverTaskOrmUint256(t *testing.T) {
@@ -101,18 +105,22 @@ func TestProverTaskOrmUint256(t *testing.T) {
rewardUint256 := big.NewInt(0)
rewardUint256.SetString("115792089237316195423570985008687907853269984665640564039457584007913129639935", 10)
proverTask := ProverTask{
TaskType: int16(message.ProofTypeChunk),
TaskID: "test-hash",
ProverName: "prover-0",
ProverPublicKey: "0",
ProvingStatus: int16(types.ProverAssigned),
Reward: decimal.NewFromBigInt(rewardUint256, 0),
AssignedAt: utils.NowUTC(),
}
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
err = proverTaskOrm.InsertProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
proverTasksUint256, err := proverTaskOrm.GetProverTasksByHash(context.Background(), "test-hash")
assert.NotEqual(t, proverTask.UUID.String(), "00000000-0000-0000-0000-000000000000")
proverTasksUint256, err := proverTaskOrm.GetProverTasksByHashes(context.Background(), message.ProofTypeChunk, []string{"test-hash"})
assert.NoError(t, err)
resultRewardUint256 := proverTasksUint256.Reward.BigInt()
assert.Equal(t, 1, len(proverTasksUint256))
resultRewardUint256 := proverTasksUint256[0].Reward.BigInt()
assert.Equal(t, resultRewardUint256, rewardUint256)
assert.Equal(t, resultRewardUint256.String(), "115792089237316195423570985008687907853269984665640564039457584007913129639935")
}

View File

@@ -6,30 +6,41 @@ import (
"math/big"
"time"
"gorm.io/gorm/clause"
"github.com/google/uuid"
"github.com/shopspring/decimal"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"scroll-tech/common/types/message"
)
// ProverTask holds the prover assignment info for a chunk/batch proving task.
type ProverTask struct {
db *gorm.DB `gorm:"column:-"`
ID int64 `json:"id" gorm:"column:id"`
TaskID string `json:"task_id" gorm:"column:task_id"`
ProverPublicKey string `json:"prover_public_key" gorm:"column:prover_public_key"`
ProverName string `json:"prover_name" gorm:"column:prover_name"`
ProverVersion string `json:"prover_version" gorm:"column:prover_version"`
TaskType int16 `json:"task_type" gorm:"column:task_type;default:0"`
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:0"`
FailureType int16 `json:"failure_type" gorm:"column:failure_type;default:0"`
Reward decimal.Decimal `json:"reward" gorm:"column:reward;default:0;type:decimal(78)"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
AssignedAt time.Time `json:"assigned_at" gorm:"assigned_at"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at"`
ID int64 `json:"id" gorm:"column:id"`
UUID uuid.UUID `json:"uuid" gorm:"column:uuid;type:uuid;default:gen_random_uuid()"`
// prover
ProverPublicKey string `json:"prover_public_key" gorm:"column:prover_public_key"`
ProverName string `json:"prover_name" gorm:"column:prover_name"`
ProverVersion string `json:"prover_version" gorm:"column:prover_version"`
// task
TaskID string `json:"task_id" gorm:"column:task_id"`
TaskType int16 `json:"task_type" gorm:"column:task_type;default:0"`
// status
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:0"`
FailureType int16 `json:"failure_type" gorm:"column:failure_type;default:0"`
Reward decimal.Decimal `json:"reward" gorm:"column:reward;default:0;type:decimal(78)"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
AssignedAt time.Time `json:"assigned_at" gorm:"assigned_at"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at"`
}
// NewProverTask creates a new ProverTask instance.
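
Because the uuid column defaults to gen_random_uuid(), the value is produced by Postgres rather than by Go; the clause.Returning{} in InsertProverTask is what carries it back into the struct. A hedged sketch with a placeholder DSN and a trimmed model:

package main

import (
    "fmt"

    "github.com/google/uuid"
    "gorm.io/driver/postgres"
    "gorm.io/gorm"
    "gorm.io/gorm/clause"
)

// proverTask is a trimmed local model; the uuid default mirrors the migration.
type proverTask struct {
    ID     int64
    UUID   uuid.UUID `gorm:"type:uuid;default:gen_random_uuid()"`
    TaskID string
}

func main() {
    dsn := "host=localhost dbname=scroll sslmode=disable" // placeholder DSN
    db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
    if err != nil {
        panic(err)
    }

    // Postgres, not Go, generates the uuid; clause.Returning{} adds a
    // RETURNING clause so the struct comes back populated.
    t := proverTask{TaskID: "test-hash"}
    if err := db.Clauses(clause.Returning{}).Create(&t).Error; err != nil {
        panic(err)
    }
    fmt.Println(t.UUID) // non-zero after insert, as the new tests assert
}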
@@ -85,21 +96,36 @@ func (o *ProverTask) GetProverTasksByHash(ctx context.Context, hash string) (*Pr
return proverTask, nil
}
// SetProverTask updates or inserts a ProverTask record.
func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask, dbTX ...*gorm.DB) error {
// GetProverTasksByHashes retrieves the ProverTask records associated with the specified hashes.
// The returned prover task objects are sorted in ascending order by their ids.
func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, taskType message.ProofType, hashes []string) ([]*ProverTask, error) {
if len(hashes) == 0 {
return nil, nil
}
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id IN ?", hashes)
db = db.Order("id asc")
var proverTasks []*ProverTask
if err := db.Find(&proverTasks).Error; err != nil {
return nil, fmt.Errorf("ProverTask.GetProverTasksByHashes error: %w, hashes: %v", err, hashes)
}
return proverTasks, nil
}
// InsertProverTask inserts a prover task record.
func (o *ProverTask) InsertProverTask(ctx context.Context, proverTask *ProverTask, dbTX ...*gorm.DB) error {
db := o.db.WithContext(ctx)
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.Clauses(clause.Returning{})
db = db.Model(&ProverTask{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}, {Name: "prover_version"}},
DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
})
if err := db.Create(&proverTask).Error; err != nil {
return fmt.Errorf("ProverTask.SetProverTask error: %w, prover task: %v", err, proverTask)
if err := db.Create(proverTask).Error; err != nil {
return fmt.Errorf("ProverTask.InsertProverTask error: %w, prover task: %v", err, proverTask)
}
return nil
}
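
For contrast with the removed upsert: SetProverTask relied on ON CONFLICT over the (task_type, task_id, prover_public_key, prover_version) unique key, so re-assignments overwrote the existing row, whereas InsertProverTask now always appends a fresh row identified by its UUID. A toy demonstration of the old semantics using gorm's OnConflict clause, with simplified columns and SQLite standing in for Postgres:

package main

import (
    "fmt"

    "gorm.io/driver/sqlite"
    "gorm.io/gorm"
    "gorm.io/gorm/clause"
)

type assignment struct {
    ID        uint
    TaskID    string `gorm:"uniqueIndex:uk_assign"`
    PublicKey string `gorm:"uniqueIndex:uk_assign"`
    Status    int
}

func upsert(db *gorm.DB, a *assignment) error {
    // Old behaviour: a conflict on the unique key updates the row in place.
    return db.Clauses(clause.OnConflict{
        Columns:   []clause.Column{{Name: "task_id"}, {Name: "public_key"}},
        DoUpdates: clause.AssignmentColumns([]string{"status"}),
    }).Create(a).Error
}

func main() {
    db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{})
    if err != nil {
        panic(err)
    }
    if err := db.AutoMigrate(&assignment{}); err != nil {
        panic(err)
    }

    _ = upsert(db, &assignment{TaskID: "t1", PublicKey: "pk", Status: 1})
    _ = upsert(db, &assignment{TaskID: "t1", PublicKey: "pk", Status: 2})

    var n int64
    db.Model(&assignment{}).Count(&n)
    fmt.Println("rows after two upserts:", n) // 1; a plain Create would give 2
}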

View File

@@ -19,6 +19,7 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/prover-stats-api/internal/config"
"scroll-tech/prover-stats-api/internal/controller"
@@ -32,12 +33,19 @@ var (
)
var (
port = ":12990"
addr = fmt.Sprintf("http://localhost%s", port)
basicPath = fmt.Sprintf("%s/api/prover_task/v1", addr)
addr = utils.RandomURL()
basicPath = fmt.Sprintf("http://%s/api/prover_task/v1", addr)
token string
)
func mockHTTPServer(cfg *config.Config, db *gorm.DB) (*http.Server, error) {
// run Prover Stats APIs
router := gin.Default()
controller.InitController(db)
route.Route(router, cfg)
return utils.StartHTTPServer(addr, router)
}
func TestProverTaskAPIs(t *testing.T) {
// start database image
base := docker.NewDockerApp()
@@ -54,12 +62,9 @@ func TestProverTaskAPIs(t *testing.T) {
insertSomeProverTasks(t, db)
// run Prover Stats APIs
router := gin.Default()
controller.InitController(db)
route.Route(router, cfg)
go func() {
router.Run(port)
}()
srv, err := mockHTTPServer(cfg, db)
assert.NoError(t, err)
defer srv.Close()
t.Run("testRequestToken", testRequestToken)
t.Run("testGetProverTasksByProver", testGetProverTasksByProver)
@@ -142,9 +147,9 @@ func insertSomeProverTasks(t *testing.T, db *gorm.DB) {
assert.NoError(t, migrate.ResetDB(sqlDB))
ptdb := orm.NewProverTask(db)
err = ptdb.SetProverTask(context.Background(), &task1)
err = ptdb.InsertProverTask(context.Background(), &task1)
assert.NoError(t, err)
err = ptdb.SetProverTask(context.Background(), &task2)
err = ptdb.InsertProverTask(context.Background(), &task2)
assert.NoError(t, err)
}
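
Binding tests to a fixed port makes parallel test packages flaky; the change switches to a random address plus an *http.Server handle that the test can close on teardown. The same pattern using only the standard library (utils.RandomURL and utils.StartHTTPServer are scroll-tech helpers; this sketch merely approximates them):

package main

import (
    "fmt"
    "io"
    "net"
    "net/http"
)

func main() {
    // Bind to port 0 so the OS picks a free port, avoiding clashes between
    // parallel test packages (the role played by utils.RandomURL here).
    ln, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        panic(err)
    }

    mux := http.NewServeMux()
    mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprint(w, "pong")
    })
    srv := &http.Server{Handler: mux}
    go srv.Serve(ln) // keeping *http.Server lets the test Close() it on teardown

    resp, err := http.Get(fmt.Sprintf("http://%s/ping", ln.Addr()))
    if err != nil {
        panic(err)
    }
    body, _ := io.ReadAll(resp.Body)
    resp.Body.Close()
    fmt.Println(string(body))

    srv.Close()
}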

View File

@@ -71,7 +71,7 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
Get("/coordinator/v1/challenge")
if err != nil {
return fmt.Errorf("get random string failed: %v", err)
return fmt.Errorf("get random string failed: %w", err)
}
if challengeResp.StatusCode() != 200 {
@@ -89,7 +89,7 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
err = authMsg.SignWithKey(c.priv)
if err != nil {
return fmt.Errorf("signature failed: %v", err)
return fmt.Errorf("signature failed: %w", err)
}
// Login to coordinator
@@ -117,7 +117,7 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
Post("/coordinator/v1/login")
if err != nil {
return fmt.Errorf("login failed: %v", err)
return fmt.Errorf("login failed: %w", err)
}
if loginResp.StatusCode() != 200 {
@@ -145,7 +145,7 @@ func (c *CoordinatorClient) GetTask(ctx context.Context, req *GetTaskRequest) (*
Post("/coordinator/v1/get_task")
if err != nil {
return nil, fmt.Errorf("request for GetTask failed: %v", err)
return nil, fmt.Errorf("request for GetTask failed: %w", err)
}
if resp.StatusCode() != 200 {
@@ -155,7 +155,7 @@ func (c *CoordinatorClient) GetTask(ctx context.Context, req *GetTaskRequest) (*
if result.ErrCode == types.ErrJWTTokenExpired {
log.Info("JWT expired, attempting to re-login")
if err := c.Login(ctx); err != nil {
return nil, fmt.Errorf("JWT expired, re-login failed: %v", err)
return nil, fmt.Errorf("JWT expired, re-login failed: %w", err)
}
log.Info("re-login success")
return c.GetTask(ctx, req)
@@ -178,19 +178,19 @@ func (c *CoordinatorClient) SubmitProof(ctx context.Context, req *SubmitProofReq
Post("/coordinator/v1/submit_proof")
if err != nil {
log.Error("submit proof request failed: %v", err)
log.Error("submit proof request failed", "error", err)
return fmt.Errorf("submit proof request failed: %w", ErrCoordinatorConnect)
}
if resp.StatusCode() != 200 {
log.Error("failed to submit proof, status code: %v", resp.StatusCode())
log.Error("failed to submit proof", "status code", resp.StatusCode())
return fmt.Errorf("failed to submit proof, status code not 200: %w", ErrCoordinatorConnect)
}
if result.ErrCode == types.ErrJWTTokenExpired {
log.Info("JWT expired, attempting to re-login")
if err := c.Login(ctx); err != nil {
log.Error("JWT expired, re-login failed: %v", err)
log.Error("JWT expired, re-login failed", "error", err)
return fmt.Errorf("JWT expired, re-login failed: %w", ErrCoordinatorConnect)
}
log.Info("re-login success")

Some files were not shown because too many files have changed in this diff.