Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 23:18:07 -05:00)
Compare commits (13 commits)
fix/prover...fix/chunk_
| SHA1 |
|---|
| 06dd3f6100 |
| aa6c216405 |
| 308f8f0502 |
| e92e2d54ba |
| a7870fc19c |
| 5dc77d98d9 |
| 158655dadb |
| 2350802af9 |
| 5e7dbbce71 |
| 49376db8e3 |
| 360f5ef72b |
| a88d1428bc |
| 5f6e50d883 |
Makefile (16 changed lines)
@@ -1,7 +1,5 @@
.PHONY: check update dev_docker build_test_docker run_test_docker clean

L2GETH_TAG=scroll-v4.3.34

help: ## Display this help message
@grep -h \
-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
@@ -17,14 +15,14 @@ lint: ## The code's format and security checks.

update: ## update dependencies
go work sync
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .
@@ -95,9 +95,6 @@ func (b *BatchInfoFetcher) fetchBatchInfo() error {
} else {
startHeight = latestBatchHeight + 1
}
if startHeight < b.batchInfoStartNumber {
startHeight = b.batchInfoStartNumber
}
for from := startHeight; number >= from; from += fetchLimit {
to := from + fetchLimit - 1
// number - confirmation can never less than 0 since the for loop condition
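The hunk above walks the L1 block range in fixed-size windows of `fetchLimit` blocks, clamping the start to `batchInfoStartNumber`. As a standalone illustration of that pagination pattern only, here is a minimal Go sketch; `fetchRange`, the constants, and the clamp of the final window are hypothetical and not the repository's actual helpers or behaviour.

```go
package main

import "fmt"

// fetchRange stands in for the per-window fetch the fetcher performs.
func fetchRange(from, to uint64) error {
	fmt.Printf("fetching blocks %d..%d\n", from, to)
	return nil
}

func main() {
	const fetchLimit uint64 = 16
	var startHeight, number uint64 = 5, 60 // number = confirmed head (illustrative values)

	// Same loop shape as the hunk: advance in fetchLimit-sized windows
	// until the window start passes the confirmed head.
	for from := startHeight; number >= from; from += fetchLimit {
		to := from + fetchLimit - 1
		if to > number { // clamp the last window (assumed, not shown in the hunk)
			to = number
		}
		if err := fetchRange(from, to); err != nil {
			fmt.Println("fetch failed:", err)
			return
		}
	}
}
```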
@@ -7,7 +7,6 @@ import (
"os/signal"
"time"

"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -60,8 +59,8 @@ func action(ctx *cli.Context) error {
}
}()

registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
@@ -73,12 +72,8 @@ func action(ctx *cli.Context) error {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}

l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)

l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)

go utils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
@@ -7,7 +7,6 @@ import (
"os/signal"
"time"

"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -62,8 +61,8 @@ func action(ctx *cli.Context) error {
}
}()

registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
// Start metrics server.
metrics.Serve(subCtx, ctx)

l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
@@ -79,14 +78,14 @@ func action(ctx *cli.Context) error {
}

l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)

l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */, registry)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err
@@ -7,7 +7,6 @@ import (
"os/signal"
"time"

"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"

@@ -60,10 +59,10 @@ func action(ctx *cli.Context) error {
}
}()

registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
// Start metrics server.
metrics.Serve(subCtx, ctx)

l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
@@ -7,7 +7,6 @@ import (
"os/signal"
"time"

"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -63,8 +62,8 @@ func action(ctx *cli.Context) error {
}
}()

registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
// Start metrics server.
metrics.Serve(subCtx, ctx)

// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
@@ -74,26 +73,26 @@ func action(ctx *cli.Context) error {
}

initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis, registry)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis)
if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
return err
}

chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db, registry)
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db)
if err != nil {
log.Error("failed to create chunkProposer", "config file", cfgFile, "error", err)
return err
}

batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db, registry)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db)
if err != nil {
log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
return err
}

l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)

// Watcher loop to fetch missing blocks
go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
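The command-file hunks above all swap between two metrics wirings: one threads a `prometheus.Registerer` into each constructor and serves it via the project's `metrics.Server`, the other calls `metrics.Serve` and lets components register globally. As a self-contained sketch of the injected-registry style only, the snippet below wires a private registry into a hypothetical component and exposes it with `promhttp`; the component and metric names are illustrative, not the repository's actual types or its `metrics` package.

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// watcherMetrics is a hypothetical stand-in for a component's metrics struct;
// the registry is injected so tests can pass prometheus.NewRegistry() or nil.
type watcherMetrics struct {
	fetchedEvents prometheus.Counter
}

func newWatcherMetrics(reg prometheus.Registerer) *watcherMetrics {
	return &watcherMetrics{
		fetchedEvents: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "example_watcher_fetched_events_total",
			Help: "Events fetched by the watcher (illustrative only).",
		}),
	}
}

func main() {
	registry := prometheus.NewRegistry()
	m := newWatcherMetrics(registry)
	m.fetchedEvents.Inc()

	// Expose exactly what was registered on this registry over HTTP.
	http.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```

The registry-side test hunks later in this compare pass `nil` for the registerer; `promauto.With(nil)` creates the collectors without registering them, which is why that works in unit tests.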
@@ -5,7 +5,6 @@ go 1.19
require (
github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/prometheus/client_golang v1.14.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.3
@@ -14,7 +13,6 @@ require (
)

require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@@ -22,7 +20,6 @@ require (
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect
@@ -38,12 +35,8 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
@@ -61,7 +54,7 @@ require (
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
@@ -2,8 +2,6 @@ github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/agiledragon/gomonkey/v2 v2.9.0 h1:PDiKKybR596O6FHW+RVSG0Z7uGCBNbmbUXh3uCNQ7Hc=
|
||||
github.com/agiledragon/gomonkey/v2 v2.9.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
@@ -33,14 +31,8 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
|
||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
@@ -70,8 +62,11 @@ github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlT
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
@@ -82,8 +77,6 @@ github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APP
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
|
||||
@@ -97,14 +90,6 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
|
||||
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
|
||||
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
@@ -155,7 +140,6 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
@@ -173,13 +157,9 @@ golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
|
||||
@@ -6,13 +6,15 @@ import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
// not sure if this will make problems when relay with l1geth
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
bridgeAbi "scroll-tech/bridge/abi"
|
||||
@@ -21,6 +23,11 @@ import (
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
var (
|
||||
bridgeL1MsgsRelayedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/total", metrics.ScrollRegistry)
|
||||
bridgeL1MsgsRelayedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
|
||||
)
|
||||
|
||||
// Layer1Relayer is responsible for
|
||||
// 1. fetch pending L1Message from db
|
||||
// 2. relay pending message to layer 2 node
|
||||
@@ -47,18 +54,17 @@ type Layer1Relayer struct {
|
||||
|
||||
l1MessageOrm *orm.L1Message
|
||||
l1BlockOrm *orm.L1Block
|
||||
metrics *l1RelayerMetrics
|
||||
}
|
||||
|
||||
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
|
||||
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, reg prometheus.Registerer) (*Layer1Relayer, error) {
|
||||
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey, "l1_relayer", "message_sender", reg)
|
||||
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
|
||||
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new message sender failed for address %s, err: %v", addr.Hex(), err)
|
||||
}
|
||||
|
||||
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l1_relayer", "gas_oracle_sender", reg)
|
||||
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %v", addr.Hex(), err)
|
||||
@@ -80,7 +86,6 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
|
||||
}
|
||||
|
||||
l1Relayer := &Layer1Relayer{
|
||||
cfg: cfg,
|
||||
ctx: ctx,
|
||||
l1MessageOrm: orm.NewL1Message(db),
|
||||
l1BlockOrm: orm.NewL1Block(db),
|
||||
@@ -95,9 +100,9 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
|
||||
|
||||
minGasPrice: minGasPrice,
|
||||
gasPriceDiff: gasPriceDiff,
|
||||
}
|
||||
|
||||
l1Relayer.metrics = initL1RelayerMetrics(reg)
|
||||
cfg: cfg,
|
||||
}
|
||||
|
||||
go l1Relayer.handleConfirmLoop(ctx)
|
||||
return l1Relayer, nil
|
||||
@@ -118,9 +123,7 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
|
||||
|
||||
for _, msg := range msgs {
|
||||
tmpMsg := msg
|
||||
r.metrics.bridgeL1RelayedMsgsTotal.Inc()
|
||||
if err = r.processSavedEvent(&tmpMsg); err != nil {
|
||||
r.metrics.bridgeL1RelayedMsgsFailureTotal.Inc()
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
|
||||
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
|
||||
}
|
||||
@@ -142,6 +145,7 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bridgeL1MsgsRelayedTotalCounter.Inc(1)
|
||||
log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash)
|
||||
|
||||
err = r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
|
||||
@@ -153,7 +157,6 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
|
||||
|
||||
// ProcessGasPriceOracle imports gas price to layer2
|
||||
func (r *Layer1Relayer) ProcessGasPriceOracle() {
|
||||
r.metrics.bridgeL1RelayerGasPriceOraclerRunTotal.Inc()
|
||||
latestBlockHeight, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
|
||||
if err != nil {
|
||||
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
|
||||
@@ -198,7 +201,6 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
|
||||
return
|
||||
}
|
||||
r.lastGasPrice = block.BaseFee
|
||||
r.metrics.bridgeL1RelayerLastGasPrice.Set(float64(r.lastGasPrice))
|
||||
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee)
|
||||
}
|
||||
}
|
||||
@@ -210,7 +212,7 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case cfm := <-r.messageSender.ConfirmChan():
|
||||
r.metrics.bridgeL1MsgsRelayedConfirmedTotal.Inc()
|
||||
bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
|
||||
if !cfm.IsSuccessful {
|
||||
err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
@@ -226,7 +228,6 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
|
||||
log.Info("transaction confirmed in layer2", "confirmation", cfm)
|
||||
}
|
||||
case cfm := <-r.gasOracleSender.ConfirmChan():
|
||||
r.metrics.bridgeL1MsgsRelayedConfirmedTotal.Inc()
|
||||
if !cfm.IsSuccessful {
|
||||
// @discuss: maybe make it pending again?
|
||||
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
package relayer
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
type l1RelayerMetrics struct {
|
||||
bridgeL1RelayedMsgsTotal prometheus.Counter
|
||||
bridgeL1RelayedMsgsFailureTotal prometheus.Counter
|
||||
bridgeL1RelayerGasPriceOraclerRunTotal prometheus.Counter
|
||||
bridgeL1RelayerLastGasPrice prometheus.Gauge
|
||||
bridgeL1MsgsRelayedConfirmedTotal prometheus.Counter
|
||||
bridgeL1GasOraclerConfirmedTotal prometheus.Counter
|
||||
}
|
||||
|
||||
var (
|
||||
initL1RelayerMetricOnce sync.Once
|
||||
l1RelayerMetric *l1RelayerMetrics
|
||||
)
|
||||
|
||||
func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
|
||||
initL1RelayerMetricOnce.Do(func() {
|
||||
l1RelayerMetric = &l1RelayerMetrics{
|
||||
bridgeL1RelayedMsgsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer1_msg_relayed_total",
|
||||
Help: "The total number of the l1 relayed message.",
|
||||
}),
|
||||
bridgeL1RelayedMsgsFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer1_msg_relayed_failure_total",
|
||||
Help: "The total number of the l1 relayed failure message.",
|
||||
}),
|
||||
bridgeL1MsgsRelayedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer1_relayed_confirmed_total",
|
||||
Help: "The total number of layer1 relayed confirmed",
|
||||
}),
|
||||
bridgeL1RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer1_gas_price_oracler_total",
|
||||
Help: "The total number of layer1 gas price oracler run total",
|
||||
}),
|
||||
bridgeL1RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_layer1_gas_price_latest_gas_price",
|
||||
Help: "The latest gas price of bridge relayer l1",
|
||||
}),
|
||||
bridgeL1GasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer1_gas_oracler_confirmed_total",
|
||||
Help: "The total number of layer1 relayed confirmed",
|
||||
}),
|
||||
}
|
||||
})
|
||||
return l1RelayerMetric
|
||||
}
|
||||
@@ -62,7 +62,7 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
|
||||
func testCreateNewL1Relayer(t *testing.T) {
|
||||
db := setupL1RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig, nil)
|
||||
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
}
|
||||
@@ -72,7 +72,7 @@ func testL1RelayerProcessSaveEvents(t *testing.T) {
|
||||
defer database.CloseDB(db)
|
||||
l1MessageOrm := orm.NewL1Message(db)
|
||||
l1Cfg := cfg.L1Config
|
||||
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
|
||||
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
assert.NoError(t, l1MessageOrm.SaveL1Messages(context.Background(), templateL1Message))
|
||||
@@ -99,7 +99,7 @@ func testL1RelayerMsgConfirm(t *testing.T) {
|
||||
l1Cfg := cfg.L1Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
@@ -138,7 +138,7 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
|
||||
l1Cfg := cfg.L1Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
@@ -168,7 +168,7 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
|
||||
l1Cfg := cfg.L1Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, l1Relayer)
|
||||
|
||||
|
||||
@@ -8,15 +8,16 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
bridgeAbi "scroll-tech/bridge/abi"
|
||||
@@ -25,6 +26,13 @@ import (
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
var (
|
||||
bridgeL2BatchesFinalizedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/total", metrics.ScrollRegistry)
|
||||
bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
|
||||
bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
|
||||
bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
|
||||
)
|
||||
|
||||
// Layer2Relayer is responsible for
|
||||
// 1. Committing and finalizing L2 blocks on L1
|
||||
// 2. Relaying messages from L2 to L1
|
||||
@@ -70,30 +78,29 @@ type Layer2Relayer struct {
|
||||
// A list of processing batch finalization.
|
||||
// key(string): confirmation ID, value(string): batch hash.
|
||||
processingFinalization sync.Map
|
||||
|
||||
metrics *l2RelayerMetrics
|
||||
}
|
||||
|
||||
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
|
||||
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool, reg prometheus.Registerer) (*Layer2Relayer, error) {
|
||||
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey, "l2_relayer", "message_sender", reg)
|
||||
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool) (*Layer2Relayer, error) {
|
||||
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new message sender failed for address %s, err: %w", addr.Hex(), err)
|
||||
}
|
||||
|
||||
commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey, "l2_relayer", "commit_sender", reg)
|
||||
commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.CommitSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new commit sender failed for address %s, err: %w", addr.Hex(), err)
|
||||
}
|
||||
finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderPrivateKey, "l2_relayer", "finalize_sender", reg)
|
||||
|
||||
finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderPrivateKey)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.FinalizeSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new finalize sender failed for address %s, err: %w", addr.Hex(), err)
|
||||
}
|
||||
|
||||
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l2_relayer", "gas_oracle_sender", reg)
|
||||
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %w", addr.Hex(), err)
|
||||
@@ -151,7 +158,6 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
|
||||
}
|
||||
}
|
||||
layer2Relayer.metrics = initL2RelayerMetrics(reg)
|
||||
|
||||
go layer2Relayer.handleConfirmLoop(ctx)
|
||||
return layer2Relayer, nil
|
||||
@@ -269,7 +275,6 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
|
||||
|
||||
// ProcessGasPriceOracle imports gas price to layer1
|
||||
func (r *Layer2Relayer) ProcessGasPriceOracle() {
|
||||
r.metrics.bridgeL2RelayerGasPriceOraclerRunTotal.Inc()
|
||||
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
|
||||
if batch == nil || err != nil {
|
||||
log.Error("Failed to GetLatestBatch", "batch", batch, "err", err)
|
||||
@@ -307,7 +312,6 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
|
||||
return
|
||||
}
|
||||
r.lastGasPrice = suggestGasPriceUint64
|
||||
r.metrics.bridgeL2RelayerLastGasPrice.Set(float64(r.lastGasPrice))
|
||||
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
|
||||
}
|
||||
}
|
||||
@@ -322,7 +326,6 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
return
|
||||
}
|
||||
for _, batch := range pendingBatches {
|
||||
r.metrics.bridgeL2RelayerProcessPendingBatchTotal.Inc()
|
||||
// get current header and parent header.
|
||||
currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader)
|
||||
if err != nil {
|
||||
@@ -397,7 +400,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err)
|
||||
return
|
||||
}
|
||||
r.metrics.bridgeL2RelayerProcessPendingBatchSuccessTotal.Inc()
|
||||
bridgeL2BatchesCommittedTotalCounter.Inc(1)
|
||||
r.processingCommitment.Store(txID, batch.Hash)
|
||||
log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
|
||||
}
|
||||
@@ -421,8 +424,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
return
|
||||
}
|
||||
|
||||
r.metrics.bridgeL2RelayerProcessCommittedBatchesTotal.Inc()
|
||||
|
||||
batch := batches[0]
|
||||
hash := batch.Hash
|
||||
status := types.ProvingStatus(batch.ProvingStatus)
|
||||
@@ -436,7 +437,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
return
|
||||
case types.ProvingTaskVerified:
|
||||
log.Info("Start to roll up zk proof", "hash", hash)
|
||||
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
|
||||
|
||||
var parentBatchStateRoot string
|
||||
if batch.Index > 0 {
|
||||
@@ -495,6 +495,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
}
|
||||
return
|
||||
}
|
||||
bridgeL2BatchesFinalizedTotalCounter.Inc(1)
|
||||
log.Info("finalizeBatchWithProof in layer1", "index", batch.Index, "batch hash", batch.Hash, "tx hash", hash)
|
||||
|
||||
// record and sync with db, @todo handle db error
|
||||
@@ -505,7 +506,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
"tx hash", finalizeTxHash.String(), "err", err)
|
||||
}
|
||||
r.processingFinalization.Store(txID, hash)
|
||||
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
|
||||
|
||||
case types.ProvingTaskFailed:
|
||||
// We were unable to prove this batch. There are two possibilities:
|
||||
@@ -550,7 +550,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
|
||||
"batch hash", batchHash.(string),
|
||||
"tx hash", confirmation.TxHash.String(), "err", err)
|
||||
}
|
||||
r.metrics.bridgeL2BatchesCommittedConfirmedTotal.Inc()
|
||||
bridgeL2BatchesCommittedConfirmedTotalCounter.Inc(1)
|
||||
r.processingCommitment.Delete(confirmation.ID)
|
||||
}
|
||||
|
||||
@@ -572,7 +572,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
|
||||
"batch hash", batchHash.(string),
|
||||
"tx hash", confirmation.TxHash.String(), "err", err)
|
||||
}
|
||||
r.metrics.bridgeL2BatchesFinalizedConfirmedTotal.Inc()
|
||||
bridgeL2BatchesFinalizedConfirmedTotalCounter.Inc(1)
|
||||
r.processingFinalization.Delete(confirmation.ID)
|
||||
}
|
||||
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
|
||||
@@ -590,7 +590,6 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
|
||||
case confirmation := <-r.finalizeSender.ConfirmChan():
|
||||
r.handleConfirmation(confirmation)
|
||||
case cfm := <-r.gasOracleSender.ConfirmChan():
|
||||
r.metrics.bridgeL2BatchesGasOraclerConfirmedTotal.Inc()
|
||||
if !cfm.IsSuccessful {
|
||||
// @discuss: maybe make it pending again?
|
||||
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
|
||||
|
||||
@@ -1,74 +0,0 @@
|
||||
package relayer
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
type l2RelayerMetrics struct {
|
||||
bridgeL2RelayerProcessPendingBatchTotal prometheus.Counter
|
||||
bridgeL2RelayerProcessPendingBatchSuccessTotal prometheus.Counter
|
||||
bridgeL2RelayerGasPriceOraclerRunTotal prometheus.Counter
|
||||
bridgeL2RelayerLastGasPrice prometheus.Gauge
|
||||
bridgeL2RelayerProcessCommittedBatchesTotal prometheus.Counter
|
||||
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal prometheus.Counter
|
||||
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal prometheus.Counter
|
||||
bridgeL2BatchesCommittedConfirmedTotal prometheus.Counter
|
||||
bridgeL2BatchesFinalizedConfirmedTotal prometheus.Counter
|
||||
bridgeL2BatchesGasOraclerConfirmedTotal prometheus.Counter
|
||||
}
|
||||
|
||||
var (
|
||||
initL2RelayerMetricOnce sync.Once
|
||||
l2RelayerMetric *l2RelayerMetrics
|
||||
)
|
||||
|
||||
func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
|
||||
initL2RelayerMetricOnce.Do(func() {
|
||||
l2RelayerMetric = &l2RelayerMetrics{
|
||||
bridgeL2RelayerProcessPendingBatchTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_pending_batch_total",
|
||||
Help: "The total number of layer2 process pending batch",
|
||||
}),
|
||||
bridgeL2RelayerProcessPendingBatchSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_pending_batch_success_total",
|
||||
Help: "The total number of layer2 process pending success batch",
|
||||
}),
|
||||
bridgeL2RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_gas_price_oracler_total",
|
||||
Help: "The total number of layer2 gas price oracler run total",
|
||||
}),
|
||||
bridgeL2RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_layer2_gas_price_latest_gas_price",
|
||||
Help: "The latest gas price of bridge relayer l2",
|
||||
}),
|
||||
bridgeL2RelayerProcessCommittedBatchesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_committed_batches_total",
|
||||
Help: "The total number of layer2 process committed batches run total",
|
||||
}),
|
||||
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_committed_batches_finalized_total",
|
||||
Help: "The total number of layer2 process committed batches finalized total",
|
||||
}),
|
||||
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_committed_batches_finalized_success_total",
|
||||
Help: "The total number of layer2 process committed batches finalized success total",
|
||||
}),
|
||||
bridgeL2BatchesCommittedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_committed_batches_confirmed_total",
|
||||
Help: "The total number of layer2 process committed batches confirmed total",
|
||||
}),
|
||||
bridgeL2BatchesFinalizedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_finalized_batches_confirmed_total",
|
||||
Help: "The total number of layer2 process finalized batches confirmed total",
|
||||
}),
|
||||
bridgeL2BatchesGasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_gras_oracler_confirmed_total",
|
||||
Help: "The total number of layer2 process finalized batches confirmed total",
|
||||
}),
|
||||
}
|
||||
})
|
||||
return l2RelayerMetric
|
||||
}
|
||||
@@ -35,7 +35,7 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
|
||||
func testCreateNewRelayer(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
}
|
||||
@@ -45,7 +45,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, nil)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
@@ -73,7 +73,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, nil)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
|
||||
assert.NoError(t, err)
|
||||
batchOrm := orm.NewBatch(db)
|
||||
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
|
||||
@@ -114,7 +114,7 @@ func testL2RelayerCommitConfirm(t *testing.T) {
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
@@ -164,7 +164,7 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
@@ -221,7 +221,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
@@ -259,7 +259,7 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
|
||||
|
||||
@@ -1,95 +0,0 @@
|
||||
package sender
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
type senderMetrics struct {
|
||||
senderCheckBalancerTotal *prometheus.CounterVec
|
||||
senderCheckPendingTransactionTotal *prometheus.CounterVec
|
||||
sendTransactionTotal *prometheus.CounterVec
|
||||
sendTransactionFailureFullTx *prometheus.GaugeVec
|
||||
sendTransactionFailureRepeatTransaction *prometheus.CounterVec
|
||||
sendTransactionFailureGetFee *prometheus.CounterVec
|
||||
sendTransactionFailureSendTx *prometheus.CounterVec
|
||||
resubmitTransactionTotal *prometheus.CounterVec
|
||||
currentPendingTxsNum *prometheus.GaugeVec
|
||||
currentGasFeeCap *prometheus.GaugeVec
|
||||
currentGasTipCap *prometheus.GaugeVec
|
||||
currentGasPrice *prometheus.GaugeVec
|
||||
currentGasLimit *prometheus.GaugeVec
|
||||
currentNonce *prometheus.GaugeVec
|
||||
}
|
||||
|
||||
var (
|
||||
initSenderMetricOnce sync.Once
|
||||
sm *senderMetrics
|
||||
)
|
||||
|
||||
func initSenderMetrics(reg prometheus.Registerer) *senderMetrics {
|
||||
initSenderMetricOnce.Do(func() {
|
||||
sm = &senderMetrics{
|
||||
sendTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "bridge_sender_send_transaction_total",
|
||||
Help: "The total number of sending transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureFullTx: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "bridge_sender_send_transaction_full_tx_failure_total",
|
||||
Help: "The total number of sending transaction failure for full size tx.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureRepeatTransaction: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "bridge_sender_send_transaction_repeat_transaction_failure_total",
|
||||
Help: "The total number of sending transaction failure for repeat transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureGetFee: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "bridge_sender_send_transaction_get_fee_failure_total",
|
||||
Help: "The total number of sending transaction failure for getting fee.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureSendTx: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "bridge_sender_send_transaction_send_tx_failure_total",
|
||||
Help: "The total number of sending transaction failure for sending tx.",
|
||||
}, []string{"service", "name"}),
|
||||
resubmitTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "bridge_sender_send_transaction_resubmit_send_transaction_total",
|
||||
Help: "The total number of resubmit transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentPendingTxsNum: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "bridge_sender_pending_tx_count",
|
||||
Help: "The pending tx count in the sender.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasFeeCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "bridge_sender_gas_fee_cap",
|
||||
Help: "The gas fee of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasTipCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "bridge_sender_gas_tip_cap",
|
||||
Help: "The gas tip of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasPrice: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "bridge_sender_gas_price_cap",
|
||||
Help: "The gas price of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasLimit: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "bridge_sender_gas_limit",
|
||||
Help: "The gas limit of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentNonce: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "bridge_sender_nonce",
|
||||
Help: "The nonce of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
senderCheckPendingTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "bridge_sender_check_pending_transaction_total",
|
||||
Help: "The total number of check pending transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
senderCheckBalancerTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "bridge_sender_check_balancer_total",
|
||||
Help: "The total number of check balancer.",
|
||||
}, []string{"service", "name"}),
|
||||
}
|
||||
})
|
||||
|
||||
return sm
|
||||
}
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"time"
|
||||
|
||||
cmapV2 "github.com/orcaman/concurrent-map/v2"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
@@ -71,8 +70,6 @@ type Sender struct {
|
||||
client *ethclient.Client // The client to retrieve on chain data or send transaction.
|
||||
chainID *big.Int // The chain id of the endpoint
|
||||
ctx context.Context
|
||||
service string
|
||||
name string
|
||||
|
||||
auth *bind.TransactOpts
|
||||
minBalance *big.Int
|
||||
@@ -83,13 +80,11 @@ type Sender struct {
|
||||
confirmCh chan *Confirmation
|
||||
|
||||
stopCh chan struct{}
|
||||
|
||||
metrics *senderMetrics
|
||||
}
|
||||
|
||||
// NewSender returns a new instance of transaction sender
|
||||
// txConfirmationCh is used to notify confirmed transaction
|
||||
func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey, service, name string, reg prometheus.Registerer) (*Sender, error) {
|
||||
func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey) (*Sender, error) {
|
||||
client, err := ethclient.Dial(config.Endpoint)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to dial eth client, err: %w", err)
|
||||
@@ -140,10 +135,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.Pri
|
||||
baseFeePerGas: baseFeePerGas,
|
||||
pendingTxs: cmapV2.New[*PendingTransaction](),
|
||||
stopCh: make(chan struct{}),
|
||||
name: name,
|
||||
service: service,
|
||||
}
|
||||
sender.metrics = initSenderMetrics(reg)
|
||||
|
||||
go sender.loop(ctx)
|
||||
|
||||
@@ -191,15 +183,10 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val
|
||||
|
||||
// SendTransaction send a signed L2tL1 transaction.
|
||||
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (common.Hash, error) {
|
||||
s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
if s.IsFull() {
|
||||
s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(1)
|
||||
return common.Hash{}, ErrFullPending
|
||||
}
|
||||
|
||||
s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(0)
|
||||
if ok := s.pendingTxs.SetIfAbsent(ID, nil); !ok {
|
||||
s.metrics.sendTransactionFailureRepeatTransaction.WithLabelValues(s.service, s.name).Inc()
|
||||
return common.Hash{}, fmt.Errorf("repeat transaction ID: %s", ID)
|
||||
}
|
||||
|
||||
@@ -216,12 +203,9 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
|
||||
}()
|
||||
|
||||
if feeData, err = s.getFeeData(s.auth, target, value, data, minGasLimit); err != nil {
|
||||
s.metrics.sendTransactionFailureGetFee.WithLabelValues(s.service, s.name).Inc()
|
||||
return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
|
||||
}
|
||||
|
||||
if tx, err = s.createAndSendTx(s.auth, feeData, target, value, data, nil); err != nil {
|
||||
s.metrics.sendTransactionFailureSendTx.WithLabelValues(s.service, s.name).Inc()
|
||||
return common.Hash{}, fmt.Errorf("failed to create and send transaction, err: %w", err)
|
||||
}
|
||||
|
||||
@@ -310,20 +294,6 @@ func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, targ
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if feeData.gasTipCap != nil {
|
||||
s.metrics.currentGasTipCap.WithLabelValues(s.service, s.name).Set(float64(feeData.gasTipCap.Uint64()))
|
||||
}
|
||||
|
||||
if feeData.gasFeeCap != nil {
|
||||
s.metrics.currentGasFeeCap.WithLabelValues(s.service, s.name).Set(float64(feeData.gasFeeCap.Uint64()))
|
||||
}
|
||||
|
||||
if feeData.gasPrice != nil {
|
||||
s.metrics.currentGasPrice.WithLabelValues(s.service, s.name).Set(float64(feeData.gasPrice.Uint64()))
|
||||
}
|
||||
|
||||
s.metrics.currentGasLimit.WithLabelValues(s.service, s.name).Set(float64(feeData.gasLimit))
|
||||
|
||||
// update nonce when it is not from resubmit
|
||||
if overrideNonce == nil {
|
||||
auth.Nonce = big.NewInt(int64(nonce + 1))
|
||||
@@ -392,7 +362,6 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
|
||||
}
|
||||
|
||||
nonce := tx.Nonce()
|
||||
s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
return s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
|
||||
}
|
||||
|
||||
@@ -503,7 +472,6 @@ func (s *Sender) loop(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-checkTick.C:
|
||||
s.metrics.senderCheckPendingTransactionTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
header, err := s.client.HeaderByNumber(s.ctx, nil)
|
||||
if err != nil {
|
||||
log.Error("failed to get latest head", "err", err)
|
||||
@@ -518,7 +486,6 @@ func (s *Sender) loop(ctx context.Context) {
|
||||
|
||||
s.checkPendingTransaction(header, confirmed)
|
||||
case <-checkBalanceTicker.C:
|
||||
s.metrics.senderCheckBalancerTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
// Check and set balance.
|
||||
if err := s.checkBalance(ctx); err != nil {
|
||||
log.Error("check balance error", "err", err)
|
||||
|
||||
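The loop hunk above drives two independent tickers: one for confirming pending transactions, one for balance checks. A small sketch of that select-over-tickers pattern, with placeholder intervals and handler functions rather than the repo's actual values:

package main

import (
	"context"
	"log"
	"time"
)

// runLoop polls on two independent tickers until ctx is cancelled,
// mirroring the checkTick / checkBalanceTicker loop above.
func runLoop(ctx context.Context, checkPending, checkBalance func() error) {
	checkTick := time.NewTicker(3 * time.Second)        // placeholder interval
	checkBalanceTick := time.NewTicker(1 * time.Minute) // placeholder interval
	defer checkTick.Stop()
	defer checkBalanceTick.Stop()

	for {
		select {
		case <-checkTick.C:
			if err := checkPending(); err != nil {
				log.Println("check pending transactions failed:", err)
			}
		case <-checkBalanceTick.C:
			if err := checkBalance(); err != nil {
				log.Println("check balance failed:", err)
			}
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	runLoop(ctx, func() error { return nil }, func() error { return nil })
}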
@@ -70,7 +70,7 @@ func testNewSender(t *testing.T) {
|
||||
// exit by Stop()
|
||||
cfgCopy1 := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy1.TxType = txType
|
||||
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey, "test", "test", nil)
|
||||
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey)
|
||||
assert.NoError(t, err)
|
||||
newSender1.Stop()
|
||||
|
||||
@@ -78,7 +78,7 @@ func testNewSender(t *testing.T) {
|
||||
cfgCopy2 := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy2.TxType = txType
|
||||
subCtx, cancel := context.WithCancel(context.Background())
|
||||
_, err = NewSender(subCtx, &cfgCopy2, privateKey, "test", "test", nil)
|
||||
_, err = NewSender(subCtx, &cfgCopy2, privateKey)
|
||||
assert.NoError(t, err)
|
||||
cancel()
|
||||
}
|
||||
@@ -90,7 +90,7 @@ func testPendLimit(t *testing.T) {
|
||||
cfgCopy.TxType = txType
|
||||
cfgCopy.Confirmations = rpc.LatestBlockNumber
|
||||
cfgCopy.PendingLimit = 2
|
||||
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for i := 0; i < 2*newSender.PendingLimit(); i++ {
|
||||
@@ -107,7 +107,7 @@ func testMinGasLimit(t *testing.T) {
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
cfgCopy.Confirmations = rpc.LatestBlockNumber
|
||||
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey)
|
||||
assert.NoError(t, err)
|
||||
|
||||
client, err := ethclient.Dial(cfgCopy.Endpoint)
|
||||
@@ -135,7 +135,7 @@ func testResubmitTransaction(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey)
|
||||
assert.NoError(t, err)
|
||||
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
|
||||
feeData, err := s.getFeeData(s.auth, &common.Address{}, big.NewInt(0), nil, 0)
|
||||
@@ -151,7 +151,7 @@ func testResubmitTransactionWithRisingBaseFee(t *testing.T) {
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey)
|
||||
assert.NoError(t, err)
|
||||
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
|
||||
s.baseFeePerGas = 1000
|
||||
@@ -186,7 +186,7 @@ func testCheckPendingTransaction(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey)
|
||||
assert.NoError(t, err)
|
||||
|
||||
header := &types.Header{Number: big.NewInt(100), BaseFee: big.NewInt(100)}
|
||||
|
||||
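testResubmitTransaction and testResubmitTransactionWithRisingBaseFee exercise the sender's fee-bumping path. The escalator parameters are not visible in this excerpt, so the sketch below only illustrates the usual shape of such a bump for an EIP-1559 transaction; the 10% multiplier and the "2x base fee plus tip" headroom rule are assumptions, not the repo's actual policy.

package main

import (
	"fmt"
	"math/big"
)

// bumpFees scales the tip by a growth factor and makes sure the fee cap
// still covers the latest base fee plus the new tip. Both the factor and
// the headroom rule are illustrative assumptions.
func bumpFees(gasTipCap, gasFeeCap, baseFee *big.Int) (newTip, newFeeCap *big.Int) {
	newTip = new(big.Int).Div(new(big.Int).Mul(gasTipCap, big.NewInt(110)), big.NewInt(100))
	newFeeCap = new(big.Int).Div(new(big.Int).Mul(gasFeeCap, big.NewInt(110)), big.NewInt(100))
	headroom := new(big.Int).Add(new(big.Int).Mul(baseFee, big.NewInt(2)), newTip)
	if newFeeCap.Cmp(headroom) < 0 {
		newFeeCap = headroom
	}
	return newTip, newFeeCap
}

func main() {
	tip, feeCap := bumpFees(big.NewInt(2_000_000_000), big.NewInt(30_000_000_000), big.NewInt(1000))
	fmt.Println(tip, feeCap)
}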
@@ -5,8 +5,6 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
@@ -31,20 +29,10 @@ type BatchProposer struct {
|
||||
minChunkNumPerBatch uint64
|
||||
batchTimeoutSec uint64
|
||||
gasCostIncreaseMultiplier float64
|
||||
|
||||
batchProposerCircleTotal prometheus.Counter
|
||||
proposeBatchFailureTotal prometheus.Counter
|
||||
proposeBatchUpdateInfoTotal prometheus.Counter
|
||||
proposeBatchUpdateInfoFailureTotal prometheus.Counter
|
||||
totalL1CommitGas prometheus.Gauge
|
||||
totalL1CommitCalldataSize prometheus.Gauge
|
||||
batchChunksNum prometheus.Gauge
|
||||
batchFirstChunkTimeoutReached prometheus.Counter
|
||||
batchChunksSuperposeNotEnoughTotal prometheus.Counter
|
||||
}
|
||||
|
||||
// NewBatchProposer creates a new BatchProposer instance.
|
||||
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
|
||||
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB) *BatchProposer {
|
||||
return &BatchProposer{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
@@ -57,63 +45,22 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
|
||||
minChunkNumPerBatch: cfg.MinChunkNumPerBatch,
|
||||
batchTimeoutSec: cfg.BatchTimeoutSec,
|
||||
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
|
||||
|
||||
batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_batch_circle_total",
|
||||
Help: "Total number of propose batch total.",
|
||||
}),
|
||||
proposeBatchFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_batch_failure_circle_total",
|
||||
Help: "Total number of propose batch total.",
|
||||
}),
|
||||
proposeBatchUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_batch_update_info_total",
|
||||
Help: "Total number of propose batch update info total.",
|
||||
}),
|
||||
proposeBatchUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_batch_update_info_failure_total",
|
||||
Help: "Total number of propose batch update info failure total.",
|
||||
}),
|
||||
totalL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_batch_total_l1_commit_gas",
|
||||
Help: "The total l1 commit gas",
|
||||
}),
|
||||
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_batch_total_l1_call_data_size",
|
||||
Help: "The total l1 call data size",
|
||||
}),
|
||||
batchChunksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_batch_chunks_number",
|
||||
Help: "The number of chunks in the batch",
|
||||
}),
|
||||
batchFirstChunkTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_batch_first_chunk_timeout_reached_total",
|
||||
Help: "Total times of batch's first chunk timeout reached",
|
||||
}),
|
||||
batchChunksSuperposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_batch_chunks_superpose_not_enough_total",
|
||||
Help: "Total number of batch chunk superpose not enough",
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
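One side of the constructor above registers its counters and gauges through promauto.With(reg), while the tests later in this diff pass nil for the registerer. My reading of the client_golang documentation (not something stated in this diff) is that promauto.With(nil) returns a factory that creates collectors without registering them, which is what makes a nil registerer safe in tests. A tiny sketch under that assumption:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func newCircleCounter(reg prometheus.Registerer) prometheus.Counter {
	// With a nil registerer the counter is created but not registered,
	// so unit tests can pass nil without touching a shared registry.
	return promauto.With(reg).NewCounter(prometheus.CounterOpts{
		Name: "example_propose_batch_circle_total",
		Help: "Total number of propose-batch rounds.",
	})
}

func main() {
	c := newCircleCounter(nil) // test-style construction
	c.Inc()

	reg := prometheus.NewRegistry() // production-style construction
	newCircleCounter(reg).Inc()
	fmt.Println("ok")
}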
// TryProposeBatch tries to propose a new batch.
|
||||
func (p *BatchProposer) TryProposeBatch() {
|
||||
p.batchProposerCircleTotal.Inc()
|
||||
dbChunks, err := p.proposeBatchChunks()
|
||||
if err != nil {
|
||||
p.proposeBatchFailureTotal.Inc()
|
||||
log.Error("proposeBatchChunks failed", "err", err)
|
||||
return
|
||||
}
|
||||
if err := p.updateBatchInfoInDB(dbChunks); err != nil {
|
||||
p.proposeBatchUpdateInfoFailureTotal.Inc()
|
||||
log.Error("update batch info in db failed", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
|
||||
p.proposeBatchUpdateInfoTotal.Inc()
|
||||
numChunks := len(dbChunks)
|
||||
if numChunks <= 0 {
|
||||
return nil
|
||||
@@ -130,12 +77,10 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
|
||||
err = p.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
|
||||
if dbErr != nil {
|
||||
log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure", "error", "start chunk index", startChunkIndex, "end chunk index", endChunkIndex, dbErr)
|
||||
return dbErr
|
||||
}
|
||||
dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batch.Hash, dbTX)
|
||||
if dbErr != nil {
|
||||
log.Warn("BatchProposer.UpdateBatchHashInRange update the chunk's batch hash failure", "hash", batch.Hash, "error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
return nil
|
||||
@@ -187,7 +132,6 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
|
||||
// batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
|
||||
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
|
||||
|
||||
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
|
||||
// Check if the first chunk breaks hard limits.
|
||||
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
|
||||
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
|
||||
@@ -200,7 +144,6 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
|
||||
)
|
||||
}
|
||||
|
||||
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
|
||||
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
|
||||
return nil, fmt.Errorf(
|
||||
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
|
||||
@@ -231,8 +174,6 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
|
||||
}
|
||||
}
|
||||
|
||||
p.batchChunksNum.Set(float64(len(dbChunks)))
|
||||
|
||||
var hasChunkTimeout bool
|
||||
currentTimeSec := uint64(time.Now().Unix())
|
||||
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
|
||||
@@ -242,14 +183,12 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
|
||||
"chunk outdated time threshold", currentTimeSec,
|
||||
)
|
||||
hasChunkTimeout = true
|
||||
p.batchFirstChunkTimeoutReached.Inc()
|
||||
}
|
||||
|
||||
if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
|
||||
log.Warn("The chunk number of the batch is less than the minimum limit",
|
||||
"chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
|
||||
)
|
||||
p.batchChunksSuperposeNotEnoughTotal.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
return dbChunks, nil
|
||||
|
||||
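The proposeBatchChunks hunk charges getKeccakGas(89 + 32*ceil(l1MessagePopped/256)) for hashing the batch header. getKeccakGas itself is not shown in this excerpt; assuming it follows the EVM's KECCAK256 pricing (30 gas plus 6 gas per 32-byte word), a worked version of both formulas looks like this:

package main

import "fmt"

// keccakGas approximates the EVM KECCAK256 cost for hashing `size` bytes:
// 30 static gas plus 6 gas per 32-byte word. This is presumably what
// getKeccakGas computes, but that is an assumption.
func keccakGas(size uint64) uint64 {
	return 30 + 6*((size+31)/32)
}

// batchHeaderSize is the formula from the comment in the hunk above:
// 89 fixed bytes plus one 32-byte bitmap word per 256 popped L1 messages.
func batchHeaderSize(l1MessagePopped uint64) uint64 {
	return 89 + 32*((l1MessagePopped+255)/256)
}

func main() {
	for _, popped := range []uint64{0, 1, 256, 257} {
		size := batchHeaderSize(popped)
		fmt.Printf("popped=%d header=%d bytes keccak gas=%d\n", popped, size, keccakGas(size))
	}
}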
@@ -30,7 +30,7 @@ func testBatchProposer(t *testing.T) {
|
||||
MinL1CommitCalldataSizePerChunk: 0,
|
||||
MaxRowConsumptionPerChunk: 1048319,
|
||||
ChunkTimeoutSec: 300,
|
||||
}, db, nil)
|
||||
}, db)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
@@ -39,7 +39,7 @@ func testBatchProposer(t *testing.T) {
|
||||
MaxL1CommitCalldataSizePerBatch: 1000000,
|
||||
MinChunkNumPerBatch: 1,
|
||||
BatchTimeoutSec: 300,
|
||||
}, db, nil)
|
||||
}, db)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
|
||||
@@ -6,8 +6,6 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
@@ -59,23 +57,10 @@ type ChunkProposer struct {
|
||||
maxRowConsumptionPerChunk uint64
|
||||
chunkTimeoutSec uint64
|
||||
gasCostIncreaseMultiplier float64
|
||||
|
||||
chunkProposerCircleTotal prometheus.Counter
|
||||
proposeChunkFailureTotal prometheus.Counter
|
||||
proposeChunkUpdateInfoTotal prometheus.Counter
|
||||
proposeChunkUpdateInfoFailureTotal prometheus.Counter
|
||||
chunkL2TxNum prometheus.Gauge
|
||||
chunkEstimateL1CommitGas prometheus.Gauge
|
||||
totalL1CommitCalldataSize prometheus.Gauge
|
||||
totalTxGasUsed prometheus.Gauge
|
||||
maxTxConsumption prometheus.Gauge
|
||||
chunkBlocksNum prometheus.Gauge
|
||||
chunkBlockTimeoutReached prometheus.Counter
|
||||
chunkBlocksSuperposeNotEnoughTotal prometheus.Counter
|
||||
}
|
||||
|
||||
// NewChunkProposer creates a new ChunkProposer instance.
|
||||
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
|
||||
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB) *ChunkProposer {
|
||||
return &ChunkProposer{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
@@ -89,70 +74,18 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
|
||||
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
|
||||
chunkTimeoutSec: cfg.ChunkTimeoutSec,
|
||||
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
|
||||
|
||||
chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_chunk_circle_total",
|
||||
Help: "Total number of propose chunk total.",
|
||||
}),
|
||||
proposeChunkFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_chunk_failure_circle_total",
|
||||
Help: "Total number of propose chunk failure total.",
|
||||
}),
|
||||
proposeChunkUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_chunk_update_info_total",
|
||||
Help: "Total number of propose chunk update info total.",
|
||||
}),
|
||||
proposeChunkUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_chunk_update_info_failure_total",
|
||||
Help: "Total number of propose chunk update info failure total.",
|
||||
}),
|
||||
chunkL2TxNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_chunk_l2_tx_num",
|
||||
Help: "The chunk l2 tx num",
|
||||
}),
|
||||
chunkEstimateL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_chunk_estimate_l1_commit_gas",
|
||||
Help: "The chunk estimate l1 commit gas",
|
||||
}),
|
||||
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_chunk_total_l1_commit_call_data_size",
|
||||
Help: "The total l1 commit call data size",
|
||||
}),
|
||||
totalTxGasUsed: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_chunk_total_tx_gas_used",
|
||||
Help: "The total tx gas used",
|
||||
}),
|
||||
maxTxConsumption: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_chunk_max_tx_consumption",
|
||||
Help: "The max tx consumption",
|
||||
}),
|
||||
chunkBlocksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_chunk_chunk_block_number",
|
||||
Help: "The number of blocks in the chunk",
|
||||
}),
|
||||
chunkBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_chunk_first_block_timeout_reached_total",
|
||||
Help: "Total times of chunk's first block timeout reached",
|
||||
}),
|
||||
chunkBlocksSuperposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_chunk_blocks_superpose_not_enough_total",
|
||||
Help: "Total number of chunk block superpose not enough",
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// TryProposeChunk tries to propose a new chunk.
|
||||
func (p *ChunkProposer) TryProposeChunk() {
|
||||
p.chunkProposerCircleTotal.Inc()
|
||||
proposedChunk, err := p.proposeChunk()
|
||||
if err != nil {
|
||||
p.proposeChunkFailureTotal.Inc()
|
||||
log.Error("propose new chunk failed", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := p.updateChunkInfoInDB(proposedChunk); err != nil {
|
||||
p.proposeChunkUpdateInfoFailureTotal.Inc()
|
||||
log.Error("update chunk info in orm failed", "err", err)
|
||||
}
|
||||
}
|
||||
@@ -162,11 +95,9 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
p.proposeChunkUpdateInfoTotal.Inc()
|
||||
err := p.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, dbTX)
|
||||
if err != nil {
|
||||
log.Warn("ChunkProposer.InsertChunk failed", "chunk hash", chunk.Hash)
|
||||
return err
|
||||
}
|
||||
if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
|
||||
@@ -200,7 +131,6 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
|
||||
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
|
||||
}
|
||||
|
||||
p.chunkL2TxNum.Set(float64(totalL2TxNum))
|
||||
// Check if the first block breaks hard limits.
|
||||
// If so, it indicates there are bugs in sequencer, manual fix is needed.
|
||||
if totalL2TxNum > p.maxL2TxNumPerChunk {
|
||||
@@ -212,7 +142,6 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
|
||||
)
|
||||
}
|
||||
|
||||
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
|
||||
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
|
||||
@@ -222,7 +151,6 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
|
||||
)
|
||||
}
|
||||
|
||||
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
|
||||
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
|
||||
@@ -232,7 +160,6 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
|
||||
)
|
||||
}
|
||||
|
||||
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
|
||||
// Check if the first block breaks any soft limits.
|
||||
if totalTxGasUsed > p.maxTxGasPerChunk {
|
||||
log.Warn(
|
||||
@@ -242,10 +169,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
|
||||
"max gas limit", p.maxTxGasPerChunk,
|
||||
)
|
||||
}
|
||||
|
||||
max := crc.max()
|
||||
p.maxTxConsumption.Set(float64(max))
|
||||
if max > p.maxRowConsumptionPerChunk {
|
||||
if max := crc.max(); max > p.maxRowConsumptionPerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
|
||||
firstBlock.Header.Number,
|
||||
@@ -276,8 +200,6 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
|
||||
}
|
||||
}
|
||||
|
||||
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
|
||||
|
||||
var hasBlockTimeout bool
|
||||
currentTimeSec := uint64(time.Now().Unix())
|
||||
if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
|
||||
@@ -286,7 +208,6 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
|
||||
"block timestamp", blocks[0].Header.Time,
|
||||
"block outdated time threshold", currentTimeSec,
|
||||
)
|
||||
p.chunkBlockTimeoutReached.Inc()
|
||||
hasBlockTimeout = true
|
||||
}
|
||||
|
||||
@@ -295,7 +216,6 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
|
||||
"totalL1CommitCalldataSize", totalL1CommitCalldataSize,
|
||||
"minL1CommitCalldataSizePerChunk", p.minL1CommitCalldataSizePerChunk,
|
||||
)
|
||||
p.chunkBlocksSuperposeNotEnoughTotal.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
return chunk, nil
|
||||
|
||||
@@ -30,7 +30,7 @@ func testChunkProposer(t *testing.T) {
|
||||
MinL1CommitCalldataSizePerChunk: 0,
|
||||
MaxRowConsumptionPerChunk: 1048319,
|
||||
ChunkTimeoutSec: 300,
|
||||
}, db, nil)
|
||||
}, db)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
expectedChunk := &types.Chunk{
|
||||
@@ -62,7 +62,7 @@ func testChunkProposerRowConsumption(t *testing.T) {
|
||||
MinL1CommitCalldataSizePerChunk: 0,
|
||||
MaxRowConsumptionPerChunk: 0, // !
|
||||
ChunkTimeoutSec: 300,
|
||||
}, db, nil)
|
||||
}, db)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
geth "github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
@@ -12,9 +11,11 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
bridgeAbi "scroll-tech/bridge/abi"
|
||||
@@ -22,6 +23,12 @@ import (
|
||||
"scroll-tech/bridge/internal/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
bridgeL1MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
|
||||
bridgeL1MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
|
||||
bridgeL1MsgsRollupEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
|
||||
)
|
||||
|
||||
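This side of the diff goes back to go-ethereum's metrics package: gauges and counters are created pre-registered against a registry (here metrics.ScrollRegistry) and updated with Update/Inc rather than Prometheus-style Set/Add. A minimal sketch against a locally created registry, assuming the scroll-tech fork mirrors upstream go-ethereum's metrics API; the metric names are just examples.

package main

import (
	"fmt"

	gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
)

func main() {
	// geth metrics are no-ops unless explicitly enabled.
	gethMetrics.Enabled = true

	registry := gethMetrics.NewRegistry()
	syncHeight := gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", registry)
	sentEvents := gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", registry)

	syncHeight.Update(1_234_567) // gauges take absolute values
	sentEvents.Inc(3)            // counters take deltas

	fmt.Println(syncHeight.Value(), sentEvents.Count())
}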
type rollupEvent struct {
|
||||
batchHash common.Hash
|
||||
txHash common.Hash
|
||||
@@ -52,12 +59,10 @@ type L1WatcherClient struct {
|
||||
processedMsgHeight uint64
|
||||
// The height of the block that the watcher has retrieved header rlp
|
||||
processedBlockHeight uint64
|
||||
|
||||
metrics *l1WatcherMetrics
|
||||
}
|
||||
|
||||
// NewL1WatcherClient returns a new instance of L1WatcherClient.
|
||||
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
|
||||
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB) *L1WatcherClient {
|
||||
l1MessageOrm := orm.NewL1Message(db)
|
||||
savedHeight, err := l1MessageOrm.GetLayer1LatestWatchedHeight()
|
||||
if err != nil {
|
||||
@@ -97,7 +102,6 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
|
||||
|
||||
processedMsgHeight: uint64(savedHeight),
|
||||
processedBlockHeight: savedL1BlockHeight,
|
||||
metrics: initL1WatcherMetrics(reg),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -121,7 +125,6 @@ func (w *L1WatcherClient) SetConfirmations(confirmations rpc.BlockNumber) {
|
||||
|
||||
// FetchBlockHeader pulls the latest L1 blocks and saves them in the DB
|
||||
func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
|
||||
w.metrics.l1WatcherFetchBlockHeaderTotal.Inc()
|
||||
fromBlock := int64(w.processedBlockHeight) + 1
|
||||
toBlock := int64(blockHeight)
|
||||
if toBlock < fromBlock {
|
||||
@@ -168,7 +171,6 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
|
||||
|
||||
// update processed height
|
||||
w.processedBlockHeight = uint64(toBlock)
|
||||
w.metrics.l1WatcherFetchBlockHeaderProcessedBlockHeight.Set(float64(w.processedBlockHeight))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -187,7 +189,6 @@ func (w *L1WatcherClient) FetchContractEvent() error {
|
||||
toBlock := int64(blockHeight)
|
||||
|
||||
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
|
||||
w.metrics.l1WatcherFetchContractEventTotal.Inc()
|
||||
to := from + contractEventsBlocksFetchLimit - 1
|
||||
|
||||
if to > toBlock {
|
||||
@@ -219,10 +220,9 @@ func (w *L1WatcherClient) FetchContractEvent() error {
|
||||
}
|
||||
if len(logs) == 0 {
|
||||
w.processedMsgHeight = uint64(to)
|
||||
w.metrics.l1WatcherFetchContractEventProcessedBlockHeight.Set(float64(to))
|
||||
bridgeL1MsgsSyncHeightGauge.Update(to)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))
|
||||
|
||||
sentMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
|
||||
@@ -232,8 +232,8 @@ func (w *L1WatcherClient) FetchContractEvent() error {
|
||||
}
|
||||
sentMessageCount := int64(len(sentMessageEvents))
|
||||
rollupEventCount := int64(len(rollupEvents))
|
||||
w.metrics.l1WatcherFetchContractEventSentEventsTotal.Add(float64(sentMessageCount))
|
||||
w.metrics.l1WatcherFetchContractEventRollupEventsTotal.Add(float64(rollupEventCount))
|
||||
bridgeL1MsgsSentEventsTotalCounter.Inc(sentMessageCount)
|
||||
bridgeL1MsgsRollupEventsTotalCounter.Inc(rollupEventCount)
|
||||
log.Info("L1 events types", "SentMessageCount", sentMessageCount, "RollupEventCount", rollupEventCount)
|
||||
|
||||
// use rollup event to update rollup results db status
|
||||
@@ -273,8 +273,7 @@ func (w *L1WatcherClient) FetchContractEvent() error {
|
||||
}
|
||||
|
||||
w.processedMsgHeight = uint64(to)
|
||||
w.metrics.l1WatcherFetchContractEventSuccessTotal.Inc()
|
||||
w.metrics.l1WatcherFetchContractEventProcessedBlockHeight.Set(float64(w.processedMsgHeight))
|
||||
bridgeL1MsgsSyncHeightGauge.Update(to)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
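FetchContractEvent walks the confirmed range in fixed-size windows (contractEventsBlocksFetchLimit), clamping the last window and only advancing processedMsgHeight once a window has been fully handled. A stripped-down sketch of that pagination loop; the window size and handler here are placeholders.

package main

import "fmt"

// fetchInWindows visits [from, to] in fixed-size windows, clamping the last
// one, and returns the last fully processed height - the same shape as the
// FetchContractEvent loop above.
func fetchInWindows(from, to, window int64, handle func(from, to int64) error) (int64, error) {
	processed := from - 1
	for start := from; start <= to; start += window {
		end := start + window - 1
		if end > to {
			end = to
		}
		if err := handle(start, end); err != nil {
			return processed, err // resume from processed+1 on the next round
		}
		processed = end
	}
	return processed, nil
}

func main() {
	last, err := fetchInWindows(101, 457, 100, func(from, to int64) error {
		fmt.Printf("querying logs in [%d, %d]\n", from, to)
		return nil
	})
	fmt.Println("processed up to", last, "err:", err)
}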
@@ -1,59 +0,0 @@
package watcher

import (
"sync"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)

type l1WatcherMetrics struct {
l1WatcherFetchBlockHeaderTotal prometheus.Counter
l1WatcherFetchBlockHeaderProcessedBlockHeight prometheus.Gauge
l1WatcherFetchContractEventTotal prometheus.Counter
l1WatcherFetchContractEventSuccessTotal prometheus.Counter
l1WatcherFetchContractEventProcessedBlockHeight prometheus.Gauge
l1WatcherFetchContractEventSentEventsTotal prometheus.Counter
l1WatcherFetchContractEventRollupEventsTotal prometheus.Counter
}

var (
initL1WatcherMetricOnce sync.Once
l1WatcherMetric *l1WatcherMetrics
)

func initL1WatcherMetrics(reg prometheus.Registerer) *l1WatcherMetrics {
initL1WatcherMetricOnce.Do(func() {
l1WatcherMetric = &l1WatcherMetrics{
l1WatcherFetchBlockHeaderTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_header_total",
Help: "The total number of l1 watcher fetch block header total",
}),
l1WatcherFetchBlockHeaderProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l1_watcher_fetch_block_header_processed_block_height",
Help: "The current processed block height of l1 watcher fetch block header",
}),
l1WatcherFetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_total",
Help: "The total number of l1 watcher fetch contract event total",
}),
l1WatcherFetchContractEventSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_success_total",
Help: "The total number of l1 watcher fetch contract event success total",
}),
l1WatcherFetchContractEventProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_processed_block_height",
Help: "The current processed block height of l1 watcher fetch contract event",
}),
l1WatcherFetchContractEventSentEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_sent_event_total",
Help: "The current processed block height of l1 watcher fetch contract sent event",
}),
l1WatcherFetchContractEventRollupEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_rollup_event_total",
Help: "The current processed block height of l1 watcher fetch contract rollup event",
}),
}
})
return l1WatcherMetric
}
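The file listed above (present on only one side of this comparison) guards metric construction with a package-level sync.Once, so building several watchers never triggers a Prometheus duplicate-registration panic. A compact sketch of the same idiom, verified with client_golang's testutil helper; the type and metric names are illustrative.

package main

import (
	"fmt"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

type watcherMetrics struct {
	fetchTotal prometheus.Counter
}

var (
	initOnce sync.Once
	shared   *watcherMetrics
)

// initWatcherMetrics builds the metrics exactly once; later calls reuse the
// same collectors, so nothing is ever registered twice.
func initWatcherMetrics(reg prometheus.Registerer) *watcherMetrics {
	initOnce.Do(func() {
		shared = &watcherMetrics{
			fetchTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "example_watcher_fetch_total",
				Help: "Total fetch rounds.",
			}),
		}
	})
	return shared
}

func main() {
	reg := prometheus.NewRegistry()
	a := initWatcherMetrics(reg)
	b := initWatcherMetrics(reg) // second call reuses the same collectors
	a.fetchTotal.Inc()
	b.fetchTotal.Inc()
	fmt.Println(testutil.ToFloat64(a.fetchTotal)) // 2
}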
@@ -30,8 +30,7 @@ func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
|
||||
client, err := ethclient.Dial(base.L1gethImg.Endpoint())
|
||||
assert.NoError(t, err)
|
||||
l1Cfg := cfg.L1Config
|
||||
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress,
|
||||
l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil)
|
||||
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
|
||||
assert.NoError(t, watcher.FetchContractEvent())
|
||||
return watcher, db
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
geth "github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
@@ -14,9 +13,11 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/event"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
bridgeAbi "scroll-tech/bridge/abi"
|
||||
@@ -24,6 +25,14 @@ import (
|
||||
"scroll-tech/bridge/internal/utils"
|
||||
)
|
||||
|
||||
// Metrics
|
||||
var (
|
||||
bridgeL2MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
|
||||
bridgeL2BlocksFetchedHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
|
||||
bridgeL2BlocksFetchedGapGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
|
||||
bridgeL2MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
|
||||
)
|
||||
|
||||
// L2WatcherClient provides APIs for subscribing to various events from l2geth
|
||||
type L2WatcherClient struct {
|
||||
ctx context.Context
|
||||
@@ -47,12 +56,10 @@ type L2WatcherClient struct {
|
||||
processedMsgHeight uint64
|
||||
|
||||
stopped uint64
|
||||
|
||||
metrics *l2WatcherMetrics
|
||||
}
|
||||
|
||||
// NewL2WatcherClient takes an l2geth client and creates an L2WatcherClient instance
|
||||
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB, reg prometheus.Registerer) *L2WatcherClient {
|
||||
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB) *L2WatcherClient {
|
||||
l1MessageOrm := orm.NewL1Message(db)
|
||||
var savedHeight uint64
|
||||
l1msg, err := l1MessageOrm.GetLayer1LatestMessageWithLayer2Hash()
|
||||
@@ -86,7 +93,6 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
|
||||
withdrawTrieRootSlot: withdrawTrieRootSlot,
|
||||
|
||||
stopped: 0,
|
||||
metrics: initL2WatcherMetrics(reg),
|
||||
}
|
||||
|
||||
return &w
|
||||
@@ -96,7 +102,6 @@ const blockTracesFetchLimit = uint64(10)
|
||||
|
||||
// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
|
||||
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
|
||||
w.metrics.fetchRunningMissingBlocksTotal.Inc()
|
||||
heightInDB, err := w.l2BlockOrm.GetL2BlocksLatestHeight(w.ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to GetL2BlocksLatestHeight", "err", err)
|
||||
@@ -115,8 +120,8 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
|
||||
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
|
||||
return
|
||||
}
|
||||
w.metrics.fetchRunningMissingBlocksHeight.Set(float64(to))
|
||||
w.metrics.bridgeL2BlocksFetchedGap.Set(float64(blockHeight - to))
|
||||
bridgeL2BlocksFetchedHeightGauge.Update(int64(to))
|
||||
bridgeL2BlocksFetchedGapGauge.Update(int64(blockHeight - to))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -194,7 +199,6 @@ func (w *L2WatcherClient) FetchContractEvent() {
|
||||
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
|
||||
}()
|
||||
|
||||
w.metrics.fetchContractEventTotal.Inc()
|
||||
blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.Client, w.confirmations)
|
||||
if err != nil {
|
||||
log.Error("failed to get block number", "err", err)
|
||||
@@ -234,7 +238,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
|
||||
}
|
||||
if len(logs) == 0 {
|
||||
w.processedMsgHeight = uint64(to)
|
||||
w.metrics.fetchContractEventHeight.Set(float64(to))
|
||||
bridgeL2MsgsSyncHeightGauge.Update(to)
|
||||
continue
|
||||
}
|
||||
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
|
||||
@@ -246,7 +250,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
|
||||
}
|
||||
|
||||
relayedMessageCount := int64(len(relayedMessageEvents))
|
||||
w.metrics.bridgeL2MsgsRelayedEventsTotal.Add(float64(relayedMessageCount))
|
||||
bridgeL2MsgsRelayedEventsTotalCounter.Inc(relayedMessageCount)
|
||||
log.Info("L2 events types", "RelayedMessageCount", relayedMessageCount)
|
||||
|
||||
// Update relayed message first to make sure we don't forget to update submitted message.
|
||||
@@ -265,7 +269,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
|
||||
}
|
||||
|
||||
w.processedMsgHeight = uint64(to)
|
||||
w.metrics.fetchContractEventHeight.Set(float64(to))
|
||||
bridgeL2MsgsSyncHeightGauge.Update(to)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
type l2WatcherMetrics struct {
|
||||
fetchRunningMissingBlocksTotal prometheus.Counter
|
||||
fetchRunningMissingBlocksHeight prometheus.Gauge
|
||||
fetchContractEventTotal prometheus.Counter
|
||||
fetchContractEventHeight prometheus.Gauge
|
||||
bridgeL2MsgsRelayedEventsTotal prometheus.Counter
|
||||
bridgeL2BlocksFetchedGap prometheus.Gauge
|
||||
}
|
||||
|
||||
var (
|
||||
initL2WatcherMetricOnce sync.Once
|
||||
l2WatcherMetric *l2WatcherMetrics
|
||||
)
|
||||
|
||||
func initL2WatcherMetrics(reg prometheus.Registerer) *l2WatcherMetrics {
|
||||
initL2WatcherMetricOnce.Do(func() {
|
||||
l2WatcherMetric = &l2WatcherMetrics{
|
||||
fetchRunningMissingBlocksTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_l2_watcher_fetch_running_missing_blocks_total",
|
||||
Help: "The total number of l2 watcher fetch running missing blocks",
|
||||
}),
|
||||
fetchRunningMissingBlocksHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_l2_watcher_fetch_running_missing_blocks_height",
|
||||
Help: "The total number of l2 watcher fetch running missing blocks height",
|
||||
}),
|
||||
fetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_l2_watcher_fetch_contract_events_total",
|
||||
Help: "The total number of l2 watcher fetch contract events",
|
||||
}),
|
||||
fetchContractEventHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_l2_watcher_fetch_contract_height",
|
||||
Help: "The total number of l2 watcher fetch contract height",
|
||||
}),
|
||||
bridgeL2MsgsRelayedEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_l2_watcher_msg_relayed_events_total",
|
||||
Help: "The total number of l2 watcher msg relayed event",
|
||||
}),
|
||||
bridgeL2BlocksFetchedGap: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_l2_watcher_blocks_fetched_gap",
|
||||
Help: "The gap of l2 fetch",
|
||||
}),
|
||||
}
|
||||
})
|
||||
return l2WatcherMetric
|
||||
}
|
||||
@@ -34,8 +34,7 @@ import (
|
||||
func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {
|
||||
db := setupDB(t)
|
||||
l2cfg := cfg.L2Config
|
||||
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress,
|
||||
l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db, nil)
|
||||
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
|
||||
return watcher, db
|
||||
}
|
||||
|
||||
@@ -51,7 +50,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
|
||||
|
||||
l1cfg := cfg.L1Config
|
||||
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
|
||||
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKey, "test", "test", nil)
|
||||
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKey)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create several transactions and commit to block
|
||||
@@ -96,7 +95,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
|
||||
|
||||
func prepareWatcherClient(l2Cli *ethclient.Client, db *gorm.DB, contractAddr common.Address) *L2WatcherClient {
|
||||
confirmations := rpc.LatestBlockNumber
|
||||
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db, nil)
|
||||
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
|
||||
}
|
||||
|
||||
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
|
||||
|
||||
@@ -40,6 +40,8 @@ type Batch struct {
|
||||
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
|
||||
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
|
||||
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
|
||||
TotalAttempts int16 `json:"total_attempts" gorm:"column:total_attempts;default:0"`
|
||||
ActiveAttempts int16 `json:"active_attempts" gorm:"column:active_attempts;default:0"`
|
||||
|
||||
// rollup
|
||||
RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"`
|
||||
@@ -267,6 +269,8 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
|
||||
BatchHeader: batchHeader.Encode(),
|
||||
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
|
||||
ProvingStatus: int16(types.ProvingTaskUnassigned),
|
||||
TotalAttempts: 0,
|
||||
ActiveAttempts: 0,
|
||||
RollupStatus: int16(types.RollupPending),
|
||||
OracleStatus: int16(types.GasOraclePending),
|
||||
}
|
||||
|
||||
@@ -37,6 +37,8 @@ type Chunk struct {
|
||||
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
|
||||
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
|
||||
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
|
||||
TotalAttempts int16 `json:"total_attempts" gorm:"column:total_attempts;default:0"`
|
||||
ActiveAttempts int16 `json:"active_attempts" gorm:"column:active_attempts;default:0"`
|
||||
|
||||
// batch
|
||||
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
|
||||
@@ -175,6 +177,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
|
||||
ParentChunkStateRoot: parentChunkStateRoot,
|
||||
WithdrawRoot: chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
|
||||
ProvingStatus: int16(types.ProvingTaskUnassigned),
|
||||
TotalAttempts: 0,
|
||||
ActiveAttempts: 0,
|
||||
}
|
||||
|
||||
db := o.db
|
||||
|
||||
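The ORM hunks add total_attempts and active_attempts columns to Batch and Chunk and zero them on insert. How they are bumped is outside this excerpt; a common gorm pattern for such counters is an SQL-side increment so concurrent provers cannot race a read-modify-write, sketched below. The table, model, and bumpAttempts helper are assumptions for illustration, and the in-memory SQLite driver is used only to keep the example runnable.

package main

import (
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

type Chunk struct {
	Hash           string `gorm:"column:hash;primaryKey"`
	TotalAttempts  int16  `gorm:"column:total_attempts;default:0"`
	ActiveAttempts int16  `gorm:"column:active_attempts;default:0"`
}

// bumpAttempts increments both counters inside SQL, so two workers bumping
// the same row cannot lose an update.
func bumpAttempts(db *gorm.DB, hash string) error {
	return db.Model(&Chunk{}).Where("hash = ?", hash).Updates(map[string]interface{}{
		"total_attempts":  gorm.Expr("total_attempts + 1"),
		"active_attempts": gorm.Expr("active_attempts + 1"),
	}).Error
}

func main() {
	db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&Chunk{}); err != nil {
		panic(err)
	}
	if err := db.Create(&Chunk{Hash: "0xabc"}).Error; err != nil {
		panic(err)
	}
	if err := bumpAttempts(db, "0xabc"); err != nil {
		panic(err)
	}
}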
@@ -26,14 +26,13 @@ func testImportL1GasPrice(t *testing.T) {
|
||||
l1Cfg := bridgeApp.Config.L1Config
|
||||
|
||||
// Create L1Relayer
|
||||
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
|
||||
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create L1Watcher
|
||||
startHeight, err := l1Client.BlockNumber(context.Background())
|
||||
assert.NoError(t, err)
|
||||
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0,
|
||||
l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
|
||||
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
|
||||
|
||||
// fetch new blocks
|
||||
number, err := l1Client.BlockNumber(context.Background())
|
||||
@@ -68,7 +67,7 @@ func testImportL2GasPrice(t *testing.T) {
|
||||
prepareContracts(t)
|
||||
|
||||
l2Cfg := bridgeApp.Config.L2Config
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, nil)
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// add fake chunk
|
||||
|
||||
@@ -29,16 +29,14 @@ func testRelayL1MessageSucceed(t *testing.T) {
|
||||
l2Cfg := bridgeApp.Config.L2Config
|
||||
|
||||
// Create L1Relayer
|
||||
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
|
||||
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
// Create L1Watcher
|
||||
confirmations := rpc.LatestBlockNumber
|
||||
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress,
|
||||
l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
|
||||
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
|
||||
|
||||
// Create L2Watcher
|
||||
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress,
|
||||
l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db, nil)
|
||||
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
|
||||
|
||||
// send message through l1 messenger contract
|
||||
nonce, err := l1MessengerInstance.MessageNonce(&bind.CallOpts{})
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -12,7 +13,6 @@ import (
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/bridge/internal/config"
|
||||
"scroll-tech/bridge/internal/controller/relayer"
|
||||
@@ -28,13 +28,12 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
|
||||
// Create L2Relayer
|
||||
l2Cfg := bridgeApp.Config.L2Config
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, nil)
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create L1Watcher
|
||||
l1Cfg := bridgeApp.Config.L1Config
|
||||
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress,
|
||||
l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
|
||||
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
|
||||
|
||||
// add some blocks to db
|
||||
var wrappedBlocks []*types.WrappedBlock
|
||||
@@ -65,7 +64,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
MinL1CommitCalldataSizePerChunk: 0,
|
||||
MaxRowConsumptionPerChunk: 1048319,
|
||||
ChunkTimeoutSec: 300,
|
||||
}, db, nil)
|
||||
}, db)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
@@ -79,7 +78,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
MaxL1CommitCalldataSizePerBatch: 1000000,
|
||||
MinChunkNumPerBatch: 1,
|
||||
BatchTimeoutSec: 300,
|
||||
}, db, nil)
|
||||
}, db)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
l2Relayer.ProcessPendingBatches()
|
||||
@@ -92,22 +91,20 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
assert.NotEmpty(t, batch.CommitTxHash)
|
||||
assert.Equal(t, types.RollupCommitting, types.RollupStatus(batch.RollupStatus))
|
||||
|
||||
success := utils.TryTimes(30, func() bool {
|
||||
var receipt *gethTypes.Receipt
|
||||
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
|
||||
return err == nil && receipt.Status == 1
|
||||
})
|
||||
assert.True(t, success)
|
||||
assert.NoError(t, err)
|
||||
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(batch.CommitTxHash))
|
||||
assert.NoError(t, err)
|
||||
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(commitTxReceipt.Logs), 1)
|
||||
|
||||
// fetch rollup events
|
||||
success = utils.TryTimes(30, func() bool {
|
||||
err = l1Watcher.FetchContractEvent()
|
||||
assert.NoError(t, err)
|
||||
var statuses []types.RollupStatus
|
||||
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
|
||||
return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
|
||||
})
|
||||
assert.True(t, success)
|
||||
err = l1Watcher.FetchContractEvent()
|
||||
assert.NoError(t, err)
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupCommitted, statuses[0])
|
||||
|
||||
// add dummy proof
|
||||
proof := &message.BatchProof{
|
||||
@@ -121,7 +118,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
// process committed batch and check status
|
||||
l2Relayer.ProcessCommittedBatches()
|
||||
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
|
||||
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupFinalizing, statuses[0])
|
||||
@@ -131,20 +128,17 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
assert.NotNil(t, batch)
|
||||
assert.NotEmpty(t, batch.FinalizeTxHash)
|
||||
|
||||
success = utils.TryTimes(30, func() bool {
|
||||
var receipt *gethTypes.Receipt
|
||||
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
|
||||
return err == nil && receipt.Status == 1
|
||||
})
|
||||
assert.True(t, success)
|
||||
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(batch.FinalizeTxHash))
|
||||
assert.NoError(t, err)
|
||||
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
|
||||
|
||||
// fetch rollup events
|
||||
success = utils.TryTimes(30, func() bool {
|
||||
err = l1Watcher.FetchContractEvent()
|
||||
assert.NoError(t, err)
|
||||
var statuses []types.RollupStatus
|
||||
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
|
||||
return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
|
||||
})
|
||||
assert.True(t, success)
|
||||
err = l1Watcher.FetchContractEvent()
|
||||
assert.NoError(t, err)
|
||||
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupFinalized, statuses[0])
|
||||
}
|
||||
|
||||
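The test above swaps the 30-try polling loop around TransactionReceipt for bind.WaitMined, which blocks (polling internally) until the transaction is mined or the context expires, then returns the receipt. A hedged usage sketch; the endpoint and the transaction hash are placeholders, while in the test the hash comes from batch.CommitTxHash or batch.FinalizeTxHash.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}

	// placeholder hash; replace with the actual commit/finalize tx hash
	tx, _, err := client.TransactionByHash(context.Background(), common.HexToHash("0x..."))
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// WaitMined polls for the receipt until the tx is mined or ctx expires.
	receipt, err := bind.WaitMined(ctx, client, tx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("status:", receipt.Status, "logs:", len(receipt.Logs))
}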
79 common/libzkp/impl/Cargo.lock generated
@@ -32,7 +32,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "aggregator"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
|
||||
dependencies = [
|
||||
"ark-std",
|
||||
"env_logger 0.10.0",
|
||||
@@ -380,7 +380,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b"
|
||||
dependencies = [
|
||||
"borsh-derive",
|
||||
"hashbrown 0.12.3",
|
||||
"hashbrown 0.13.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -433,7 +433,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
|
||||
[[package]]
|
||||
name = "bus-mapping"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
|
||||
dependencies = [
|
||||
"eth-types",
|
||||
"ethers-core",
|
||||
@@ -1049,7 +1049,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "eth-types"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
|
||||
dependencies = [
|
||||
"ethers-core",
|
||||
"ethers-signers",
|
||||
@@ -1226,7 +1226,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "external-tracer"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
|
||||
dependencies = [
|
||||
"eth-types",
|
||||
"geth-utils",
|
||||
@@ -1439,7 +1439,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "gadgets"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
|
||||
dependencies = [
|
||||
"digest 0.7.6",
|
||||
"eth-types",
|
||||
@@ -1479,7 +1479,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "geth-utils"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
|
||||
dependencies = [
|
||||
"env_logger 0.9.3",
|
||||
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
|
||||
@@ -1580,21 +1580,6 @@ dependencies = [
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "halo2-base"
|
||||
version = "0.2.2"
|
||||
source = "git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
|
||||
dependencies = [
|
||||
"ff",
|
||||
"halo2_proofs",
|
||||
"itertools",
|
||||
"num-bigint",
|
||||
"num-integer",
|
||||
"num-traits",
|
||||
"rand_chacha",
|
||||
"rustc-hash",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "halo2-base"
|
||||
version = "0.2.2"
|
||||
@@ -1610,25 +1595,6 @@ dependencies = [
|
||||
"rustc-hash",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "halo2-ecc"
|
||||
version = "0.2.2"
|
||||
source = "git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
|
||||
dependencies = [
|
||||
"ff",
|
||||
"group",
|
||||
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
|
||||
"itertools",
|
||||
"num-bigint",
|
"num-integer",
"num-traits",
"rand",
"rand_chacha",
"rand_core",
"serde",
"serde_json",
]

[[package]]
name = "halo2-ecc"
version = "0.2.2"
@@ -1636,7 +1602,7 @@ source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#2c22586422
dependencies = [
"ff",
"group",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?branch=develop)",
"halo2-base",
"itertools",
"num-bigint",
"num-integer",
@@ -1689,7 +1655,7 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#b612b1e2a9fa2ccd150a6cb99e67592c8d62cd99"
source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#01f0b5260445a9190299af7b06b766c1e925fdaf"
dependencies = [
"ark-std",
"blake2b_simd",
@@ -2111,7 +2077,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
dependencies = [
"env_logger 0.9.3",
"eth-types",
@@ -2298,7 +2264,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
dependencies = [
"eth-types",
"ethers-core",
@@ -2313,7 +2279,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
dependencies = [
"bus-mapping",
"eth-types",
@@ -2789,7 +2755,7 @@ dependencies = [
[[package]]
name = "prover"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.2#8c439b1dd62c429223221484fb8a5470242d1cbc"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.18#fcf4bf3137ad37becdeb5360b23ac978405c6b2c"
dependencies = [
"aggregator",
"anyhow",
@@ -3658,12 +3624,12 @@ checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"
[[package]]
name = "snark-verifier"
version = "0.1.0"
source = "git+https://github.com/scroll-tech//snark-verifier?tag=v0.1.1#11a09d4a37c31c659b29e2dac0ceb544a776ad7b"
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#100127726ac210226ac1096767e0efc5230775e3"
dependencies = [
"bytes",
"ethereum-types 0.14.1",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
"halo2-ecc 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
"halo2-base",
"halo2-ecc",
"hex",
"itertools",
"lazy_static",
@@ -3682,12 +3648,12 @@ dependencies = [
[[package]]
name = "snark-verifier-sdk"
version = "0.0.1"
source = "git+https://github.com/scroll-tech//snark-verifier?tag=v0.1.1#11a09d4a37c31c659b29e2dac0ceb544a776ad7b"
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#100127726ac210226ac1096767e0efc5230775e3"
dependencies = [
"bincode",
"env_logger 0.10.0",
"ethereum-types 0.14.1",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
"halo2-base",
"hex",
"itertools",
"lazy_static",
@@ -4074,7 +4040,7 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "types"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.2#8c439b1dd62c429223221484fb8a5470242d1cbc"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.18#fcf4bf3137ad37becdeb5360b23ac978405c6b2c"
dependencies = [
"base64 0.13.1",
"blake2",
@@ -4525,18 +4491,17 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
dependencies = [
"array-init",
"bus-mapping",
"either",
"env_logger 0.9.3",
"eth-types",
"ethers-core",
"ethers-signers",
"gadgets",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?branch=develop)",
"halo2-ecc 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?branch=develop)",
"halo2-base",
"halo2-ecc",
"halo2_proofs",
"hex",
"itertools",

@@ -19,13 +19,9 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
[patch."https://github.com/privacy-scaling-explorations/halo2curves.git"]
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }

[patch."https://github.com/scroll-tech/snark-verifier"]
snark-verifier = { git = "https://github.com/scroll-tech//snark-verifier", tag = "v0.1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech//snark-verifier", tag = "v0.1.1" }

[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.2" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.2" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.18" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.18" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }

log = "0.4"
57
common/metrics/metrics.go
Normal file
57
common/metrics/metrics.go
Normal file
@@ -0,0 +1,57 @@
package metrics

import (
"context"
"net"
"net/http"
"strconv"

"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/metrics/prometheus"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"

"scroll-tech/common/utils"
)

var (
// ScrollRegistry is used for scroll metrics.
ScrollRegistry = metrics.NewRegistry()
)

// Serve starts the metrics server on the given address, will be closed when the given
// context is canceled.
func Serve(ctx context.Context, c *cli.Context) {
if !c.Bool(utils.MetricsEnabled.Name) {
return
}

address := net.JoinHostPort(
c.String(utils.MetricsAddr.Name),
strconv.Itoa(c.Int(utils.MetricsPort.Name)),
)

server := &http.Server{
Addr: address,
Handler: prometheus.Handler(ScrollRegistry),
ReadTimeout: rpc.DefaultHTTPTimeouts.ReadTimeout,
WriteTimeout: rpc.DefaultHTTPTimeouts.WriteTimeout,
IdleTimeout: rpc.DefaultHTTPTimeouts.IdleTimeout,
}

go func() {
<-ctx.Done()
if err := server.Close(); err != nil {
log.Error("Failed to close metrics server", "error", err)
}
}()

log.Info("Starting metrics server", "address", address)

go func() {
if err := server.ListenAndServe(); err != nil {
log.Error("start metrics server error", "error", err)
}
}()
}
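For orientation, here is a minimal usage sketch (not part of this compare) of how a service can consume the new package: register a counter on ScrollRegistry and start the server from a urfave/cli action. The flag names are resolved inside Serve via scroll-tech/common/utils as shown above; the counter name and the demo app below are purely illustrative.

package main

import (
	"os"

	"github.com/scroll-tech/go-ethereum/metrics"
	"github.com/urfave/cli/v2"

	scrollMetrics "scroll-tech/common/metrics"
)

// exampleProcessedTotal is a hypothetical counter, shown only to illustrate ScrollRegistry.
var exampleProcessedTotal = metrics.NewRegisteredCounter("example/processed/total", scrollMetrics.ScrollRegistry)

func action(ctx *cli.Context) error {
	// Serve is a no-op unless the metrics flag is enabled; the HTTP server
	// shuts down when ctx.Context is canceled.
	scrollMetrics.Serve(ctx.Context, ctx)

	exampleProcessedTotal.Inc(1) // count one unit of work
	// ... run the service until ctx.Context is done ...
	return nil
}

func main() {
	// In the real services the metrics flags are registered via
	// scroll-tech/common/utils; they are omitted here for brevity.
	app := &cli.App{Name: "metrics-demo", Action: action}
	if err := app.Run(os.Args); err != nil {
		panic(err)
	}
}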
@@ -103,6 +103,10 @@ const (
ProverTaskFailureTypeUndefined ProverTaskFailureType = iota
// ProverTaskFailureTypeTimeout prover task failure of timeout
ProverTaskFailureTypeTimeout
// ProverTaskFailureTypeValidatedFailed prover task failure of validated failed by coordinator
ProverTaskFailureTypeValidatedFailed
// ProverTaskFailureTypeVerifiedFailed prover task failure of verified failed by coordinator
ProverTaskFailureTypeVerifiedFailed
)

func (r ProverTaskFailureType) String() string {
@@ -111,8 +115,12 @@ func (r ProverTaskFailureType) String() string {
return "prover task failure undefined"
case ProverTaskFailureTypeTimeout:
return "prover task failure timeout"
case ProverTaskFailureTypeValidatedFailed:
return "prover task failure validated failed"
case ProverTaskFailureTypeVerifiedFailed:
return "prover task failure verified failed"
default:
return "illegal prover task failure type"
return fmt.Sprintf("illegal prover task failure type (%d)", int32(r))
}
}
@@ -13,18 +13,6 @@ import (
"github.com/scroll-tech/go-ethereum/rlp"
)

// ProofFailureType the proof failure type
type ProofFailureType int

const (
// ProofFailureUndefined the undefined type proof failure type
ProofFailureUndefined ProofFailureType = iota
// ProofFailurePanic proof failure for prover panic
ProofFailurePanic
// ProofFailureNoPanic proof failure for no prover panic
ProofFailureNoPanic
)

// RespStatus represents status code from prover to scroll
type RespStatus uint32
@@ -6,7 +6,7 @@ import (
"strings"
)

var tag = "v4.1.72"
var tag = "v4.1.60"

var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -45,28 +45,26 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
},
}

if spp.Status == int(message.StatusOk) {
switch message.ProofType(spp.TaskType) {
case message.ProofTypeChunk:
var tmpChunkProof message.ChunkProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.ChunkProof = &tmpChunkProof
case message.ProofTypeBatch:
var tmpBatchProof message.BatchProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.BatchProof = &tmpBatchProof
switch message.ProofType(spp.TaskType) {
case message.ProofTypeChunk:
var tmpChunkProof message.ChunkProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.ChunkProof = &tmpChunkProof
case message.ProofTypeBatch:
var tmpBatchProof message.BatchProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.BatchProof = &tmpBatchProof
}

if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg, spp); err != nil {
if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg); err != nil {
nerr := fmt.Errorf("handle zk proof failure, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr, nil)
return
@@ -146,13 +146,7 @@ func (c *Collector) timeoutChunkProofTask() {
}

func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout prometheus.Counter) {
// here not update the block batch proving status failed, because the collector loop will check
// the attempt times. if reach the times, the collector will set the block batch proving status.
for _, assignedProverTask := range assignedProverTasks {
if c.proverTaskOrm.TaskTimeoutMoreThanOnce(c.ctx, assignedProverTask.TaskID) {
log.Warn("Task timeout more than once", "taskType", message.ProofType(assignedProverTask.TaskType).String(), "hash", assignedProverTask.TaskID)
}

timeout.Inc()
log.Warn("proof task have reach the timeout", "task id", assignedProverTask.TaskID,
"prover public key", assignedProverTask.ProverPublicKey, "prover name", assignedProverTask.ProverName, "task type", assignedProverTask.TaskType)
@@ -160,26 +154,55 @@ func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout promethe
// update prover task proving status as ProverProofInvalid
if err := c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverProofInvalid, tx); err != nil {
log.Error("update prover task proving status failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
log.Error("update prover task proving status failure",
"hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey,
"prover proving status", types.ProverProofInvalid, "err", err)
return err
}

// update prover task failure type
// update prover task failure type as ProverTaskFailureTypeTimeout
if err := c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverTaskFailureTypeTimeout, tx); err != nil {
log.Error("update prover task failure type failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
log.Error("update prover task failure type failure",
"hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey,
"prover failure type", types.ProverTaskFailureTypeTimeout, "err", err)
return err
}

// update the task to unassigned, let collector restart it
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeChunk {
if err := c.chunkOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update chunk proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
if err := c.chunkOrm.DecreaseActiveAttemptsByHash(c.ctx, assignedProverTask.TaskID, tx); err != nil {
log.Error("decrease active attempts of chunk failure", "hash", assignedProverTask.TaskID, "err", err)
return err
}
}
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeBatch {
if err := c.batchOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update batch proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
if err := c.batchOrm.DecreaseActiveAttemptsByHash(c.ctx, assignedProverTask.TaskID, tx); err != nil {
log.Error("decrease active attempts of batch failure", "hash", assignedProverTask.TaskID, "err", err)
return err
}
}

var failedAssignmentCount uint64
failedAssignmentCount, err := c.proverTaskOrm.GetFailedTaskAssignmentCount(c.ctx, assignedProverTask.TaskID, tx)
if err != nil {
log.Error("get failed task assignment count failure", "taskID", assignedProverTask.TaskID, "err", err)
return err
}

if failedAssignmentCount >= uint64(c.cfg.ProverManager.SessionAttempts) {
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeChunk {
if err := c.chunkOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskFailed, tx); err != nil {
log.Error("update chunk proving status failure", "hash", assignedProverTask.TaskID,
"status", types.ProvingTaskFailed, "err", err)
return err
}
}
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeBatch {
if err := c.batchOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskFailed, tx); err != nil {
log.Error("update batch proving status failure", "hash", assignedProverTask.TaskID,
"status", types.ProvingTaskFailed, "err", err)
return err
}
}
}
return nil
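Read together, the replaced hunk above boils down to the following condensed sketch (illustration only, not the literal diff) for a single timed-out chunk task; the batch branch is symmetric. handleTimeout and its parameters are hypothetical, but every ORM call and config field it uses appears elsewhere in this compare.

package sketch

import (
	"context"

	"gorm.io/gorm"

	"scroll-tech/common/types"
	"scroll-tech/common/types/message"

	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/orm"
)

// handleTimeout marks one timed-out assignment invalid, frees its active slot,
// and fails the task outright once SessionAttempts failed assignments pile up.
func handleTimeout(ctx context.Context, db *gorm.DB, cfg *config.Config,
	proverTaskOrm *orm.ProverTask, chunkOrm *orm.Chunk, task orm.ProverTask) error {
	return db.Transaction(func(tx *gorm.DB) error {
		taskType := message.ProofType(task.TaskType)
		// 1. Invalidate this prover's attempt and record the timeout.
		if err := proverTaskOrm.UpdateProverTaskProvingStatus(ctx, taskType, task.TaskID, task.ProverPublicKey, types.ProverProofInvalid, tx); err != nil {
			return err
		}
		if err := proverTaskOrm.UpdateProverTaskFailureType(ctx, taskType, task.TaskID, task.ProverPublicKey, types.ProverTaskFailureTypeTimeout, tx); err != nil {
			return err
		}
		// 2. Free one active slot so the task can be handed out again.
		if err := chunkOrm.DecreaseActiveAttemptsByHash(ctx, task.TaskID, tx); err != nil {
			return err
		}
		// 3. Give up for good once failed assignments reach SessionAttempts.
		failed, err := proverTaskOrm.GetFailedTaskAssignmentCount(ctx, task.TaskID, tx)
		if err != nil {
			return err
		}
		if failed >= uint64(cfg.ProverManager.SessionAttempts) {
			return chunkOrm.UpdateProvingStatus(ctx, task.TaskID, types.ProvingTaskFailed, tx)
		}
		return nil
	})
}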
@@ -24,7 +24,12 @@ import (

// BatchProverTask is prover task implement for batch proof
type BatchProverTask struct {
BaseProverTask
cfg *config.Config
db *gorm.DB

batchOrm *orm.Batch
chunkOrm *orm.Chunk
proverTaskOrm *orm.ProverTask

batchAttemptsExceedTotal prometheus.Counter
batchTaskGetTaskTotal prometheus.Counter
@@ -33,13 +38,11 @@ type BatchProverTask struct {
// NewBatchProverTask new a batch collector
func NewBatchProverTask(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) *BatchProverTask {
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
db: db,
cfg: cfg,
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
},
db: db,
cfg: cfg,
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
batchAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_batch_attempts_exceed_total",
Help: "Total number of batch attempts exceed.",
@@ -81,27 +84,17 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task", publicKey)
}

batchTasks, err := bp.batchOrm.UpdateUnassignedBatchReturning(ctx, 1)
// load and send a batch task
batchTask, err := bp.batchOrm.UpdateBatchAttemptsReturning(ctx, bp.cfg.ProverManager.ProversPerSession, bp.cfg.ProverManager.SessionAttempts)
if err != nil {
return nil, fmt.Errorf("failed to get unassigned batch proving tasks, error:%w", err)
}

if len(batchTasks) == 0 {
if batchTask == nil {
return nil, nil
}

if len(batchTasks) != 1 {
return nil, fmt.Errorf("get unassigned batch proving task len not 1, batch tasks:%v", batchTasks)
}

batchTask := batchTasks[0]
log.Info("start batch proof generation session", "id", batchTask.Hash, "public key", publicKey, "prover name", proverName)

if !bp.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
bp.batchAttemptsExceedTotal.Inc()
return nil, fmt.Errorf("the batch task id:%s check attempts have reach the maximum", batchTask.Hash)
}

proverTask := orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: publicKey.(string),
@@ -24,7 +24,12 @@ import (

// ChunkProverTask the chunk prover task
type ChunkProverTask struct {
BaseProverTask
cfg *config.Config
db *gorm.DB

chunkOrm *orm.Chunk
blockOrm *orm.L2Block
proverTaskOrm *orm.ProverTask

chunkAttemptsExceedTotal prometheus.Counter
chunkTaskGetTaskTotal prometheus.Counter
@@ -33,14 +38,11 @@ type ChunkProverTask struct {
// NewChunkProverTask new a chunk prover task
func NewChunkProverTask(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) *ChunkProverTask {
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
db: db,
cfg: cfg,
chunkOrm: orm.NewChunk(db),
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
},

db: db,
cfg: cfg,
chunkOrm: orm.NewChunk(db),
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
chunkAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_chunk_attempts_exceed_total",
Help: "Total number of chunk attempts exceed.",
@@ -82,29 +84,18 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task", publicKey)
}

// load and send chunk tasks
chunkTasks, err := cp.chunkOrm.UpdateUnassignedChunkReturning(ctx, getTaskParameter.ProverHeight, 1)
// load and send a chunk task
chunkTask, err := cp.chunkOrm.UpdateChunkAttemptsReturning(ctx, getTaskParameter.ProverHeight, cp.cfg.ProverManager.ProversPerSession, cp.cfg.ProverManager.SessionAttempts)
if err != nil {
return nil, fmt.Errorf("failed to get unassigned chunk proving tasks, error:%w", err)
}

if len(chunkTasks) == 0 {
if chunkTask == nil {
return nil, nil
}

if len(chunkTasks) != 1 {
return nil, fmt.Errorf("get unassigned chunk proving task len not 1, chunk tasks:%v", chunkTasks)
}

chunkTask := chunkTasks[0]

log.Info("start chunk generation session", "id", chunkTask.Hash, "public key", publicKey, "prover name", proverName)

if !cp.checkAttemptsExceeded(chunkTask.Hash, message.ProofTypeChunk) {
cp.chunkAttemptsExceedTotal.Inc()
return nil, fmt.Errorf("chunk proof hash id:%s check attempts have reach the maximum", chunkTask.Hash)
}

proverTask := orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: publicKey.(string),
@@ -1,17 +1,8 @@
package provertask

import (
"context"

"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"

"scroll-tech/common/types"
"scroll-tech/common/types/message"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)

@@ -19,53 +10,3 @@ import (
type ProverTask interface {
Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error)
}

// BaseProverTask a base prover task which contain series functions
type BaseProverTask struct {
cfg *config.Config
ctx context.Context
db *gorm.DB

batchOrm *orm.Batch
chunkOrm *orm.Chunk
blockOrm *orm.L2Block
proverTaskOrm *orm.ProverTask
}

// checkAttempts use the count of prover task info to check the attempts
func (b *BaseProverTask) checkAttemptsExceeded(hash string, taskType message.ProofType) bool {
whereFields := make(map[string]interface{})
whereFields["task_id"] = hash
whereFields["task_type"] = int16(taskType)
proverTasks, err := b.proverTaskOrm.GetProverTasks(b.ctx, whereFields, nil, 0, 0)
if err != nil {
log.Error("get prover task error", "hash id", hash, "error", err)
return true
}

if len(proverTasks) >= int(b.cfg.ProverManager.SessionAttempts) {
log.Warn("proof generation prover task reach the max attempts", "hash", hash)

transErr := b.db.Transaction(func(tx *gorm.DB) error {
switch message.ProofType(proverTasks[0].TaskType) {
case message.ProofTypeChunk:
if err := b.chunkOrm.UpdateProvingStatus(b.ctx, hash, types.ProvingTaskFailed, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", hash, "error", err)
}
case message.ProofTypeBatch:
if err := b.batchOrm.UpdateProvingStatus(b.ctx, hash, types.ProvingTaskFailed, tx); err != nil {
log.Error("failed to update batch proving_status as failed", "msg.ID", hash, "error", err)
}
}
// update the prover task status to let timeout checker don't check it.
if err := b.proverTaskOrm.UpdateAllProverTaskProvingStatusOfTaskID(b.ctx, message.ProofType(proverTasks[0].TaskType), hash, types.ProverProofInvalid, tx); err != nil {
log.Error("failed to update prover task proving_status as failed", "msg.ID", hash, "error", err)
}
return nil
})
if transErr == nil {
return false
}
}
return true
}
@@ -93,7 +93,7 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, reg pro
proverTaskProveDuration: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Name: "coordinator_task_prove_duration_seconds",
Help: "Time spend by prover prove task.",
Buckets: []float64{180, 300, 480, 600, 900, 1200, 1800},
Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 20, 30, 60},
}),
validateFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_validate_failure_total",
@@ -121,7 +121,7 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, reg pro
// HandleZkProof handle a ZkProof submitted from a prover.
// For now only proving/verifying error will lead to setting status as skipped.
// db/unmarshal errors will not because they are errors on the business logic side.
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) error {
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.ProofMsg) error {
m.proofReceivedTotal.Inc()
pk := ctx.GetString(coordinatorType.PublicKey)
if len(pk) == 0 {
@@ -141,10 +141,11 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())

log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec)
log.Info("handling zk proof", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proverTask.TaskType, "proof time", proofTimeSec)

if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter); err != nil {
if err = m.validator(ctx, proverTask, pk, proofMsg); err != nil {
m.proofFailure(ctx, proverTask, types.ProverTaskFailureTypeValidatedFailed)
return err
}

@@ -161,7 +162,8 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P

if verifyErr != nil || !success {
m.verifierFailureTotal.WithLabelValues(proverVersion).Inc()
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)

m.proofFailure(ctx, proverTask, types.ProverTaskFailureTypeVerifiedFailed)

log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
@@ -186,26 +188,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
return nil
}

func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, chunkHash string) error {
batchHash, err := m.chunkOrm.GetChunkBatchHash(ctx, chunkHash)
if err != nil {
return err
}

allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(ctx, batchHash)
if err != nil {
return err
}
if allReady {
err := m.batchOrm.UpdateChunkProofsStatusByBatchHash(ctx, batchHash, types.ChunkProofsStatusReady)
if err != nil {
return err
}
}
return nil
}

func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) (err error) {
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) (err error) {
defer func() {
if err != nil {
m.validateFailureTotal.Inc()
@@ -221,9 +204,9 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
// (ii) set the maximum failure retry times
log.Warn(
"cannot submit valid proof for a prover task twice",
"taskType", proverTask.TaskType, "hash", proofMsg.ID,
"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
"proverPublicKey", proverTask.ProverPublicKey,
"proof type", proverTask.TaskType, "hash", proofMsg.ID,
"prover name", proverTask.ProverName, "prover version", proverTask.ProverVersion,
"prover pk", proverTask.ProverPublicKey,
)
return ErrValidatorFailureProverTaskCannotSubmitTwice
}
@@ -232,51 +215,81 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
proofTimeSec := uint64(proofTime.Seconds())

if proofMsg.Status != message.StatusOk {
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
m.validateFailureProverTaskStatusNotOk.Inc()
log.Info("proof generated by prover failed",
"taskType", proofMsg.Type, "hash", proofMsg.ID,
"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
"proverPublicKey", pk, "failureType", proofParameter.FailureType, "failureMessage", proofParameter.FailureMsg)
log.Info("proof generated by prover failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "error", proofMsg.Error)

if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsg.Type, proofMsg.ID, pk, types.ProverProofInvalid); updateErr != nil {
log.Error("proof generated by prover failed update prover task proving status failure", "proof id", proofMsg.ID,
"prover name", proverTask.ProverName, "prover pk", pk, "prove type", proofMsg.Type, "error", proofMsg.Error)
}
return ErrValidatorFailureProofMsgStatusNotOk
}

// if prover task FailureType is SessionInfoFailureTimeout, the submit proof is timeout, need skip it
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
m.validateFailureProverTaskTimeout.Inc()
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType,
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "proof type", proverTask.TaskType,
"prover name", proverTask.ProverName, "prover public key", pk, "proof time", proofTimeSec)
return ErrValidatorFailureProofTimeout
}

// store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, pk, proofMsg); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "public key", pk,
"prover name", proverTask.ProverName, "error", updateTaskProofErr)
}

// if the batch/chunk have proved and verifier success, need skip this submit proof
if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
m.validateFailureProverTaskHaveVerifier.Inc()
log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
"proof type", proverTask.TaskType, "prover name", proverTask.ProverName, "prover public key", pk)
return ErrValidatorFailureTaskHaveVerifiedSuccess
}
return nil
}

//func (m *ProofReceiverLogic) proofFailure(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
// log.Info("proof failure update proof status", "hash", hash, "public key", pubKey,
// "proof type", proofMsg.Type.String(), "status", types.ProvingTaskFailed.String())
//
// if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskFailed, 0); err != nil {
// log.Error("failed to updated proof status ProvingTaskFailed", "hash", hash, "pubKey", pubKey, "error", err)
// }
//}
func (m *ProofReceiverLogic) proofFailure(ctx context.Context, proverTask *orm.ProverTask, failedType types.ProverTaskFailureType) {
err := m.db.Transaction(func(tx *gorm.DB) error {
// update prover task proving status as ProverProofInvalid
if err := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, message.ProofType(proverTask.TaskType),
proverTask.TaskID, proverTask.ProverPublicKey, types.ProverProofInvalid); err != nil {
log.Error("update prover task proving status failure", "hash", proverTask.TaskID, "pubKey", proverTask.ProverPublicKey,
"prover proving status", types.ProverProofInvalid, "err", err)
return err
}

// update prover task failure type as ProverTaskFailureTypeVerifiedFailed
if err := m.proverTaskOrm.UpdateProverTaskFailureType(ctx, message.ProofType(proverTask.TaskType),
proverTask.TaskID, proverTask.ProverPublicKey, failedType, tx); err != nil {
log.Error("update prover task failure type failure", "hash", proverTask.TaskID, "pubKey", proverTask.ProverPublicKey,
"prover failure type", failedType, "err", err)
return err
}

if message.ProofType(proverTask.TaskType) == message.ProofTypeChunk {
if err := m.chunkOrm.DecreaseActiveAttemptsByHash(ctx, proverTask.TaskID, tx); err != nil {
log.Error("decrease active attempts of chunk failure", "hash", proverTask.TaskID, "err", err)
return err
}
}
if message.ProofType(proverTask.TaskType) == message.ProofTypeBatch {
if err := m.batchOrm.DecreaseActiveAttemptsByHash(ctx, proverTask.TaskID, tx); err != nil {
log.Error("decrease active attempts of batch failure", "hash", proverTask.TaskID, "err", err)
return err
}
}
return nil
})
if err != nil {
log.Error("failed to update proof status as failed", "error", err)
}
}

func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
log.Info("proof recover update proof status", "hash", hash, "proverPublicKey", pubKey,
"taskType", proofMsg.Type.String(), "status", types.ProvingTaskUnassigned.String())
log.Info("proof recover update proof status", "hash", hash, "public key", pubKey,
"proof type", proofMsg.Type.String(), "status", types.ProvingTaskUnassigned.String())

if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskUnassigned, 0); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", hash, "pubKey", pubKey, "error", err)
@@ -284,11 +297,11 @@ func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubK
}

func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg, proofTimeSec uint64) error {
log.Info("proof close task update proof status", "hash", hash, "proverPublicKey", pubKey,
"taskType", proofMsg.Type.String(), "status", types.ProvingTaskVerified.String())
log.Info("proof close task update proof status", "hash", hash, "public key", pubKey,
"proof type", proofMsg.Type.String(), "status", types.ProvingTaskVerified.String())

if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskVerified, proofTimeSec); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "proverPublicKey", pubKey, "error", err)
log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "pubKey", pubKey, "error", err)
return err
}
return nil
@@ -332,30 +345,18 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
switch proofMsg.Type {
case message.ProofTypeChunk:
if err := m.chunkOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", hash, "error", err)
log.Error("failed to update chunk proving_status", "msg.ID", hash, "error", err)
return err
}
case message.ProofTypeBatch:
if err := m.batchOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
log.Error("failed to update batch proving_status as failed", "msg.ID", hash, "error", err)
log.Error("failed to update batch proving_status", "msg.ID", hash, "error", err)
return err
}
}
return nil
})

if err != nil {
return err
}

if status == types.ProvingTaskVerified && proofMsg.Type == message.ProofTypeChunk {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, hash); checkReadyErr != nil {
log.Error("failed to check are all chunk proofs ready", "error", checkReadyErr)
return checkReadyErr
}
}

return nil
return err
}

func (m *ProofReceiverLogic) checkIsTaskSuccess(ctx context.Context, hash string, proofType message.ProofType) bool {
@@ -41,6 +41,8 @@ type Batch struct {
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
TotalAttempts int16 `json:"total_attempts" gorm:"column:total_attempts;default:0"`
ActiveAttempts int16 `json:"active_attempts" gorm:"column:active_attempts;default:0"`

// rollup
RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"`
@@ -191,6 +193,8 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
BatchHeader: batchHeader.Encode(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
TotalAttempts: 0,
ActiveAttempts: 0,
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
}
@@ -209,19 +213,6 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
return &newBatch, nil
}

// UpdateChunkProofsStatusByBatchHash updates the status of chunk_proofs_status field for a given batch hash.
// The function will set the chunk_proofs_status to the status provided.
func (o *Batch) UpdateChunkProofsStatusByBatchHash(ctx context.Context, batchHash string, status types.ChunkProofsStatus) error {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash = ?", batchHash)

if err := db.Update("chunk_proofs_status", status).Error; err != nil {
return fmt.Errorf("Batch.UpdateChunkProofsStatusByBatchHash error: %w, batch hash: %v, status: %v", err, batchHash, status.String())
}
return nil
}

// UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
db := o.db
@@ -275,28 +266,52 @@ func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *messa
return nil
}

// UpdateUnassignedBatchReturning update the unassigned batch and return the update record
func (o *Batch) UpdateUnassignedBatchReturning(ctx context.Context, limit int) ([]*Batch, error) {
if limit < 0 {
return nil, errors.New("limit must not be smaller than zero")
// UpdateBatchAttemptsReturning atomically increments the attempts count for the earliest available batch that meets the conditions.
func (o *Batch) UpdateBatchAttemptsReturning(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
db := o.db.WithContext(ctx)
subQueryDB := db.Model(&Batch{})
subQueryDB = subQueryDB.Select("batch.index")
// Lock the selected row to ensure atomic updates
subQueryDB = subQueryDB.Clauses(clause.Locking{Strength: "UPDATE"})
subQueryDB = subQueryDB.Where("batch.total_attempts < ?", maxTotalAttempts)
subQueryDB = subQueryDB.Where("batch.active_attempts < ?", maxActiveAttempts)
subQueryDB = subQueryDB.Joins("JOIN chunk ON chunk.batch_hash = batch.hash AND chunk.proving_status = ?", types.ProvingTaskVerified)
subQueryDB = subQueryDB.Order("index ASC")
subQueryDB = subQueryDB.Limit(1)

// Perform the update and return the modified batch
var updatedBatch Batch
db = db.Model(&updatedBatch).Clauses(clause.Returning{})
db = db.Where("index = (?)", subQueryDB)
result := db.Updates(map[string]interface{}{
"total_attempts": gorm.Expr("total_attempts + ?", 1),
"active_attempts": gorm.Expr("active_attempts + ?", 1),
})

if result.Error != nil {
return nil, fmt.Errorf("failed to select and update batch, max active attempts: %v, max total attempts: %v, err: %w",
maxActiveAttempts, maxTotalAttempts, result.Error)
}
if limit == 0 {

if result.RowsAffected == 0 {
return nil, nil
}

db := o.db.WithContext(ctx)

subQueryDB := db.Model(&Batch{}).Select("index")
subQueryDB = subQueryDB.Where("proving_status = ? AND chunk_proofs_status = ?", types.ProvingTaskUnassigned, types.ChunkProofsStatusReady)
subQueryDB = subQueryDB.Order("index ASC")
subQueryDB = subQueryDB.Limit(limit)

var batches []*Batch
db = db.Model(&batches).Clauses(clause.Returning{})
db = db.Where("index = (?)", subQueryDB)
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
if err := db.Update("proving_status", types.ProvingTaskAssigned).Error; err != nil {
return nil, fmt.Errorf("Batch.UpdateUnassignedBatchReturning error: %w", err)
}
return batches, nil
return &updatedBatch, nil
}

// DecreaseActiveAttemptsByHash decrements the active_attempts of a batch given its hash.
func (o *Batch) DecreaseActiveAttemptsByHash(ctx context.Context, batchHash string, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash = ?", batchHash)

if err := db.UpdateColumn("active_attempts", gorm.Expr("active_attempts - ?", 1)).Error; err != nil {
return fmt.Errorf("Batch.DecreaseActiveAttemptsByHash error: %w, batch hash: %v", err, batchHash)
}

return nil
}
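To make the new selection explicit, here is roughly the statement UpdateBatchAttemptsReturning builds. This is an approximation for illustration only; GORM generates the exact SQL, the package name is hypothetical, and the positional parameters stand for maxTotalAttempts, maxActiveAttempts and types.ProvingTaskVerified.

package sketch

// updateBatchAttemptsSQL approximates the query above: the locked subquery picks
// the earliest batch still below both attempt limits that joins to chunk rows in
// proving_status = ProvingTaskVerified, and the UPDATE ... RETURNING hands the
// claimed row back in a single round trip.
const updateBatchAttemptsSQL = `
UPDATE batch SET
    total_attempts  = total_attempts + 1,
    active_attempts = active_attempts + 1
WHERE index = (
    SELECT batch.index FROM batch
    JOIN chunk ON chunk.batch_hash = batch.hash AND chunk.proving_status = $3 -- types.ProvingTaskVerified
    WHERE batch.total_attempts < $1
      AND batch.active_attempts < $2
    ORDER BY index ASC
    LIMIT 1
    FOR UPDATE
)
RETURNING *;`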
@@ -40,6 +40,8 @@ type Chunk struct {
ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
TotalAttempts int16 `json:"total_attempts" gorm:"column:total_attempts;default:0"`
ActiveAttempts int16 `json:"active_attempts" gorm:"column:active_attempts;default:0"`

// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
@@ -168,19 +170,6 @@ func (o *Chunk) GetAssignedChunks(ctx context.Context) ([]*Chunk, error) {
return chunks, nil
}

// CheckIfBatchChunkProofsAreReady checks if all proofs for all chunks of a given batchHash are collected.
func (o *Chunk) CheckIfBatchChunkProofsAreReady(ctx context.Context, batchHash string) (bool, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash = ? AND proving_status != ?", batchHash, types.ProvingTaskVerified)

var count int64
if err := db.Count(&count).Error; err != nil {
return false, fmt.Errorf("Chunk.CheckIfBatchChunkProofsAreReady error: %w, batch hash: %v", err, batchHash)
}
return count == 0, nil
}

// GetChunkBatchHash retrieves the batchHash of a given chunk.
func (o *Chunk) GetChunkBatchHash(ctx context.Context, chunkHash string) (string, error) {
db := o.db.WithContext(ctx)
@@ -259,6 +248,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
ParentChunkStateRoot: parentChunkStateRoot,
WithdrawRoot: chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
ProvingStatus: int16(types.ProvingTaskUnassigned),
TotalAttempts: 0,
ActiveAttempts: 0,
}

db := o.db
@@ -341,32 +332,52 @@ func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, e
return nil
}

// UpdateUnassignedChunkReturning update the unassigned batch which end_block_number <= height and return the update record
func (o *Chunk) UpdateUnassignedChunkReturning(ctx context.Context, height, limit int) ([]*Chunk, error) {
if height <= 0 {
return nil, errors.New("Chunk.UpdateUnassignedBatchReturning error: height must be larger than zero")
// UpdateChunkAttemptsReturning atomically increments the attempts count for the earliest available chunk that meets the conditions.
func (o *Chunk) UpdateChunkAttemptsReturning(ctx context.Context, proverBlockHeight int, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
db := o.db.WithContext(ctx)
subQueryDB := db.Model(&Chunk{})
subQueryDB = subQueryDB.Select("index")
// Lock the selected row to ensure atomic updates
subQueryDB = subQueryDB.Clauses(clause.Locking{Strength: "UPDATE"})
subQueryDB = subQueryDB.Where("total_attempts < ?", maxTotalAttempts)
subQueryDB = subQueryDB.Where("active_attempts < ?", maxActiveAttempts)
subQueryDB = subQueryDB.Where("end_block_number <= ?", proverBlockHeight)
subQueryDB = subQueryDB.Order("index ASC")
subQueryDB = subQueryDB.Limit(1)

// Perform the update and return the modified chunk
var updatedChunk Chunk
db = db.Model(&updatedChunk).Clauses(clause.Returning{})
db = db.Where("index = (?)", subQueryDB)
result := db.Updates(map[string]interface{}{
"total_attempts": gorm.Expr("total_attempts + ?", 1),
"active_attempts": gorm.Expr("active_attempts + ?", 1),
})

if result.Error != nil {
return nil, fmt.Errorf("failed to select and update batch, max active attempts: %v, max total attempts: %v, err: %w",
maxActiveAttempts, maxTotalAttempts, result.Error)
}
if limit < 0 {
return nil, errors.New("Chunk.UpdateUnassignedBatchReturning error: limit must not be smaller than zero")
}
if limit == 0 {

if result.RowsAffected == 0 {
return nil, nil
}

db := o.db.WithContext(ctx)

subQueryDB := db.Model(&Chunk{}).Select("index")
subQueryDB = subQueryDB.Where("proving_status = ?", types.ProvingTaskUnassigned)
subQueryDB = subQueryDB.Where("end_block_number <= ?", height)
subQueryDB = subQueryDB.Order("index ASC")
subQueryDB = subQueryDB.Limit(limit)

var chunks []*Chunk
db = db.Model(&chunks).Clauses(clause.Returning{})
db = db.Where("index = (?)", subQueryDB)
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
if err := db.Update("proving_status", types.ProvingTaskAssigned).Error; err != nil {
return nil, fmt.Errorf("Chunk.UpdateUnassignedBatchReturning error: %w", err)
}
return chunks, nil
return &updatedChunk, nil
}

// DecreaseActiveAttemptsByHash decrements the active_attempts of a chunk given its hash.
func (o *Chunk) DecreaseActiveAttemptsByHash(ctx context.Context, chunkHash string, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash = ?", chunkHash)

if err := db.UpdateColumn("active_attempts", gorm.Expr("active_attempts - ?", 1)).Error; err != nil {
return fmt.Errorf("Chunk.DecreaseActiveAttemptsByHash error: %w, chunk hash: %v", err, chunkHash)
}

return nil
}
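The two counters split the bookkeeping: active_attempts tracks provers currently holding a task, total_attempts how many assignments were ever handed out. A rough lifecycle sketch, assuming only the helpers shown above; assignOnce and its parameters are hypothetical:

package sketch

import (
	"context"

	"scroll-tech/coordinator/internal/orm"
)

// assignOnce shows the intended counter flow: one assignment bumps both counters;
// a timeout or invalid proof releases only the active slot, so total_attempts keeps
// growing until the SessionAttempts limit stops further reassignment.
func assignOnce(ctx context.Context, chunkOrm *orm.Chunk, proverHeight int, maxActive, maxTotal uint8) error {
	chunkTask, err := chunkOrm.UpdateChunkAttemptsReturning(ctx, proverHeight, maxActive, maxTotal) // +1 active, +1 total
	if err != nil || chunkTask == nil {
		return err // nil chunkTask: nothing eligible below the attempt limits
	}
	// ... hand chunkTask.Hash to a prover; if it times out or the proof is invalid ...
	return chunkOrm.DecreaseActiveAttemptsByHash(ctx, chunkTask.Hash) // -1 active, total unchanged
}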
@@ -94,6 +94,24 @@ func (o *ProverTask) GetProverTasks(ctx context.Context, fields map[string]inter
return proverTasks, nil
}

// GetFailedTaskAssignmentCount returns the number of times a task with the specified TaskID has been assigned and failed.
func (o *ProverTask) GetFailedTaskAssignmentCount(ctx context.Context, taskID string, dbTX ...*gorm.DB) (uint64, error) {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_id = ?", taskID)
db = db.Where("proving_status = ?", types.ProverProofInvalid)

var count int64
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("ProverTask.GetTaskAssignmentCount failed, taskID: %v, err: %w", taskID, err)
}
return uint64(count), nil
}

// GetProverTasksByHashes retrieves the ProverTask records associated with the specified hashes.
// The returned prover task objects are sorted in ascending order by their ids.
func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, hashes []string) ([]*ProverTask, error) {
@@ -143,6 +161,20 @@ func (o *ProverTask) GetProvingStatusByTaskID(ctx context.Context, taskID string
return types.ProverProveStatus(proverTask.ProvingStatus), nil
}

// GetFailureTypeByTaskID retrieves the failure type of a prover task
func (o *ProverTask) GetFailureTypeByTaskID(ctx context.Context, taskID string) (types.ProverTaskFailureType, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Select("failure_type")
db = db.Where("task_id = ?", taskID)

var proverTask ProverTask
if err := db.Find(&proverTask).Error; err != nil {
return types.ProverTaskFailureTypeUndefined, fmt.Errorf("ProverTask.GetFailureTypeByTaskID error: %w, taskID: %v", err, taskID)
}
return types.ProverTaskFailureType(proverTask.FailureType), nil
}

// GetTimeoutAssignedProverTasks get the timeout and assigned proving_status prover task
func (o *ProverTask) GetTimeoutAssignedProverTasks(ctx context.Context, limit int, taskType message.ProofType, timeout time.Duration) ([]ProverTask, error) {
db := o.db.WithContext(ctx)
@@ -160,32 +192,14 @@ func (o *ProverTask) GetTimeoutAssignedProverTasks(ctx context.Context, limit in
return proverTasks, nil
}

// TaskTimeoutMoreThanOnce get the timeout twice task. a temp design
func (o *ProverTask) TaskTimeoutMoreThanOnce(ctx context.Context, taskID string) bool {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_id", taskID)
db = db.Where("proving_status", int(types.ProverProofInvalid))

var count int64
if err := db.Count(&count).Error; err != nil {
return true
}

if count >= 1 {
return true
}

return false
}

// SetProverTask updates or inserts a ProverTask record.
func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask, dbTX ...*gorm.DB) error {
db := o.db.WithContext(ctx)
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}

db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}, {Name: "prover_version"}},
@@ -2,10 +2,8 @@ package types

// SubmitProofParameter the SubmitProof api request parameter
type SubmitProofParameter struct {
TaskID string `form:"task_id" json:"task_id" binding:"required"`
TaskType int `form:"task_type" json:"task_type" binding:"required"`
Status int `form:"status" json:"status"`
Proof string `form:"proof" json:"proof"`
FailureType int `form:"failure_type" json:"failure_type"`
FailureMsg string `form:"failure_msg" json:"failure_msg"`
TaskID string `form:"task_id" json:"task_id" binding:"required"`
TaskType int `form:"task_type" json:"task_type" binding:"required"`
Status int `form:"status" json:"status"`
Proof string `form:"proof" json:"proof"`
}
@@ -283,7 +283,7 @@ func testInvalidProof(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
|
||||
assert.NoError(t, err)
|
||||
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
|
||||
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// create mock provers.
|
||||
@@ -292,13 +292,17 @@ func testInvalidProof(t *testing.T) {
|
||||
var proofType message.ProofType
|
||||
if i%2 == 0 {
|
||||
proofType = message.ProofTypeChunk
|
||||
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType)
|
||||
proverTask := provers[i].getProverTask(t, proofType)
|
||||
assert.NotNil(t, proverTask)
|
||||
provers[i].submitProof(t, proverTask, verifiedSuccess, types.Success)
|
||||
} else {
|
||||
proofType = message.ProofTypeBatch
|
||||
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType)
|
||||
proverTask := provers[i].getProverTask(t, proofType)
|
||||
assert.NotNil(t, proverTask)
|
||||
provers[i].submitProof(t, proverTask, verifiedFailed, types.ErrCoordinatorHandleZkProofFailure)
|
||||
}
|
||||
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType)
|
||||
proverTask := provers[i].getProverTask(t, proofType)
|
||||
assert.NotNil(t, proverTask)
|
||||
provers[i].submitProof(t, proverTask, verifiedFailed, types.ErrCoordinatorHandleZkProofFailure)
|
||||
}
|
||||
|
||||
// verify proof status
|
||||
@@ -307,17 +311,25 @@ func testInvalidProof(t *testing.T) {
|
||||
tickStop = time.Tick(time.Minute)
|
||||
)
|
||||
|
||||
var chunkProofStatus types.ProvingStatus
|
||||
var batchProofStatus types.ProvingStatus
|
||||
var (
|
||||
chunkProofStatus types.ProverProveStatus
|
||||
batchProofStatus types.ProverProveStatus
|
||||
batchFailureType types.ProverTaskFailureType
|
||||
)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-tick:
|
||||
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
|
||||
chunkProofStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), dbChunk.Hash)
|
||||
assert.NoError(t, err)
|
||||
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
|
||||
batchProofStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), batch.Hash)
|
||||
assert.NoError(t, err)
|
||||
if chunkProofStatus == types.ProvingTaskUnassigned && batchProofStatus == types.ProvingTaskUnassigned {
|
||||
batchFailureType, err = proverTaskOrm.GetFailureTypeByTaskID(context.Background(), batch.Hash)
|
||||
assert.NoError(t, err)
|
||||
|
||||
if chunkProofStatus == types.ProverProofValid &&
|
||||
batchProofStatus == types.ProverProofInvalid &&
|
||||
batchFailureType == types.ProverTaskFailureTypeVerifiedFailed {
|
||||
return
|
||||
}
|
||||
case <-tickStop:
|
||||
@@ -342,20 +354,12 @@ func testProofGeneratedFailed(t *testing.T) {
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)

// create mock provers.
provers := make([]*mockProver, 2)
provers := make([]*mockProver, 1)
for i := 0; i < len(provers); i++ {
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
proofType = message.ProofTypeChunk
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType)
proverTask := provers[i].getProverTask(t, proofType)
assert.NotNil(t, proverTask)
@@ -370,9 +374,7 @@ func testProofGeneratedFailed(t *testing.T) {

var (
chunkProofStatus types.ProvingStatus
batchProofStatus types.ProvingStatus
chunkProverTaskProvingStatus types.ProverProveStatus
batchProverTaskProvingStatus types.ProverProveStatus
)

for {
@@ -380,21 +382,17 @@ func testProofGeneratedFailed(t *testing.T) {
case <-tick:
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned {
if chunkProofStatus == types.ProvingTaskAssigned {
return
}

chunkProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
batchProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProverTaskProvingStatus == types.ProverProofInvalid && batchProverTaskProvingStatus == types.ProverProofInvalid {
if chunkProverTaskProvingStatus == types.ProverProofInvalid {
return
}
case <-tickStop:
t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String())
t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String())
return
}
}
@@ -414,32 +412,14 @@ func testTimeoutProof(t *testing.T) {
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)

// create first chunk & batch mock prover, that will not send any proof.
chunkProver1 := newMockProver(t, "prover_test"+strconv.Itoa(0), coordinatorURL, message.ProofTypeChunk)
proverChunkTask := chunkProver1.getProverTask(t, message.ProofTypeChunk)
assert.NotNil(t, proverChunkTask)

batchProver1 := newMockProver(t, "prover_test"+strconv.Itoa(1), coordinatorURL, message.ProofTypeBatch)
proverBatchTask := batchProver1.getProverTask(t, message.ProofTypeBatch)
assert.NotNil(t, proverBatchTask)

// verify proof status, it should be assigned, because prover didn't send any proof
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, chunkProofStatus, types.ProvingTaskAssigned)

batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, batchProofStatus, types.ProvingTaskAssigned)

// wait coordinator to reset the prover task proving status
time.Sleep(time.Duration(conf.ProverManager.BatchCollectionTimeSec*2) * time.Second)
time.Sleep(time.Duration(conf.ProverManager.ChunkCollectionTimeSec*2) * time.Second)

// create second mock prover, that will send valid proof.
chunkProver2 := newMockProver(t, "prover_test"+strconv.Itoa(2), coordinatorURL, message.ProofTypeChunk)
@@ -447,17 +427,8 @@ func testTimeoutProof(t *testing.T) {
assert.NotNil(t, proverChunkTask2)
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success)

batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch)
proverBatchTask2 := batchProver2.getProverTask(t, message.ProofTypeBatch)
assert.NotNil(t, proverBatchTask2)
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success)

// verify proof status, it should be verified now, because second prover sent valid proof
chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, chunkProofStatus2, types.ProvingTaskVerified)

batchProofStatus2, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, batchProofStatus2, types.ProvingTaskVerified)
}

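The timeout test above sleeps for twice the collection window (ChunkCollectionTimeSec) and then expects the coordinator to hand the same task to a second prover. A tiny illustrative helper for that kind of deadline check (the parameter names are assumptions for the sketch, not the coordinator's actual code):

package main

import "time"

// assignmentExpired reports whether a prover task assigned at assignedAt has
// outlived its collection window and may be reassigned to another prover.
// collectionTimeSec plays the role of config values such as
// ProverManager.ChunkCollectionTimeSec.
func assignmentExpired(assignedAt time.Time, collectionTimeSec int64) bool {
	return time.Since(assignedAt) > time.Duration(collectionTimeSec)*time.Second
}
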
@@ -24,6 +24,8 @@ create table chunk
prover_assigned_at TIMESTAMP(0) DEFAULT NULL,
proved_at TIMESTAMP(0) DEFAULT NULL,
proof_time_sec INTEGER DEFAULT NULL,
total_attempts SMALLINT NOT NULL DEFAULT 0,
active_attempts SMALLINT NOT NULL DEFAULT 0,

-- batch
batch_hash VARCHAR DEFAULT NULL,
@@ -50,6 +52,10 @@ on chunk (hash) where deleted_at IS NULL;
create index batch_hash_index
on chunk (batch_hash) where deleted_at IS NULL;

create index chunk_idx_total_attempts_and_active_attempts_and_end_block_number
on chunk (total_attempts, active_attempts, end_block_number)
where deleted_at IS NULL;

-- +goose StatementEnd

-- +goose Down

@@ -22,6 +22,8 @@ create table batch
prover_assigned_at TIMESTAMP(0) DEFAULT NULL,
proved_at TIMESTAMP(0) DEFAULT NULL,
proof_time_sec INTEGER DEFAULT NULL,
total_attempts SMALLINT NOT NULL DEFAULT 0,
active_attempts SMALLINT NOT NULL DEFAULT 0,

-- rollup
rollup_status SMALLINT NOT NULL DEFAULT 1,
@@ -46,6 +48,10 @@ on batch (index) where deleted_at IS NULL;
create unique index batch_hash_uindex
on batch (hash) where deleted_at IS NULL;

create index batch_idx_total_attempts_and_active_attempts_and_chunk_proofs_status
on batch (total_attempts, active_attempts, chunk_proofs_status)
where deleted_at IS NULL;

comment
on column batch.chunk_proofs_status is 'undefined, pending, ready';

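Both migrations above add attempt counters and a partial index over them. A hedged sketch of the kind of task-selection query an index on (total_attempts, active_attempts, ...) could serve, written against database/sql; the SQL shape, thresholds, and ordering are assumptions for illustration, not the coordinator's actual query:

package main

import (
	"context"
	"database/sql"
)

// pickProvableChunk selects a live chunk whose attempt counters are still
// below the given caps. A query of this shape is what the new partial index
// on (total_attempts, active_attempts, end_block_number) would support.
func pickProvableChunk(ctx context.Context, db *sql.DB, maxTotal, maxActive int) (string, error) {
	const q = `
		SELECT hash
		FROM chunk
		WHERE deleted_at IS NULL
		  AND total_attempts < $1
		  AND active_attempts < $2
		ORDER BY end_block_number ASC
		LIMIT 1`
	var hash string
	err := db.QueryRowContext(ctx, q, maxTotal, maxActive).Scan(&hash)
	return hash, err
}
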
@@ -1,15 +0,0 @@
-- +goose Up
-- +goose StatementBegin
drop index l1_message_hash_uindex;

create index l1_message_hash_index
on l1_message (msg_hash) where deleted_at IS NULL;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop index l1_message_hash_index;

create unique index l1_message_hash_uindex
on l1_message (msg_hash) where deleted_at IS NULL;
-- +goose StatementEnd
@@ -1,6 +1,7 @@
package app

import (
"context"
"fmt"
"os"
"os/signal"
@@ -10,6 +11,7 @@ import (
"github.com/urfave/cli/v2"

"scroll-tech/common/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"

@@ -53,6 +55,14 @@ func action(ctx *cli.Context) error {
}
}()

subCtx, cancel := context.WithCancel(ctx.Context)
defer func() {
cancel()
}()

// Start metrics server.
metrics.Serve(subCtx, ctx)

// init Prover Stats API
port := ctx.String(httpPortFlag.Name)

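The hunk above derives a cancellable sub-context and passes it to the repository's metrics.Serve helper. A generic stand-in for that wiring, using a plain HTTP server with the Prometheus handler (the helper below and the port are assumptions, not the repository's metrics package):

package main

import (
	"context"
	"net/http"
	"os"
	"os/signal"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// serveMetrics exposes the default Prometheus registry on /metrics and shuts
// the server down once the supplied context is cancelled.
func serveMetrics(ctx context.Context, addr string) {
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	srv := &http.Server{Addr: addr, Handler: mux}

	go func() {
		<-ctx.Done()
		_ = srv.Shutdown(context.Background())
	}()
	go func() {
		_ = srv.ListenAndServe()
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	serveMetrics(ctx, ":6060") // the port is an arbitrary example

	// Block until interrupted, mirroring the signal handling in the app above.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	<-sig
}
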
@@ -37,8 +37,11 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv
SetBaseURL(cfg.BaseURL).
AddRetryCondition(func(r *resty.Response, _ error) bool {
// Check for HTTP 5xx errors, e.g., coordinator is restarting.
log.Warn("Received unexpected HTTP response. Retrying...", "status code", r.StatusCode())
return r.StatusCode() >= http.StatusInternalServerError
if r.StatusCode() >= http.StatusInternalServerError {
log.Warn("Received unexpected HTTP response. Retrying...", "status code", r.StatusCode())
return true
}
return false
})

log.Info("successfully initialized prover client",
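
The change above makes the retry condition log only when a retry will actually be triggered (HTTP 5xx) instead of on every response. A self-contained sketch of that pattern with go-resty; the retry count, base URL, and standard-library logger are placeholders:

package main

import (
	"log"
	"net/http"

	"github.com/go-resty/resty/v2"
)

// newRetryingClient builds a resty client that retries only on HTTP 5xx
// responses and logs just before a retry fires, mirroring the condition in
// the hunk above.
func newRetryingClient(baseURL string) *resty.Client {
	return resty.New().
		SetBaseURL(baseURL).
		SetRetryCount(3).
		AddRetryCondition(func(r *resty.Response, _ error) bool {
			if r.StatusCode() >= http.StatusInternalServerError {
				log.Printf("unexpected HTTP response, retrying: status=%d", r.StatusCode())
				return true
			}
			return false
		})
}
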
@@ -175,20 +178,17 @@ func (c *CoordinatorClient) SubmitProof(ctx context.Context, req *SubmitProofReq
Post("/coordinator/v1/submit_proof")

if err != nil {
log.Error("submit proof request failed: %v", err)
return fmt.Errorf("submit proof request failed: %w", ConnectErr)
return fmt.Errorf("submit proof request failed: %v", err)
}

if resp.StatusCode() != 200 {
log.Error("failed to submit proof, status code: %v", resp.StatusCode())
return fmt.Errorf("failed to submit proof, status code not 200: %w", ConnectErr)
return fmt.Errorf("failed to submit proof, status code: %v", resp.StatusCode())
}

if result.ErrCode == types.ErrJWTTokenExpired {
log.Info("JWT expired, attempting to re-login")
if err := c.Login(ctx); err != nil {
log.Error("JWT expired, re-login failed: %v", err)
return fmt.Errorf("JWT expired, re-login failed: %w", ConnectErr)
return fmt.Errorf("JWT expired, re-login failed: %v", err)
}
log.Info("re-login success")
return c.SubmitProof(ctx, req)

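The hunk above replaces sentinel wrapping (%w with the package-level ConnectErr) by plain %v formatting. For reference, a minimal sketch of the difference: %w keeps the sentinel in the error chain so callers can classify it with errors.Is, while %v flattens it into an opaque string (the names below are illustrative):

package main

import (
	"errors"
	"fmt"
)

// errConnect is an illustrative sentinel playing the role of ConnectErr.
var errConnect = errors.New("connect coordinator error")

func submitWrapped() error {
	// %w keeps errConnect in the error chain.
	return fmt.Errorf("submit proof request failed: %w", errConnect)
}

func submitPlain(cause error) error {
	// %v flattens the cause into a string; errors.Is can no longer match it.
	return fmt.Errorf("submit proof request failed: %v", cause)
}

func main() {
	fmt.Println(errors.Is(submitWrapped(), errConnect))         // true
	fmt.Println(errors.Is(submitPlain(errConnect), errConnect)) // false
}
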
@@ -1,13 +1,9 @@
package client

import (
"errors"

"scroll-tech/common/types/message"
)

var ConnectErr = errors.New("connect coordinator error")

// ChallengeResponse defines the response structure for random API
type ChallengeResponse struct {
ErrCode int `json:"errcode"`
@@ -57,12 +53,10 @@ type GetTaskResponse struct {

// SubmitProofRequest defines the request structure for the SubmitProof API.
type SubmitProofRequest struct {
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
Status int `json:"status"`
Proof string `json:"proof"`
FailureType int `json:"failure_type,omitempty"`
FailureMsg string `json:"failure_msg,omitempty"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
Status int `json:"status"`
Proof string `json:"proof"`
}

// SubmitProofResponse defines the response structure for the SubmitProof API.

@@ -151,9 +151,16 @@ func (r *Prover) proveAndSubmit() error {
}
}

defer func() {
err = r.stack.Delete(task.Task.ID)
if err != nil {
log.Error("prover stack pop failed!", "err", err)
}
}()

var proofMsg *message.ProofDetail
if task.Times <= 2 {
// If tried times <= 2, try to proof the task.
// If panic times <= 2, try to proof the task.
if err = r.stack.UpdateTimes(task, task.Times+1); err != nil {
return fmt.Errorf("failed to update times on stack: %v", err)
}
@@ -161,15 +168,15 @@ func (r *Prover) proveAndSubmit() error {
log.Info("start to prove task", "task-type", task.Task.Type, "task-id", task.Task.ID)
proofMsg, err = r.prove(task)
if err != nil { // handling error from prove
log.Error("failed to prove task", "task_type", task.Task.Type, "task-id", task.Task.ID, "err", err)
return r.submitErr(task, true, message.ProofFailureNoPanic, err)
return fmt.Errorf("failed to prove task, task-type: %v, err: %v", task.Task.Type, err)
}

return r.submitProof(proofMsg)
}

// if tried times >= 3, it's probably due to circuit proving panic
log.Error("zk proving panic for task", "task-type", task.Task.Type, "task-id", task.Task.ID)
return r.submitErr(task, message.ProofFailurePanic, errors.New("zk proving panic for task"))
// when the prover has more than 3 times panic,
// it will omit to prove the task, submit StatusProofError and then Delete the task.
return fmt.Errorf("zk proving panic for task, task-type: %v, task-id: %v", task.Task.Type, task.Task.ID)
}

// fetchTaskFromCoordinator fetches a new task from the server
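
The proveAndSubmit changes above bump a per-task attempt counter in the local stack and refuse to re-prove a task once it has likely panicked too many times. A minimal sketch of that bump-counter-then-decide flow (the task type and helper below are illustrative stand-ins, not the prover's real stack API):

package main

import (
	"errors"
	"fmt"
)

// provingTask is an illustrative stand-in for the prover's stored task record.
type provingTask struct {
	ID    string
	Times int // attempts made so far; bumped before each new try
}

const maxAttempts = 3 // matches the "task.Times <= 2" guard above

// proveOnce mirrors the control flow of proveAndSubmit: either bump the
// counter and attempt the proof, or give up once the task has exceeded the
// attempt budget (usually a sign of a circuit proving panic).
func proveOnce(task *provingTask, prove func() error) error {
	if task.Times >= maxAttempts {
		return fmt.Errorf("giving up on task %s after %d attempts (likely proving panic)", task.ID, task.Times)
	}
	task.Times++ // the real prover persists this via r.stack.UpdateTimes
	if err := prove(); err != nil {
		return fmt.Errorf("failed to prove task %s: %w", task.ID, err)
	}
	return nil
}

func main() {
	task := &provingTask{ID: "task-1"}
	fmt.Println(proveOnce(task, func() error { return errors.New("circuit error") }))
}
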
@@ -322,43 +329,10 @@ func (r *Prover) submitProof(msg *message.ProofDetail) error {

// send the submit request
if err := r.coordinatorClient.SubmitProof(r.ctx, req); err != nil {
if !errors.Is(errors.Unwrap(err), client.ConnectErr) {
if deleteErr := r.stack.Delete(msg.ID); deleteErr != nil {
log.Error("prover stack pop failed", "task_type", msg.Type, "task_id", msg.ID, "err", deleteErr)
}
}
return fmt.Errorf("error submitting proof: %v", err)
}

log.Info("proof submitted successfully", "task-id", msg.ID, "task-type", msg.Type, "task-status", msg.Status, "err", msg.Error)

return nil
}

func (r *Prover) submitErr(task *store.ProvingTask, isRetry bool, proofFailureType message.ProofFailureType, err error) error {
// prepare the submit request
req := &client.SubmitProofRequest{
TaskID: task.Task.ID,
TaskType: int(task.Task.Type),
Status: int(message.StatusProofError),
Proof: "",
FailureType: int(proofFailureType),
FailureMsg: err.Error(),
}

// send the submit request
if submitErr := r.coordinatorClient.SubmitProof(r.ctx, req); submitErr != nil {
if !errors.Is(errors.Unwrap(err), client.ConnectErr) {
if deleteErr := r.stack.Delete(task.Task.ID); deleteErr != nil {
log.Error("prover stack pop failed", "task_type", task.Task.Type, "task_id", task.Task.ID, "err", deleteErr)
}
}
return fmt.Errorf("error submitting proof: %v", submitErr)
}

log.Info("proof submitted report failure successfully",
"task-id", task.Task.ID, "task-type", task.Task.Type,
"task-status", message.StatusProofError, "err", err)
return nil
}