Compare commits


14 Commits

Author SHA1 Message Date
georgehao
2764018553 feat: update 2023-08-18 16:46:24 +08:00
georgehao
4180105c45 feat: lint 2023-08-18 16:28:14 +08:00
georgehao
0292a772fd chore: auto version bump [bot] 2023-08-18 08:24:13 +00:00
georgehao
d2d75d8ee9 Merge branch 'develop' into fix/prover_bug 2023-08-18 16:23:55 +08:00
georgehao
fca6fc1579 feat: fix bug 2023-08-18 16:21:39 +08:00
Péter Garamvölgyi
8542a176ed fix: drop l1 message hash unique index (#823)
Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
2023-08-18 09:47:05 +02:00
Steven
ea40517527 fix: upgrade to use scroll-prover v0.6.2 (#821)
Co-authored-by: silathdiir <silathdiir@users.noreply.github.com>
2023-08-18 15:04:19 +08:00
georgehao
1f151f6ae5 feat(bridge): add metric bridge (#808)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2023-08-18 14:51:57 +08:00
georgehao
ca76835712 fix(coordinator): fix coordinator recover proving status from prover error (#820)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2023-08-18 12:26:36 +08:00
HAOYUatHZ
247dd24a8f fix(prover): prover should pop task each time it submits (#819)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-08-18 11:29:07 +08:00
Steven
b006eaa33e fix: upgrade to scroll-prover v0.6.0 (#817)
Co-authored-by: silathdiir <silathdiir@users.noreply.github.com>
2023-08-18 07:12:22 +08:00
HAOYUatHZ
ee4d7c3549 build: update Makefile (#818)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-08-18 05:42:13 +08:00
Péter Garamvölgyi
3c3f56b9b0 feat: allow overwriting bridge-history batch indexing start height (#816) 2023-08-17 17:25:15 +02:00
HAOYUatHZ
95121093c8 feat(prover): prover report err (#815)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
2023-08-17 21:19:08 +08:00
44 changed files with 878 additions and 282 deletions

View File

@@ -1,5 +1,7 @@
.PHONY: check update dev_docker build_test_docker run_test_docker clean
L2GETH_TAG=scroll-v4.3.34
help: ## Display this help message
@grep -h \
-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
@@ -15,14 +17,14 @@ lint: ## The code's format and security checks.
update: ## update dependencies
go work sync
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .

View File

@@ -95,6 +95,9 @@ func (b *BatchInfoFetcher) fetchBatchInfo() error {
} else {
startHeight = latestBatchHeight + 1
}
if startHeight < b.batchInfoStartNumber {
startHeight = b.batchInfoStartNumber
}
for from := startHeight; number >= from; from += fetchLimit {
to := from + fetchLimit - 1
// number - confirmation can never less than 0 since the for loop condition
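Note: the three added lines above clamp the indexing start height to a configurable lower bound, which is how the "allow overwriting bridge-history batch indexing start height" change (#816) takes effect. Below is a minimal, self-contained sketch of that clamping plus the paging loop; the names and numbers are illustrative, not the real BatchInfoFetcher fields.

package main

import "fmt"

// computeStartHeight mirrors the hunk above: resume from the block after the
// latest indexed batch, but never start below the configured override.
func computeStartHeight(latestBatchHeight, startNumberOverride uint64) uint64 {
	startHeight := latestBatchHeight + 1
	if startHeight < startNumberOverride {
		startHeight = startNumberOverride
	}
	return startHeight
}

func main() {
	const fetchLimit = 16
	start := computeStartHeight(42, 1_000_000) // override wins: 1000000
	number := uint64(1_000_040)                // assumed safe head height
	for from := start; number >= from; from += fetchLimit {
		to := from + fetchLimit - 1
		if to > number { // cap the window to the assumed safe head
			to = number
		}
		fmt.Printf("fetch batches in range [%d, %d]\n", from, to)
	}
}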

View File

@@ -7,6 +7,7 @@ import (
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -59,8 +60,8 @@ func action(ctx *cli.Context) error {
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
@@ -72,8 +73,12 @@ func action(ctx *cli.Context) error {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
go utils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
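Note: the hunks above replace the old metrics.Serve(subCtx, ctx) call with an explicit Prometheus registry that is handed both to the metrics HTTP server and to every watcher and relayer constructor, so all components register their collectors in one place. The real scroll-tech/common metrics.Server takes the urfave/cli context (presumably to read its listen flags); the sketch below only illustrates the registry-backed endpoint idea, using a plain address string and the standard promhttp handler as assumptions of this sketch.

package metricsdemo

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Server exposes exactly the collectors registered on reg; components that
// receive the same registry (watchers, relayers, senders) all show up on a
// single /metrics endpoint.
func Server(addr string, reg *prometheus.Registry) {
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	go func() {
		// Errors are ignored only to keep the sketch short.
		_ = http.ListenAndServe(addr, mux)
	}()
}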

View File

@@ -7,6 +7,7 @@ import (
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -61,8 +62,8 @@ func action(ctx *cli.Context) error {
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
@@ -78,14 +79,14 @@ func action(ctx *cli.Context) error {
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */, registry)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err

View File

@@ -7,6 +7,7 @@ import (
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -59,10 +60,10 @@ func action(ctx *cli.Context) error {
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err

View File

@@ -7,6 +7,7 @@ import (
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -62,8 +63,8 @@ func action(ctx *cli.Context) error {
}
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
@@ -73,26 +74,26 @@ func action(ctx *cli.Context) error {
}
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis, registry)
if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
return err
}
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db)
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db, registry)
if err != nil {
log.Error("failed to create chunkProposer", "config file", cfgFile, "error", err)
return err
}
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db, registry)
if err != nil {
log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
return err
}
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
// Watcher loop to fetch missing blocks
go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {

View File

@@ -5,6 +5,7 @@ go 1.19
require (
github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/prometheus/client_golang v1.14.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.3
@@ -13,6 +14,7 @@ require (
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@@ -20,6 +22,7 @@ require (
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect
@@ -35,8 +38,12 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
@@ -54,7 +61,7 @@ require (
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -2,6 +2,8 @@ github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.9.0 h1:PDiKKybR596O6FHW+RVSG0Z7uGCBNbmbUXh3uCNQ7Hc=
github.com/agiledragon/gomonkey/v2 v2.9.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -31,8 +33,14 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -62,11 +70,8 @@ github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlT
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
@@ -77,6 +82,8 @@ github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APP
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
@@ -90,6 +97,14 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
@@ -140,6 +155,7 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
@@ -157,9 +173,13 @@ golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=

View File

@@ -6,15 +6,13 @@ import (
"fmt"
"math/big"
// not sure if this will make problems when relay with l1geth
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
@@ -23,11 +21,6 @@ import (
"scroll-tech/bridge/internal/orm"
)
var (
bridgeL1MsgsRelayedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/total", metrics.ScrollRegistry)
bridgeL1MsgsRelayedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
)
// Layer1Relayer is responsible for
// 1. fetch pending L1Message from db
// 2. relay pending message to layer 2 node
@@ -54,17 +47,18 @@ type Layer1Relayer struct {
l1MessageOrm *orm.L1Message
l1BlockOrm *orm.L1Block
metrics *l1RelayerMetrics
}
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey)
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, reg prometheus.Registerer) (*Layer1Relayer, error) {
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey, "l1_relayer", "message_sender", reg)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new message sender failed for address %s, err: %v", addr.Hex(), err)
}
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey)
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l1_relayer", "gas_oracle_sender", reg)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %v", addr.Hex(), err)
@@ -86,6 +80,7 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
}
l1Relayer := &Layer1Relayer{
cfg: cfg,
ctx: ctx,
l1MessageOrm: orm.NewL1Message(db),
l1BlockOrm: orm.NewL1Block(db),
@@ -100,10 +95,10 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
cfg: cfg,
}
l1Relayer.metrics = initL1RelayerMetrics(reg)
go l1Relayer.handleConfirmLoop(ctx)
return l1Relayer, nil
}
@@ -123,7 +118,9 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
for _, msg := range msgs {
tmpMsg := msg
r.metrics.bridgeL1RelayedMsgsTotal.Inc()
if err = r.processSavedEvent(&tmpMsg); err != nil {
r.metrics.bridgeL1RelayedMsgsFailureTotal.Inc()
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
}
@@ -145,7 +142,6 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
if err != nil {
return err
}
bridgeL1MsgsRelayedTotalCounter.Inc(1)
log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash)
err = r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
@@ -157,6 +153,7 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
// ProcessGasPriceOracle imports gas price to layer2
func (r *Layer1Relayer) ProcessGasPriceOracle() {
r.metrics.bridgeL1RelayerGasPriceOraclerRunTotal.Inc()
latestBlockHeight, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
@@ -201,6 +198,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
return
}
r.lastGasPrice = block.BaseFee
r.metrics.bridgeL1RelayerLastGasPrice.Set(float64(r.lastGasPrice))
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee)
}
}
@@ -212,7 +210,7 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
case <-ctx.Done():
return
case cfm := <-r.messageSender.ConfirmChan():
bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
r.metrics.bridgeL1MsgsRelayedConfirmedTotal.Inc()
if !cfm.IsSuccessful {
err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
if err != nil {
@@ -228,6 +226,7 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
case cfm := <-r.gasOracleSender.ConfirmChan():
r.metrics.bridgeL1MsgsRelayedConfirmedTotal.Inc()
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())

View File

@@ -0,0 +1,54 @@
package relayer
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l1RelayerMetrics struct {
bridgeL1RelayedMsgsTotal prometheus.Counter
bridgeL1RelayedMsgsFailureTotal prometheus.Counter
bridgeL1RelayerGasPriceOraclerRunTotal prometheus.Counter
bridgeL1RelayerLastGasPrice prometheus.Gauge
bridgeL1MsgsRelayedConfirmedTotal prometheus.Counter
bridgeL1GasOraclerConfirmedTotal prometheus.Counter
}
var (
initL1RelayerMetricOnce sync.Once
l1RelayerMetric *l1RelayerMetrics
)
func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
initL1RelayerMetricOnce.Do(func() {
l1RelayerMetric = &l1RelayerMetrics{
bridgeL1RelayedMsgsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_msg_relayed_total",
Help: "The total number of the l1 relayed message.",
}),
bridgeL1RelayedMsgsFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_msg_relayed_failure_total",
Help: "The total number of the l1 relayed failure message.",
}),
bridgeL1MsgsRelayedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_relayed_confirmed_total",
Help: "The total number of layer1 relayed confirmed",
}),
bridgeL1RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_gas_price_oracler_total",
Help: "The total number of layer1 gas price oracler run total",
}),
bridgeL1RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_layer1_gas_price_latest_gas_price",
Help: "The latest gas price of bridge relayer l1",
}),
bridgeL1GasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_gas_oracler_confirmed_total",
Help: "The total number of layer1 relayed confirmed",
}),
}
})
return l1RelayerMetric
}
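Note: the new l1_relayer_metrics.go above registers its collectors through promauto.With(reg) inside a package-level sync.Once. The Once matters because a Prometheus registry rejects a second collector with the same fully-qualified name, so constructing more than one Layer1Relayer in a single process (as the tests below do) must reuse the already-registered counters. A small illustration of that duplicate-registration behaviour using the plain Registry API; the metric name is made up.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	first := prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total", Help: "demo"})
	fmt.Println(reg.Register(first)) // <nil>: first registration succeeds

	// A second collector under the same name is rejected; promauto's MustRegister
	// path would panic instead, which is what the sync.Once guard avoids.
	dup := prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total", Help: "demo"})
	if err := reg.Register(dup); err != nil {
		fmt.Println("duplicate registration rejected:", err)
	}
}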

View File

@@ -62,7 +62,7 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewL1Relayer(t *testing.T) {
db := setupL1RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
}
@@ -72,7 +72,7 @@ func testL1RelayerProcessSaveEvents(t *testing.T) {
defer database.CloseDB(db)
l1MessageOrm := orm.NewL1Message(db)
l1Cfg := cfg.L1Config
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
assert.NoError(t, l1MessageOrm.SaveL1Messages(context.Background(), templateL1Message))
@@ -99,7 +99,7 @@ func testL1RelayerMsgConfirm(t *testing.T) {
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
assert.NoError(t, err)
// Simulate message confirmations.
@@ -138,7 +138,7 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
assert.NoError(t, err)
// Simulate message confirmations.
@@ -168,7 +168,7 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
assert.NoError(t, err)
assert.NotNil(t, l1Relayer)
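Note: the test updates above pass nil as the new registry argument. With client_golang v1.14 (the version added in go.mod above), promauto.With(nil) returns a factory that creates unregistered collectors, so the metrics code still runs in tests without touching any global registry. A tiny sketch of that behaviour:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	// nil Registerer: the counter is created but registered nowhere, which is
	// why NewLayer1Relayer(..., nil) is safe in the tests above.
	c := promauto.With(nil).NewCounter(prometheus.CounterOpts{
		Name: "demo_unregistered_total",
		Help: "demo",
	})
	c.Inc()
	fmt.Println("counter usable without a registry")
}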

View File

@@ -8,16 +8,15 @@ import (
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
@@ -26,13 +25,6 @@ import (
"scroll-tech/bridge/internal/orm"
)
var (
bridgeL2BatchesFinalizedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
)
// Layer2Relayer is responsible for
// 1. Committing and finalizing L2 blocks on L1
// 2. Relaying messages from L2 to L1
@@ -78,29 +70,30 @@ type Layer2Relayer struct {
// A list of processing batch finalization.
// key(string): confirmation ID, value(string): batch hash.
processingFinalization sync.Map
metrics *l2RelayerMetrics
}
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool) (*Layer2Relayer, error) {
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey)
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool, reg prometheus.Registerer) (*Layer2Relayer, error) {
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey, "l2_relayer", "message_sender", reg)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new message sender failed for address %s, err: %w", addr.Hex(), err)
}
commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey)
commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey, "l2_relayer", "commit_sender", reg)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.CommitSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new commit sender failed for address %s, err: %w", addr.Hex(), err)
}
finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderPrivateKey)
finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderPrivateKey, "l2_relayer", "finalize_sender", reg)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.FinalizeSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new finalize sender failed for address %s, err: %w", addr.Hex(), err)
}
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey)
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l2_relayer", "gas_oracle_sender", reg)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %w", addr.Hex(), err)
@@ -158,6 +151,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
}
}
layer2Relayer.metrics = initL2RelayerMetrics(reg)
go layer2Relayer.handleConfirmLoop(ctx)
return layer2Relayer, nil
@@ -275,6 +269,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
// ProcessGasPriceOracle imports gas price to layer1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
r.metrics.bridgeL2RelayerGasPriceOraclerRunTotal.Inc()
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
if batch == nil || err != nil {
log.Error("Failed to GetLatestBatch", "batch", batch, "err", err)
@@ -312,6 +307,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
return
}
r.lastGasPrice = suggestGasPriceUint64
r.metrics.bridgeL2RelayerLastGasPrice.Set(float64(r.lastGasPrice))
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
}
}
@@ -326,6 +322,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}
for _, batch := range pendingBatches {
r.metrics.bridgeL2RelayerProcessPendingBatchTotal.Inc()
// get current header and parent header.
currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader)
if err != nil {
@@ -400,7 +397,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err)
return
}
bridgeL2BatchesCommittedTotalCounter.Inc(1)
r.metrics.bridgeL2RelayerProcessPendingBatchSuccessTotal.Inc()
r.processingCommitment.Store(txID, batch.Hash)
log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
}
@@ -424,6 +421,8 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
return
}
r.metrics.bridgeL2RelayerProcessCommittedBatchesTotal.Inc()
batch := batches[0]
hash := batch.Hash
status := types.ProvingStatus(batch.ProvingStatus)
@@ -437,6 +436,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
return
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash)
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
var parentBatchStateRoot string
if batch.Index > 0 {
@@ -495,7 +495,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
}
return
}
bridgeL2BatchesFinalizedTotalCounter.Inc(1)
log.Info("finalizeBatchWithProof in layer1", "index", batch.Index, "batch hash", batch.Hash, "tx hash", hash)
// record and sync with db, @todo handle db error
@@ -506,6 +505,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
"tx hash", finalizeTxHash.String(), "err", err)
}
r.processingFinalization.Store(txID, hash)
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
case types.ProvingTaskFailed:
// We were unable to prove this batch. There are two possibilities:
@@ -550,7 +550,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
"batch hash", batchHash.(string),
"tx hash", confirmation.TxHash.String(), "err", err)
}
bridgeL2BatchesCommittedConfirmedTotalCounter.Inc(1)
r.metrics.bridgeL2BatchesCommittedConfirmedTotal.Inc()
r.processingCommitment.Delete(confirmation.ID)
}
@@ -572,7 +572,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
"batch hash", batchHash.(string),
"tx hash", confirmation.TxHash.String(), "err", err)
}
bridgeL2BatchesFinalizedConfirmedTotalCounter.Inc(1)
r.metrics.bridgeL2BatchesFinalizedConfirmedTotal.Inc()
r.processingFinalization.Delete(confirmation.ID)
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
@@ -590,6 +590,7 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
case confirmation := <-r.finalizeSender.ConfirmChan():
r.handleConfirmation(confirmation)
case cfm := <-r.gasOracleSender.ConfirmChan():
r.metrics.bridgeL2BatchesGasOraclerConfirmedTotal.Inc()
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())

View File

@@ -0,0 +1,74 @@
package relayer
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l2RelayerMetrics struct {
bridgeL2RelayerProcessPendingBatchTotal prometheus.Counter
bridgeL2RelayerProcessPendingBatchSuccessTotal prometheus.Counter
bridgeL2RelayerGasPriceOraclerRunTotal prometheus.Counter
bridgeL2RelayerLastGasPrice prometheus.Gauge
bridgeL2RelayerProcessCommittedBatchesTotal prometheus.Counter
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal prometheus.Counter
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal prometheus.Counter
bridgeL2BatchesCommittedConfirmedTotal prometheus.Counter
bridgeL2BatchesFinalizedConfirmedTotal prometheus.Counter
bridgeL2BatchesGasOraclerConfirmedTotal prometheus.Counter
}
var (
initL2RelayerMetricOnce sync.Once
l2RelayerMetric *l2RelayerMetrics
)
func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
initL2RelayerMetricOnce.Do(func() {
l2RelayerMetric = &l2RelayerMetrics{
bridgeL2RelayerProcessPendingBatchTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_pending_batch_total",
Help: "The total number of layer2 process pending batch",
}),
bridgeL2RelayerProcessPendingBatchSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_pending_batch_success_total",
Help: "The total number of layer2 process pending success batch",
}),
bridgeL2RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_gas_price_oracler_total",
Help: "The total number of layer2 gas price oracler run total",
}),
bridgeL2RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_layer2_gas_price_latest_gas_price",
Help: "The latest gas price of bridge relayer l2",
}),
bridgeL2RelayerProcessCommittedBatchesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_total",
Help: "The total number of layer2 process committed batches run total",
}),
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_finalized_total",
Help: "The total number of layer2 process committed batches finalized total",
}),
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_finalized_success_total",
Help: "The total number of layer2 process committed batches finalized success total",
}),
bridgeL2BatchesCommittedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_confirmed_total",
Help: "The total number of layer2 process committed batches confirmed total",
}),
bridgeL2BatchesFinalizedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_finalized_batches_confirmed_total",
Help: "The total number of layer2 process finalized batches confirmed total",
}),
bridgeL2BatchesGasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_gras_oracler_confirmed_total",
Help: "The total number of layer2 process finalized batches confirmed total",
}),
}
})
return l2RelayerMetric
}

View File

@@ -35,7 +35,7 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewRelayer(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
}
@@ -45,7 +45,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, nil)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
@@ -73,7 +73,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, nil)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
@@ -114,7 +114,7 @@ func testL2RelayerCommitConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
assert.NoError(t, err)
// Simulate message confirmations.
@@ -164,7 +164,7 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
assert.NoError(t, err)
// Simulate message confirmations.
@@ -221,7 +221,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
assert.NoError(t, err)
// Simulate message confirmations.
@@ -259,7 +259,7 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)

View File

@@ -0,0 +1,95 @@
package sender
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type senderMetrics struct {
senderCheckBalancerTotal *prometheus.CounterVec
senderCheckPendingTransactionTotal *prometheus.CounterVec
sendTransactionTotal *prometheus.CounterVec
sendTransactionFailureFullTx *prometheus.GaugeVec
sendTransactionFailureRepeatTransaction *prometheus.CounterVec
sendTransactionFailureGetFee *prometheus.CounterVec
sendTransactionFailureSendTx *prometheus.CounterVec
resubmitTransactionTotal *prometheus.CounterVec
currentPendingTxsNum *prometheus.GaugeVec
currentGasFeeCap *prometheus.GaugeVec
currentGasTipCap *prometheus.GaugeVec
currentGasPrice *prometheus.GaugeVec
currentGasLimit *prometheus.GaugeVec
currentNonce *prometheus.GaugeVec
}
var (
initSenderMetricOnce sync.Once
sm *senderMetrics
)
func initSenderMetrics(reg prometheus.Registerer) *senderMetrics {
initSenderMetricOnce.Do(func() {
sm = &senderMetrics{
sendTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_send_transaction_total",
Help: "The total number of sending transaction.",
}, []string{"service", "name"}),
sendTransactionFailureFullTx: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_send_transaction_full_tx_failure_total",
Help: "The total number of sending transaction failure for full size tx.",
}, []string{"service", "name"}),
sendTransactionFailureRepeatTransaction: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_send_transaction_repeat_transaction_failure_total",
Help: "The total number of sending transaction failure for repeat transaction.",
}, []string{"service", "name"}),
sendTransactionFailureGetFee: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_send_transaction_get_fee_failure_total",
Help: "The total number of sending transaction failure for getting fee.",
}, []string{"service", "name"}),
sendTransactionFailureSendTx: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_send_transaction_send_tx_failure_total",
Help: "The total number of sending transaction failure for sending tx.",
}, []string{"service", "name"}),
resubmitTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_send_transaction_resubmit_send_transaction_total",
Help: "The total number of resubmit transaction.",
}, []string{"service", "name"}),
currentPendingTxsNum: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_pending_tx_count",
Help: "The pending tx count in the sender.",
}, []string{"service", "name"}),
currentGasFeeCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_gas_fee_cap",
Help: "The gas fee of current transaction.",
}, []string{"service", "name"}),
currentGasTipCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_gas_tip_cap",
Help: "The gas tip of current transaction.",
}, []string{"service", "name"}),
currentGasPrice: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_gas_price_cap",
Help: "The gas price of current transaction.",
}, []string{"service", "name"}),
currentGasLimit: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_gas_limit",
Help: "The gas limit of current transaction.",
}, []string{"service", "name"}),
currentNonce: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_nonce",
Help: "The nonce of current transaction.",
}, []string{"service", "name"}),
senderCheckPendingTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_check_pending_transaction_total",
Help: "The total number of check pending transaction.",
}, []string{"service", "name"}),
senderCheckBalancerTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_check_balancer_total",
Help: "The total number of check balancer.",
}, []string{"service", "name"}),
}
})
return sm
}
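Note: the sender metrics above are *Vec types keyed by ("service", "name"), and initSenderMetrics is again wrapped in sync.Once, so every Sender instance in the process shares the same vectors and distinguishes itself only through its label values (for example "l1_relayer"/"message_sender", as passed in the relayer hunks earlier). A brief sketch of that labelled usage; the metric name is illustrative.

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()

	// One shared vector; each sender selects its own series via label values.
	sendTotal := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
		Name: "demo_sender_send_transaction_total",
		Help: "The total number of sent transactions.",
	}, []string{"service", "name"})

	sendTotal.WithLabelValues("l1_relayer", "message_sender").Inc()
	sendTotal.WithLabelValues("l2_relayer", "commit_sender").Inc()
}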

View File

@@ -11,6 +11,7 @@ import (
"time"
cmapV2 "github.com/orcaman/concurrent-map/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -70,6 +71,8 @@ type Sender struct {
client *ethclient.Client // The client to retrieve on chain data or send transaction.
chainID *big.Int // The chain id of the endpoint
ctx context.Context
service string
name string
auth *bind.TransactOpts
minBalance *big.Int
@@ -80,11 +83,13 @@ type Sender struct {
confirmCh chan *Confirmation
stopCh chan struct{}
metrics *senderMetrics
}
// NewSender returns a new instance of transaction sender
// txConfirmationCh is used to notify confirmed transaction
func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey) (*Sender, error) {
func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey, service, name string, reg prometheus.Registerer) (*Sender, error) {
client, err := ethclient.Dial(config.Endpoint)
if err != nil {
return nil, fmt.Errorf("failed to dial eth client, err: %w", err)
@@ -135,7 +140,10 @@ func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.Pri
baseFeePerGas: baseFeePerGas,
pendingTxs: cmapV2.New[*PendingTransaction](),
stopCh: make(chan struct{}),
name: name,
service: service,
}
sender.metrics = initSenderMetrics(reg)
go sender.loop(ctx)
@@ -183,10 +191,15 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val
// SendTransaction send a signed L2tL1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (common.Hash, error) {
s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()
if s.IsFull() {
s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(1)
return common.Hash{}, ErrFullPending
}
s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(0)
if ok := s.pendingTxs.SetIfAbsent(ID, nil); !ok {
s.metrics.sendTransactionFailureRepeatTransaction.WithLabelValues(s.service, s.name).Inc()
return common.Hash{}, fmt.Errorf("repeat transaction ID: %s", ID)
}
@@ -203,9 +216,12 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
}()
if feeData, err = s.getFeeData(s.auth, target, value, data, minGasLimit); err != nil {
s.metrics.sendTransactionFailureGetFee.WithLabelValues(s.service, s.name).Inc()
return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
}
if tx, err = s.createAndSendTx(s.auth, feeData, target, value, data, nil); err != nil {
s.metrics.sendTransactionFailureSendTx.WithLabelValues(s.service, s.name).Inc()
return common.Hash{}, fmt.Errorf("failed to create and send transaction, err: %w", err)
}
@@ -294,6 +310,20 @@ func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, targ
return nil, err
}
if feeData.gasTipCap != nil {
s.metrics.currentGasTipCap.WithLabelValues(s.service, s.name).Set(float64(feeData.gasTipCap.Uint64()))
}
if feeData.gasFeeCap != nil {
s.metrics.currentGasFeeCap.WithLabelValues(s.service, s.name).Set(float64(feeData.gasFeeCap.Uint64()))
}
if feeData.gasPrice != nil {
s.metrics.currentGasPrice.WithLabelValues(s.service, s.name).Set(float64(feeData.gasPrice.Uint64()))
}
s.metrics.currentGasLimit.WithLabelValues(s.service, s.name).Set(float64(feeData.gasLimit))
// update nonce when it is not from resubmit
if overrideNonce == nil {
auth.Nonce = big.NewInt(int64(nonce + 1))
@@ -362,6 +392,7 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
}
nonce := tx.Nonce()
s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()
return s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
}
@@ -472,6 +503,7 @@ func (s *Sender) loop(ctx context.Context) {
for {
select {
case <-checkTick.C:
s.metrics.senderCheckPendingTransactionTotal.WithLabelValues(s.service, s.name).Inc()
header, err := s.client.HeaderByNumber(s.ctx, nil)
if err != nil {
log.Error("failed to get latest head", "err", err)
@@ -486,6 +518,7 @@ func (s *Sender) loop(ctx context.Context) {
s.checkPendingTransaction(header, confirmed)
case <-checkBalanceTicker.C:
s.metrics.senderCheckBalancerTotal.WithLabelValues(s.service, s.name).Inc()
// Check and set balance.
if err := s.checkBalance(ctx); err != nil {
log.Error("check balance error", "err", err)

View File

@@ -70,7 +70,7 @@ func testNewSender(t *testing.T) {
// exit by Stop()
cfgCopy1 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy1.TxType = txType
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey)
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey, "test", "test", nil)
assert.NoError(t, err)
newSender1.Stop()
@@ -78,7 +78,7 @@ func testNewSender(t *testing.T) {
cfgCopy2 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy2.TxType = txType
subCtx, cancel := context.WithCancel(context.Background())
_, err = NewSender(subCtx, &cfgCopy2, privateKey)
_, err = NewSender(subCtx, &cfgCopy2, privateKey, "test", "test", nil)
assert.NoError(t, err)
cancel()
}
@@ -90,7 +90,7 @@ func testPendLimit(t *testing.T) {
cfgCopy.TxType = txType
cfgCopy.Confirmations = rpc.LatestBlockNumber
cfgCopy.PendingLimit = 2
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey)
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
assert.NoError(t, err)
for i := 0; i < 2*newSender.PendingLimit(); i++ {
@@ -107,7 +107,7 @@ func testMinGasLimit(t *testing.T) {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.Confirmations = rpc.LatestBlockNumber
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey)
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
assert.NoError(t, err)
client, err := ethclient.Dial(cfgCopy.Endpoint)
@@ -135,7 +135,7 @@ func testResubmitTransaction(t *testing.T) {
for _, txType := range txTypes {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey)
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
assert.NoError(t, err)
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
feeData, err := s.getFeeData(s.auth, &common.Address{}, big.NewInt(0), nil, 0)
@@ -151,7 +151,7 @@ func testResubmitTransactionWithRisingBaseFee(t *testing.T) {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey)
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
assert.NoError(t, err)
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
s.baseFeePerGas = 1000
@@ -186,7 +186,7 @@ func testCheckPendingTransaction(t *testing.T) {
for _, txType := range txTypes {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey)
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
assert.NoError(t, err)
header := &types.Header{Number: big.NewInt(100), BaseFee: big.NewInt(100)}

View File

@@ -5,6 +5,8 @@ import (
"fmt"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
@@ -29,10 +31,20 @@ type BatchProposer struct {
minChunkNumPerBatch uint64
batchTimeoutSec uint64
gasCostIncreaseMultiplier float64
batchProposerCircleTotal prometheus.Counter
proposeBatchFailureTotal prometheus.Counter
proposeBatchUpdateInfoTotal prometheus.Counter
proposeBatchUpdateInfoFailureTotal prometheus.Counter
totalL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
batchChunksNum prometheus.Gauge
batchFirstChunkTimeoutReached prometheus.Counter
batchChunksSuperposeNotEnoughTotal prometheus.Counter
}
// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB) *BatchProposer {
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
return &BatchProposer{
ctx: ctx,
db: db,
@@ -45,22 +57,63 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
minChunkNumPerBatch: cfg.MinChunkNumPerBatch,
batchTimeoutSec: cfg.BatchTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_circle_total",
Help: "Total number of propose batch total.",
}),
proposeBatchFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_failure_circle_total",
Help: "Total number of propose batch total.",
}),
proposeBatchUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_update_info_total",
Help: "Total number of propose batch update info total.",
}),
proposeBatchUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_update_info_failure_total",
Help: "Total number of propose batch update info failure total.",
}),
totalL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_batch_total_l1_commit_gas",
Help: "The total l1 commit gas",
}),
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_batch_total_l1_call_data_size",
Help: "The total l1 call data size",
}),
batchChunksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_batch_chunks_number",
Help: "The number of chunks in the batch",
}),
batchFirstChunkTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_first_chunk_timeout_reached_total",
Help: "Total times of batch's first chunk timeout reached",
}),
batchChunksSuperposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_chunks_superpose_not_enough_total",
Help: "Total number of batch chunk superpose not enough",
}),
}
}
// TryProposeBatch tries to propose a new batch.
func (p *BatchProposer) TryProposeBatch() {
p.batchProposerCircleTotal.Inc()
dbChunks, err := p.proposeBatchChunks()
if err != nil {
p.proposeBatchFailureTotal.Inc()
log.Error("proposeBatchChunks failed", "err", err)
return
}
if err := p.updateBatchInfoInDB(dbChunks); err != nil {
p.proposeBatchUpdateInfoFailureTotal.Inc()
log.Error("update batch info in db failed", "err", err)
}
}
func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
p.proposeBatchUpdateInfoTotal.Inc()
numChunks := len(dbChunks)
if numChunks <= 0 {
return nil
@@ -77,10 +130,12 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
err = p.db.Transaction(func(dbTX *gorm.DB) error {
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure", "error", "start chunk index", startChunkIndex, "end chunk index", endChunkIndex, dbErr)
return dbErr
}
dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batch.Hash, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.UpdateBatchHashInRange update the chunk's batch hash failure", "hash", batch.Hash, "error", dbErr)
return dbErr
}
return nil
@@ -132,6 +187,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
// batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
@@ -144,6 +200,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
)
}
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
@@ -174,6 +231,8 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
}
}
p.batchChunksNum.Set(float64(len(dbChunks)))
var hasChunkTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
@@ -183,12 +242,14 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
"chunk outdated time threshold", currentTimeSec,
)
hasChunkTimeout = true
p.batchFirstChunkTimeoutReached.Inc()
}
if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
log.Warn("The chunk number of the batch is less than the minimum limit",
"chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
)
p.batchChunksSuperposeNotEnoughTotal.Inc()
return nil, nil
}
return dbChunks, nil
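
A note on the metric wiring above: promauto.With(reg) builds each collector through a factory bound to the supplied Registerer and, when reg is nil, creates the collector without registering it — which is presumably why the unit tests below pass nil. A minimal sketch of the same pattern (names here are illustrative, not from the repo):

package example

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// exampleProposer mirrors the constructor wiring above with a single counter.
type exampleProposer struct {
	circleTotal prometheus.Counter
}

// newExampleProposer registers the counter on reg when reg is non-nil;
// passing nil (as the tests do) yields a working but unregistered counter.
func newExampleProposer(reg prometheus.Registerer) *exampleProposer {
	return &exampleProposer{
		circleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "example_propose_circle_total",
			Help: "Total number of propose iterations.",
		}),
	}
}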

View File

@@ -30,7 +30,7 @@ func testBatchProposer(t *testing.T) {
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db)
}, db, nil)
cp.TryProposeChunk()
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
@@ -39,7 +39,7 @@ func testBatchProposer(t *testing.T) {
MaxL1CommitCalldataSizePerBatch: 1000000,
MinChunkNumPerBatch: 1,
BatchTimeoutSec: 300,
}, db)
}, db, nil)
bp.TryProposeBatch()
chunkOrm := orm.NewChunk(db)

View File

@@ -6,6 +6,8 @@ import (
"fmt"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
@@ -57,10 +59,23 @@ type ChunkProposer struct {
maxRowConsumptionPerChunk uint64
chunkTimeoutSec uint64
gasCostIncreaseMultiplier float64
chunkProposerCircleTotal prometheus.Counter
proposeChunkFailureTotal prometheus.Counter
proposeChunkUpdateInfoTotal prometheus.Counter
proposeChunkUpdateInfoFailureTotal prometheus.Counter
chunkL2TxNum prometheus.Gauge
chunkEstimateL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
totalTxGasUsed prometheus.Gauge
maxTxConsumption prometheus.Gauge
chunkBlocksNum prometheus.Gauge
chunkBlockTimeoutReached prometheus.Counter
chunkBlocksSuperposeNotEnoughTotal prometheus.Counter
}
// NewChunkProposer creates a new ChunkProposer instance.
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB) *ChunkProposer {
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
return &ChunkProposer{
ctx: ctx,
db: db,
@@ -74,18 +89,70 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_circle_total",
Help: "Total number of propose chunk total.",
}),
proposeChunkFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_failure_circle_total",
Help: "Total number of propose chunk failure total.",
}),
proposeChunkUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_update_info_total",
Help: "Total number of propose chunk update info total.",
}),
proposeChunkUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_update_info_failure_total",
Help: "Total number of propose chunk update info failure total.",
}),
chunkL2TxNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_l2_tx_num",
Help: "The chunk l2 tx num",
}),
chunkEstimateL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_estimate_l1_commit_gas",
Help: "The chunk estimate l1 commit gas",
}),
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_total_l1_commit_call_data_size",
Help: "The total l1 commit call data size",
}),
totalTxGasUsed: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_total_tx_gas_used",
Help: "The total tx gas used",
}),
maxTxConsumption: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_max_tx_consumption",
Help: "The max tx consumption",
}),
chunkBlocksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_chunk_block_number",
Help: "The number of blocks in the chunk",
}),
chunkBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_first_block_timeout_reached_total",
Help: "Total times of chunk's first block timeout reached",
}),
chunkBlocksSuperposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_blocks_superpose_not_enough_total",
Help: "Total number of chunk block superpose not enough",
}),
}
}
// TryProposeChunk tries to propose a new chunk.
func (p *ChunkProposer) TryProposeChunk() {
p.chunkProposerCircleTotal.Inc()
proposedChunk, err := p.proposeChunk()
if err != nil {
p.proposeChunkFailureTotal.Inc()
log.Error("propose new chunk failed", "err", err)
return
}
if err := p.updateChunkInfoInDB(proposedChunk); err != nil {
p.proposeChunkUpdateInfoFailureTotal.Inc()
log.Error("update chunk info in orm failed", "err", err)
}
}
@@ -95,9 +162,11 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
return nil
}
p.proposeChunkUpdateInfoTotal.Inc()
err := p.db.Transaction(func(dbTX *gorm.DB) error {
dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, dbTX)
if err != nil {
log.Warn("ChunkProposer.InsertChunk failed", "chunk hash", chunk.Hash)
return err
}
if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
@@ -131,6 +200,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
}
p.chunkL2TxNum.Set(float64(totalL2TxNum))
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if totalL2TxNum > p.maxL2TxNumPerChunk {
@@ -142,6 +212,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
)
}
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
@@ -151,6 +222,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
)
}
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
@@ -160,6 +232,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
)
}
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
// Check if the first block breaks any soft limits.
if totalTxGasUsed > p.maxTxGasPerChunk {
log.Warn(
@@ -169,7 +242,10 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
"max gas limit", p.maxTxGasPerChunk,
)
}
if max := crc.max(); max > p.maxRowConsumptionPerChunk {
max := crc.max()
p.maxTxConsumption.Set(float64(max))
if max > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
firstBlock.Header.Number,
@@ -200,6 +276,8 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
}
}
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
var hasBlockTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
@@ -208,6 +286,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
"block timestamp", blocks[0].Header.Time,
"block outdated time threshold", currentTimeSec,
)
p.chunkBlockTimeoutReached.Inc()
hasBlockTimeout = true
}
@@ -216,6 +295,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
"totalL1CommitCalldataSize", totalL1CommitCalldataSize,
"minL1CommitCalldataSizePerChunk", p.minL1CommitCalldataSizePerChunk,
)
p.chunkBlocksSuperposeNotEnoughTotal.Inc()
return nil, nil
}
return chunk, nil

View File

@@ -30,7 +30,7 @@ func testChunkProposer(t *testing.T) {
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db)
}, db, nil)
cp.TryProposeChunk()
expectedChunk := &types.Chunk{
@@ -62,7 +62,7 @@ func testChunkProposerRowConsumption(t *testing.T) {
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 0, // !
ChunkTimeoutSec: 300,
}, db)
}, db, nil)
cp.TryProposeChunk()
chunkOrm := orm.NewChunk(db)

View File

@@ -4,6 +4,7 @@ import (
"context"
"math/big"
"github.com/prometheus/client_golang/prometheus"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
@@ -11,11 +12,9 @@ import (
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
@@ -23,12 +22,6 @@ import (
"scroll-tech/bridge/internal/utils"
)
var (
bridgeL1MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
bridgeL1MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL1MsgsRollupEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
)
type rollupEvent struct {
batchHash common.Hash
txHash common.Hash
@@ -59,10 +52,12 @@ type L1WatcherClient struct {
processedMsgHeight uint64
// The height of the block that the watcher has retrieved header rlp
processedBlockHeight uint64
metrics *l1WatcherMetrics
}
// NewL1WatcherClient returns a new instance of L1WatcherClient.
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB) *L1WatcherClient {
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
l1MessageOrm := orm.NewL1Message(db)
savedHeight, err := l1MessageOrm.GetLayer1LatestWatchedHeight()
if err != nil {
@@ -102,6 +97,7 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
processedMsgHeight: uint64(savedHeight),
processedBlockHeight: savedL1BlockHeight,
metrics: initL1WatcherMetrics(reg),
}
}
@@ -125,6 +121,7 @@ func (w *L1WatcherClient) SetConfirmations(confirmations rpc.BlockNumber) {
// FetchBlockHeader pulls the latest L1 blocks and saves them in the DB
func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
w.metrics.l1WatcherFetchBlockHeaderTotal.Inc()
fromBlock := int64(w.processedBlockHeight) + 1
toBlock := int64(blockHeight)
if toBlock < fromBlock {
@@ -171,6 +168,7 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
// update processed height
w.processedBlockHeight = uint64(toBlock)
w.metrics.l1WatcherFetchBlockHeaderProcessedBlockHeight.Set(float64(w.processedBlockHeight))
return nil
}
@@ -189,6 +187,7 @@ func (w *L1WatcherClient) FetchContractEvent() error {
toBlock := int64(blockHeight)
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
w.metrics.l1WatcherFetchContractEventTotal.Inc()
to := from + contractEventsBlocksFetchLimit - 1
if to > toBlock {
@@ -220,9 +219,10 @@ func (w *L1WatcherClient) FetchContractEvent() error {
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
bridgeL1MsgsSyncHeightGauge.Update(to)
w.metrics.l1WatcherFetchContractEventProcessedBlockHeight.Set(float64(to))
continue
}
log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))
sentMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
@@ -232,8 +232,8 @@ func (w *L1WatcherClient) FetchContractEvent() error {
}
sentMessageCount := int64(len(sentMessageEvents))
rollupEventCount := int64(len(rollupEvents))
bridgeL1MsgsSentEventsTotalCounter.Inc(sentMessageCount)
bridgeL1MsgsRollupEventsTotalCounter.Inc(rollupEventCount)
w.metrics.l1WatcherFetchContractEventSentEventsTotal.Add(float64(sentMessageCount))
w.metrics.l1WatcherFetchContractEventRollupEventsTotal.Add(float64(rollupEventCount))
log.Info("L1 events types", "SentMessageCount", sentMessageCount, "RollupEventCount", rollupEventCount)
// use rollup event to update rollup results db status
@@ -273,7 +273,8 @@ func (w *L1WatcherClient) FetchContractEvent() error {
}
w.processedMsgHeight = uint64(to)
bridgeL1MsgsSyncHeightGauge.Update(to)
w.metrics.l1WatcherFetchContractEventSuccessTotal.Inc()
w.metrics.l1WatcherFetchContractEventProcessedBlockHeight.Set(float64(w.processedMsgHeight))
}
return nil

View File

@@ -0,0 +1,59 @@
package watcher
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l1WatcherMetrics struct {
l1WatcherFetchBlockHeaderTotal prometheus.Counter
l1WatcherFetchBlockHeaderProcessedBlockHeight prometheus.Gauge
l1WatcherFetchContractEventTotal prometheus.Counter
l1WatcherFetchContractEventSuccessTotal prometheus.Counter
l1WatcherFetchContractEventProcessedBlockHeight prometheus.Gauge
l1WatcherFetchContractEventSentEventsTotal prometheus.Counter
l1WatcherFetchContractEventRollupEventsTotal prometheus.Counter
}
var (
initL1WatcherMetricOnce sync.Once
l1WatcherMetric *l1WatcherMetrics
)
func initL1WatcherMetrics(reg prometheus.Registerer) *l1WatcherMetrics {
initL1WatcherMetricOnce.Do(func() {
l1WatcherMetric = &l1WatcherMetrics{
l1WatcherFetchBlockHeaderTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_header_total",
Help: "The total number of l1 watcher fetch block header total",
}),
l1WatcherFetchBlockHeaderProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l1_watcher_fetch_block_header_processed_block_height",
Help: "The current processed block height of l1 watcher fetch block header",
}),
l1WatcherFetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_total",
Help: "The total number of l1 watcher fetch contract event total",
}),
l1WatcherFetchContractEventSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_success_total",
Help: "The total number of l1 watcher fetch contract event success total",
}),
l1WatcherFetchContractEventProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_processed_block_height",
Help: "The current processed block height of l1 watcher fetch contract event",
}),
l1WatcherFetchContractEventSentEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_sent_event_total",
Help: "The current processed block height of l1 watcher fetch contract sent event",
}),
l1WatcherFetchContractEventRollupEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_rollup_event_total",
Help: "The current processed block height of l1 watcher fetch contract rollup event",
}),
}
})
return l1WatcherMetric
}
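
The sync.Once guard means the metrics struct is built and registered at most once per process, so constructing several L1WatcherClient instances against the same registerer shares one set of collectors instead of panicking on duplicate registration. A rough illustration, written as if from inside the watcher package:

reg := prometheus.NewRegistry()
a := initL1WatcherMetrics(reg)
b := initL1WatcherMetrics(reg) // second call skips the Do body and reuses the same struct
fmt.Println(a == b)            // expected to print: true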

View File

@@ -30,7 +30,8 @@ func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
client, err := ethclient.Dial(base.L1gethImg.Endpoint())
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress,
l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil)
assert.NoError(t, watcher.FetchContractEvent())
return watcher, db
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"math/big"
"github.com/prometheus/client_golang/prometheus"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
@@ -13,11 +14,9 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/event"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
@@ -25,14 +24,6 @@ import (
"scroll-tech/bridge/internal/utils"
)
// Metrics
var (
bridgeL2MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedGapGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
bridgeL2MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
)
// L2WatcherClient provides APIs for subscribing to various events from l2geth
type L2WatcherClient struct {
ctx context.Context
@@ -56,10 +47,12 @@ type L2WatcherClient struct {
processedMsgHeight uint64
stopped uint64
metrics *l2WatcherMetrics
}
// NewL2WatcherClient takes an l2geth client instance and creates an L2WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB) *L2WatcherClient {
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB, reg prometheus.Registerer) *L2WatcherClient {
l1MessageOrm := orm.NewL1Message(db)
var savedHeight uint64
l1msg, err := l1MessageOrm.GetLayer1LatestMessageWithLayer2Hash()
@@ -93,6 +86,7 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
withdrawTrieRootSlot: withdrawTrieRootSlot,
stopped: 0,
metrics: initL2WatcherMetrics(reg),
}
return &w
@@ -102,6 +96,7 @@ const blockTracesFetchLimit = uint64(10)
// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
w.metrics.fetchRunningMissingBlocksTotal.Inc()
heightInDB, err := w.l2BlockOrm.GetL2BlocksLatestHeight(w.ctx)
if err != nil {
log.Error("failed to GetL2BlocksLatestHeight", "err", err)
@@ -120,8 +115,8 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
}
bridgeL2BlocksFetchedHeightGauge.Update(int64(to))
bridgeL2BlocksFetchedGapGauge.Update(int64(blockHeight - to))
w.metrics.fetchRunningMissingBlocksHeight.Set(float64(to))
w.metrics.bridgeL2BlocksFetchedGap.Set(float64(blockHeight - to))
}
}
@@ -199,6 +194,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
w.metrics.fetchContractEventTotal.Inc()
blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
@@ -238,7 +234,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
bridgeL2MsgsSyncHeightGauge.Update(to)
w.metrics.fetchContractEventHeight.Set(float64(to))
continue
}
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
@@ -250,7 +246,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
relayedMessageCount := int64(len(relayedMessageEvents))
bridgeL2MsgsRelayedEventsTotalCounter.Inc(relayedMessageCount)
w.metrics.bridgeL2MsgsRelayedEventsTotal.Add(float64(relayedMessageCount))
log.Info("L2 events types", "RelayedMessageCount", relayedMessageCount)
// Update relayed messages first to make sure we don't forget to update submitted messages.
@@ -269,7 +265,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
w.processedMsgHeight = uint64(to)
bridgeL2MsgsSyncHeightGauge.Update(to)
w.metrics.fetchContractEventHeight.Set(float64(to))
}
}

View File

@@ -0,0 +1,54 @@
package watcher
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l2WatcherMetrics struct {
fetchRunningMissingBlocksTotal prometheus.Counter
fetchRunningMissingBlocksHeight prometheus.Gauge
fetchContractEventTotal prometheus.Counter
fetchContractEventHeight prometheus.Gauge
bridgeL2MsgsRelayedEventsTotal prometheus.Counter
bridgeL2BlocksFetchedGap prometheus.Gauge
}
var (
initL2WatcherMetricOnce sync.Once
l2WatcherMetric *l2WatcherMetrics
)
func initL2WatcherMetrics(reg prometheus.Registerer) *l2WatcherMetrics {
initL2WatcherMetricOnce.Do(func() {
l2WatcherMetric = &l2WatcherMetrics{
fetchRunningMissingBlocksTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l2_watcher_fetch_running_missing_blocks_total",
Help: "The total number of l2 watcher fetch running missing blocks",
}),
fetchRunningMissingBlocksHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l2_watcher_fetch_running_missing_blocks_height",
Help: "The total number of l2 watcher fetch running missing blocks height",
}),
fetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l2_watcher_fetch_contract_events_total",
Help: "The total number of l2 watcher fetch contract events",
}),
fetchContractEventHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l2_watcher_fetch_contract_height",
Help: "The total number of l2 watcher fetch contract height",
}),
bridgeL2MsgsRelayedEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l2_watcher_msg_relayed_events_total",
Help: "The total number of l2 watcher msg relayed event",
}),
bridgeL2BlocksFetchedGap: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l2_watcher_blocks_fetched_gap",
Help: "The gap of l2 fetch",
}),
}
})
return l2WatcherMetric
}

View File

@@ -34,7 +34,8 @@ import (
func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {
db := setupDB(t)
l2cfg := cfg.L2Config
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress,
l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db, nil)
return watcher, db
}
@@ -50,7 +51,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
l1cfg := cfg.L1Config
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKey)
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKey, "test", "test", nil)
assert.NoError(t, err)
// Create several transactions and commit to block
@@ -95,7 +96,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
func prepareWatcherClient(l2Cli *ethclient.Client, db *gorm.DB, contractAddr common.Address) *L2WatcherClient {
confirmations := rpc.LatestBlockNumber
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db, nil)
}
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {

View File

@@ -26,13 +26,14 @@ func testImportL1GasPrice(t *testing.T) {
l1Cfg := bridgeApp.Config.L1Config
// Create L1Relayer
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
assert.NoError(t, err)
// Create L1Watcher
startHeight, err := l1Client.BlockNumber(context.Background())
assert.NoError(t, err)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0,
l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
// fetch new blocks
number, err := l1Client.BlockNumber(context.Background())
@@ -67,7 +68,7 @@ func testImportL2GasPrice(t *testing.T) {
prepareContracts(t)
l2Cfg := bridgeApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, nil)
assert.NoError(t, err)
// add fake chunk

View File

@@ -29,14 +29,16 @@ func testRelayL1MessageSucceed(t *testing.T) {
l2Cfg := bridgeApp.Config.L2Config
// Create L1Relayer
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
assert.NoError(t, err)
// Create L1Watcher
confirmations := rpc.LatestBlockNumber
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress,
l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
// Create L2Watcher
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress,
l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db, nil)
// send message through l1 messenger contract
nonce, err := l1MessengerInstance.MessageNonce(&bind.CallOpts{})

View File

@@ -28,12 +28,13 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create L2Relayer
l2Cfg := bridgeApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, nil)
assert.NoError(t, err)
// Create L1Watcher
l1Cfg := bridgeApp.Config.L1Config
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress,
l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
// add some blocks to db
var wrappedBlocks []*types.WrappedBlock
@@ -64,7 +65,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db)
}, db, nil)
cp.TryProposeChunk()
chunkOrm := orm.NewChunk(db)
@@ -78,7 +79,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
MaxL1CommitCalldataSizePerBatch: 1000000,
MinChunkNumPerBatch: 1,
BatchTimeoutSec: 300,
}, db)
}, db, nil)
bp.TryProposeBatch()
l2Relayer.ProcessPendingBatches()

View File

@@ -32,7 +32,7 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"ark-std",
"env_logger 0.10.0",
@@ -380,7 +380,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b"
dependencies = [
"borsh-derive",
"hashbrown 0.13.2",
"hashbrown 0.12.3",
]
[[package]]
@@ -433,7 +433,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"eth-types",
"ethers-core",
@@ -1049,7 +1049,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"ethers-core",
"ethers-signers",
@@ -1226,7 +1226,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"eth-types",
"geth-utils",
@@ -1439,7 +1439,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"digest 0.7.6",
"eth-types",
@@ -1479,7 +1479,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"env_logger 0.9.3",
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
@@ -1580,6 +1580,21 @@ dependencies = [
"tracing",
]
[[package]]
name = "halo2-base"
version = "0.2.2"
source = "git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
dependencies = [
"ff",
"halo2_proofs",
"itertools",
"num-bigint",
"num-integer",
"num-traits",
"rand_chacha",
"rustc-hash",
]
[[package]]
name = "halo2-base"
version = "0.2.2"
@@ -1595,6 +1610,25 @@ dependencies = [
"rustc-hash",
]
[[package]]
name = "halo2-ecc"
version = "0.2.2"
source = "git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
dependencies = [
"ff",
"group",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
"itertools",
"num-bigint",
"num-integer",
"num-traits",
"rand",
"rand_chacha",
"rand_core",
"serde",
"serde_json",
]
[[package]]
name = "halo2-ecc"
version = "0.2.2"
@@ -1602,7 +1636,7 @@ source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#2c22586422
dependencies = [
"ff",
"group",
"halo2-base",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?branch=develop)",
"itertools",
"num-bigint",
"num-integer",
@@ -1655,7 +1689,7 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#01f0b5260445a9190299af7b06b766c1e925fdaf"
source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#b612b1e2a9fa2ccd150a6cb99e67592c8d62cd99"
dependencies = [
"ark-std",
"blake2b_simd",
@@ -2077,7 +2111,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"env_logger 0.9.3",
"eth-types",
@@ -2264,7 +2298,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"eth-types",
"ethers-core",
@@ -2279,7 +2313,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"bus-mapping",
"eth-types",
@@ -2755,7 +2789,7 @@ dependencies = [
[[package]]
name = "prover"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.18#fcf4bf3137ad37becdeb5360b23ac978405c6b2c"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.2#8c439b1dd62c429223221484fb8a5470242d1cbc"
dependencies = [
"aggregator",
"anyhow",
@@ -3624,12 +3658,12 @@ checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"
[[package]]
name = "snark-verifier"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#100127726ac210226ac1096767e0efc5230775e3"
source = "git+https://github.com/scroll-tech//snark-verifier?tag=v0.1.1#11a09d4a37c31c659b29e2dac0ceb544a776ad7b"
dependencies = [
"bytes",
"ethereum-types 0.14.1",
"halo2-base",
"halo2-ecc",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
"halo2-ecc 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
"hex",
"itertools",
"lazy_static",
@@ -3648,12 +3682,12 @@ dependencies = [
[[package]]
name = "snark-verifier-sdk"
version = "0.0.1"
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#100127726ac210226ac1096767e0efc5230775e3"
source = "git+https://github.com/scroll-tech//snark-verifier?tag=v0.1.1#11a09d4a37c31c659b29e2dac0ceb544a776ad7b"
dependencies = [
"bincode",
"env_logger 0.10.0",
"ethereum-types 0.14.1",
"halo2-base",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
"hex",
"itertools",
"lazy_static",
@@ -4040,7 +4074,7 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "types"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.18#fcf4bf3137ad37becdeb5360b23ac978405c6b2c"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.2#8c439b1dd62c429223221484fb8a5470242d1cbc"
dependencies = [
"base64 0.13.1",
"blake2",
@@ -4491,17 +4525,18 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"array-init",
"bus-mapping",
"either",
"env_logger 0.9.3",
"eth-types",
"ethers-core",
"ethers-signers",
"gadgets",
"halo2-base",
"halo2-ecc",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?branch=develop)",
"halo2-ecc 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?branch=develop)",
"halo2_proofs",
"hex",
"itertools",

View File

@@ -19,9 +19,13 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
[patch."https://github.com/privacy-scaling-explorations/halo2curves.git"]
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }
[patch."https://github.com/scroll-tech/snark-verifier"]
snark-verifier = { git = "https://github.com/scroll-tech//snark-verifier", tag = "v0.1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech//snark-verifier", tag = "v0.1.1" }
[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.18" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.18" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.2" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.2" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
log = "0.4"

View File

@@ -1,57 +0,0 @@
package metrics
import (
"context"
"net"
"net/http"
"strconv"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/metrics/prometheus"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
)
var (
// ScrollRegistry is used for scroll metrics.
ScrollRegistry = metrics.NewRegistry()
)
// Serve starts the metrics server on the given address, will be closed when the given
// context is canceled.
func Serve(ctx context.Context, c *cli.Context) {
if !c.Bool(utils.MetricsEnabled.Name) {
return
}
address := net.JoinHostPort(
c.String(utils.MetricsAddr.Name),
strconv.Itoa(c.Int(utils.MetricsPort.Name)),
)
server := &http.Server{
Addr: address,
Handler: prometheus.Handler(ScrollRegistry),
ReadTimeout: rpc.DefaultHTTPTimeouts.ReadTimeout,
WriteTimeout: rpc.DefaultHTTPTimeouts.WriteTimeout,
IdleTimeout: rpc.DefaultHTTPTimeouts.IdleTimeout,
}
go func() {
<-ctx.Done()
if err := server.Close(); err != nil {
log.Error("Failed to close metrics server", "error", err)
}
}()
log.Info("Starting metrics server", "address", address)
go func() {
if err := server.ListenAndServe(); err != nil {
log.Error("start metrics server error", "error", err)
}
}()
}
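
With this geth-metrics based server removed, the bridge components now receive a prometheus.Registerer directly (see the constructors above). One way to expose those collectors over HTTP — a sketch of a possible wiring, not necessarily how this repo serves them — is promhttp:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// pass reg into NewBatchProposer, NewChunkProposer, NewL1WatcherClient, ...

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	// the listen address below is an arbitrary example value
	_ = http.ListenAndServe(":6060", nil)
}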

View File

@@ -13,6 +13,18 @@ import (
"github.com/scroll-tech/go-ethereum/rlp"
)
// ProofFailureType the proof failure type
type ProofFailureType int
const (
// ProofFailureUndefined the undefined proof failure type
ProofFailureUndefined ProofFailureType = iota
// ProofFailurePanic proof failure for prover panic
ProofFailurePanic
// ProofFailureNoPanic proof failure for no prover panic
ProofFailureNoPanic
)
// RespStatus represents status code from prover to scroll
type RespStatus uint32
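
These failure types are what the prover reports alongside a non-OK status (see the prover changes further down): repeated attempts that keep dying are treated as a circuit panic, ordinary proving errors are not. A hypothetical helper sketching that mapping:

// classifyFailure is illustrative only; the real selection happens in the
// prover's proveAndSubmit loop shown later in this diff.
func classifyFailure(triedTimes int) ProofFailureType {
	if triedTimes >= 3 {
		// the task keeps being retried, so the prover most likely panicked while proving
		return ProofFailurePanic
	}
	return ProofFailureNoPanic
}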

View File

@@ -6,7 +6,7 @@ import (
"strings"
)
var tag = "v4.1.62"
var tag = "v4.1.72"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -45,23 +45,25 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
},
}
switch message.ProofType(spp.TaskType) {
case message.ProofTypeChunk:
var tmpChunkProof message.ChunkProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
if spp.Status == int(message.StatusOk) {
switch message.ProofType(spp.TaskType) {
case message.ProofTypeChunk:
var tmpChunkProof message.ChunkProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.ChunkProof = &tmpChunkProof
case message.ProofTypeBatch:
var tmpBatchProof message.BatchProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.BatchProof = &tmpBatchProof
}
proofMsg.ChunkProof = &tmpChunkProof
case message.ProofTypeBatch:
var tmpBatchProof message.BatchProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.BatchProof = &tmpBatchProof
}
if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg, spp); err != nil {

View File

@@ -150,7 +150,7 @@ func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout promethe
// the attempt count. If the limit is reached, the collector will set the block batch proving status.
for _, assignedProverTask := range assignedProverTasks {
if c.proverTaskOrm.TaskTimeoutMoreThanOnce(c.ctx, assignedProverTask.TaskID) {
log.Warn("Task timeout more than once", "hash", assignedProverTask.TaskID)
log.Warn("Task timeout more than once", "taskType", message.ProofType(assignedProverTask.TaskType).String(), "hash", assignedProverTask.TaskID)
}
timeout.Inc()

View File

@@ -93,7 +93,7 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, reg pro
proverTaskProveDuration: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Name: "coordinator_task_prove_duration_seconds",
Help: "Time spend by prover prove task.",
Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 20, 30, 60},
Buckets: []float64{180, 300, 480, 600, 900, 1200, 1800},
}),
validateFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_validate_failure_total",
@@ -141,8 +141,8 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
log.Info("handling zk proof", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proverTask.TaskType, "proof time", proofTimeSec)
log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec)
if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter); err != nil {
return err
@@ -221,9 +221,9 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
// (ii) set the maximum failure retry times
log.Warn(
"cannot submit valid proof for a prover task twice",
"proof type", proverTask.TaskType, "hash", proofMsg.ID,
"prover name", proverTask.ProverName, "prover version", proverTask.ProverVersion,
"prover pk", proverTask.ProverPublicKey,
"taskType", proverTask.TaskType, "hash", proofMsg.ID,
"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
"proverPublicKey", proverTask.ProverPublicKey,
)
return ErrValidatorFailureProverTaskCannotSubmitTwice
}
@@ -235,31 +235,31 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
m.validateFailureProverTaskStatusNotOk.Inc()
log.Info("proof generated by prover failed",
"prove type", proofMsg.Type, "proof id", proofMsg.ID,
"prover name", proverTask.ProverName, "prover version", proverTask.ProverVersion,
"prover pk", "failure type", proofParameter.FailureType, "failure message", proofParameter.FailureMsg)
"taskType", proofMsg.Type, "hash", proofMsg.ID,
"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
"proverPublicKey", pk, "failureType", proofParameter.FailureType, "failureMessage", proofParameter.FailureMsg)
return ErrValidatorFailureProofMsgStatusNotOk
}
// if the prover task FailureType is SessionInfoFailureTimeout, the submitted proof has timed out and must be skipped
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
m.validateFailureProverTaskTimeout.Inc()
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "proof type", proverTask.TaskType,
"prover name", proverTask.ProverName, "prover public key", pk, "proof time", proofTimeSec)
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType,
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
return ErrValidatorFailureProofTimeout
}
// store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, pk, proofMsg); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "public key", pk,
"prover name", proverTask.ProverName, "error", updateTaskProofErr)
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
}
// if the batch/chunk has already been proved and verified successfully, skip this submitted proof
if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
m.validateFailureProverTaskHaveVerifier.Inc()
log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
"proof type", proverTask.TaskType, "prover name", proverTask.ProverName, "prover public key", pk)
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
return ErrValidatorFailureTaskHaveVerifiedSuccess
}
return nil
@@ -275,8 +275,8 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
//}
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
log.Info("proof recover update proof status", "hash", hash, "public key", pubKey,
"proof type", proofMsg.Type.String(), "status", types.ProvingTaskUnassigned.String())
log.Info("proof recover update proof status", "hash", hash, "proverPublicKey", pubKey,
"taskType", proofMsg.Type.String(), "status", types.ProvingTaskUnassigned.String())
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskUnassigned, 0); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", hash, "pubKey", pubKey, "error", err)
@@ -284,11 +284,11 @@ func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubK
}
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg, proofTimeSec uint64) error {
log.Info("proof close task update proof status", "hash", hash, "public key", pubKey,
"proof type", proofMsg.Type.String(), "status", types.ProvingTaskVerified.String())
log.Info("proof close task update proof status", "hash", hash, "proverPublicKey", pubKey,
"taskType", proofMsg.Type.String(), "status", types.ProvingTaskVerified.String())
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskVerified, proofTimeSec); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "pubKey", pubKey, "error", err)
log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "proverPublicKey", pubKey, "error", err)
return err
}
return nil

View File

@@ -0,0 +1,15 @@
-- +goose Up
-- +goose StatementBegin
drop index l1_message_hash_uindex;
create index l1_message_hash_index
on l1_message (msg_hash) where deleted_at IS NULL;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop index l1_message_hash_index;
create unique index l1_message_hash_uindex
on l1_message (msg_hash) where deleted_at IS NULL;
-- +goose StatementEnd
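
This migration replaces the unique index on l1_message.msg_hash with a non-unique partial index, and the Down section restores the unique one. The -- +goose annotations point at the goose migration tool; a sketch of applying a migrations directory programmatically, assuming the pressly/goose/v3 package and placeholder DSN and directory values:

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // an assumed choice of postgres driver
	"github.com/pressly/goose/v3"
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/scroll?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	if err := goose.SetDialect("postgres"); err != nil {
		log.Fatal(err)
	}
	// "migrations" is a placeholder for wherever this repo keeps its *.sql files
	if err := goose.Up(db, "migrations"); err != nil {
		log.Fatal(err)
	}
}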

View File

@@ -193,6 +193,7 @@ github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7 h1:6IrxszG5G+O7zhtkWxq6+unVvnrm1fqV2Pe+T95DUzw=
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7/go.mod h1:gFnFS95y8HstDP6P9pPwzrxOOC5TRDkwbM+ao15ChAI=
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 h1:DuBDHVjgGMPki7bAyh91+3cF1Vh34sAEdH8JQgbc2R0=
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
@@ -238,6 +239,7 @@ github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf h1:Yt+4K30SdjOkRoRRm3v
github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7 h1:kgvzE5wLsLa7XKfV85VZl40QXaMCaeFtHpPwJ8fhotY=
github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7/go.mod h1:yRkwfj0CBpOGre+TwBsqPV0IH0Pk73e4PXJOeNDboGs=
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE=
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7 h1:tYwu/z8Y0NkkzGEh3z21mSWggMg4LwLRFucLS7TjARg=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
@@ -277,6 +279,7 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILD
github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8=
github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732 h1:AB7YjNrzlVHsYz06zCULVV2zYCEft82P86dSmtwxKL0=
github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732/go.mod h1:o/XfIXWi4/GqbQirfRm5uTbXMG5NpqxkxblnbZ+QM9I=
github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b h1:vMT47RYsrftsHSTQhqXwC3BYflo38OLC3Y4LtXtLyU0=
github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b/go.mod h1:CDncRYVRSDqwakm282WEkjfaAj1hxU/v5RXxk5nXOiI=
github.com/getkin/kin-openapi v0.61.0 h1:6awGqF5nG5zkVpMsAih1QH4VgzS8phTxECUWIFo7zko=
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM=
@@ -374,6 +377,7 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U=
github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg=
github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
@@ -472,6 +476,7 @@ github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQ
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c h1:AqsttAyEyIEsNz5WLRwuRwjiT5CMDUfLk6cFJDVPebs=
github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kataras/blocks v0.0.7 h1:cF3RDY/vxnSRezc7vLFlQFTYXG/yAr1o7WImJuZbzC4=
github.com/kataras/blocks v0.0.7/go.mod h1:UJIU97CluDo0f+zEjbnbkeMRlvYORtmc1304EeyXf4I=
@@ -711,6 +716,7 @@ go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4A
go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc=
go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
@@ -782,6 +788,7 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=

View File

@@ -1,7 +1,6 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
@@ -11,7 +10,6 @@ import (
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/metrics"
"scroll-tech/common/utils"
"scroll-tech/common/version"
@@ -55,14 +53,6 @@ func action(ctx *cli.Context) error {
}
}()
subCtx, cancel := context.WithCancel(ctx.Context)
defer func() {
cancel()
}()
// Start metrics server.
metrics.Serve(subCtx, ctx)
// init Prover Stats API
port := ctx.String(httpPortFlag.Name)

View File

@@ -175,17 +175,20 @@ func (c *CoordinatorClient) SubmitProof(ctx context.Context, req *SubmitProofReq
Post("/coordinator/v1/submit_proof")
if err != nil {
return fmt.Errorf("submit proof request failed: %v", err)
log.Error("submit proof request failed: %v", err)
return fmt.Errorf("submit proof request failed: %w", ConnectErr)
}
if resp.StatusCode() != 200 {
return fmt.Errorf("failed to submit proof, status code: %v", resp.StatusCode())
log.Error("failed to submit proof, status code: %v", resp.StatusCode())
return fmt.Errorf("failed to submit proof, status code not 200: %w", ConnectErr)
}
if result.ErrCode == types.ErrJWTTokenExpired {
log.Info("JWT expired, attempting to re-login")
if err := c.Login(ctx); err != nil {
return fmt.Errorf("JWT expired, re-login failed: %v", err)
log.Error("JWT expired, re-login failed: %v", err)
return fmt.Errorf("JWT expired, re-login failed: %w", ConnectErr)
}
log.Info("re-login success")
return c.SubmitProof(ctx, req)
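The reworked SubmitProof wraps every transport-level failure with the ConnectErr sentinel via %w, which is what later lets the prover distinguish "coordinator unreachable" from a genuine rejection (see the prover.go hunks further down). Below is a minimal, self-contained sketch of that pattern; the submit helper is hypothetical, and only the %w wrapping and the errors.Is check mirror the diff.

```go
package main

import (
	"errors"
	"fmt"
)

// ConnectErr mirrors the sentinel error the client wraps when the
// coordinator cannot be reached (same name as in the diff above).
var ConnectErr = errors.New("connect coordinator error")

// submit is a hypothetical stand-in for CoordinatorClient.SubmitProof:
// transport-level failures are wrapped with %w so callers can test for them.
func submit(reachable bool) error {
	if !reachable {
		return fmt.Errorf("submit proof request failed: %w", ConnectErr)
	}
	return nil
}

func main() {
	err := submit(false)
	// errors.Is walks the wrap chain, so no explicit Unwrap is needed.
	if errors.Is(err, ConnectErr) {
		fmt.Println("connection problem: keep the task queued and retry later")
	} else if err != nil {
		fmt.Println("non-connection failure: safe to drop the task")
	}
}
```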

View File

@@ -1,9 +1,13 @@
package client
import (
"errors"
"scroll-tech/common/types/message"
)
var ConnectErr = errors.New("connect coordinator error")
// ChallengeResponse defines the response structure for random API
type ChallengeResponse struct {
ErrCode int `json:"errcode"`
@@ -53,10 +57,12 @@ type GetTaskResponse struct {
// SubmitProofRequest defines the request structure for the SubmitProof API.
type SubmitProofRequest struct {
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
Status int `json:"status"`
Proof string `json:"proof"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
Status int `json:"status"`
Proof string `json:"proof"`
FailureType int `json:"failure_type,omitempty"`
FailureMsg string `json:"failure_msg,omitempty"`
}
// SubmitProofResponse defines the response structure for the SubmitProof API.
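The new FailureType and FailureMsg fields are tagged omitempty, so a normal proof submission keeps the previous JSON shape while a failure report carries the extra fields. A small sketch under that assumption follows; the struct matches the diff, but the numeric values are placeholders rather than the real status and failure-type constants.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// SubmitProofRequest copies the shape shown in the diff above; the two
// failure fields are dropped from the JSON body when left at their zero values.
type SubmitProofRequest struct {
	TaskID      string `json:"task_id"`
	TaskType    int    `json:"task_type"`
	Status      int    `json:"status"`
	Proof       string `json:"proof"`
	FailureType int    `json:"failure_type,omitempty"`
	FailureMsg  string `json:"failure_msg,omitempty"`
}

func main() {
	// A failure report; Status and FailureType values are illustrative only.
	req := SubmitProofRequest{
		TaskID:      "task-1",
		TaskType:    1,
		Status:      2, // e.g. StatusProofError
		FailureType: 1, // e.g. ProofFailurePanic
		FailureMsg:  "zk proving panic for task",
	}
	body, _ := json.Marshal(req)
	fmt.Println(string(body))
	// {"task_id":"task-1","task_type":1,"status":2,"proof":"","failure_type":1,"failure_msg":"zk proving panic for task"}
}
```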

View File

@@ -151,16 +151,9 @@ func (r *Prover) proveAndSubmit() error {
}
}
defer func() {
err = r.stack.Delete(task.Task.ID)
if err != nil {
log.Error("prover stack pop failed!", "err", err)
}
}()
var proofMsg *message.ProofDetail
if task.Times <= 2 {
// If panic times <= 2, try to proof the task.
// If tried times <= 2, try to prove the task.
if err = r.stack.UpdateTimes(task, task.Times+1); err != nil {
return fmt.Errorf("failed to update times on stack: %v", err)
}
@@ -168,15 +161,15 @@ func (r *Prover) proveAndSubmit() error {
log.Info("start to prove task", "task-type", task.Task.Type, "task-id", task.Task.ID)
proofMsg, err = r.prove(task)
if err != nil { // handling error from prove
return fmt.Errorf("failed to prove task, task-type: %v, err: %v", task.Task.Type, err)
log.Error("failed to prove task", "task_type", task.Task.Type, "task-id", task.Task.ID, "err", err)
return r.submitErr(task, message.ProofFailureNoPanic, err)
}
return r.submitProof(proofMsg)
}
// when the prover has more than 3 times panic,
// it will omit to prove the task, submit StatusProofError and then Delete the task.
return fmt.Errorf("zk proving panic for task, task-type: %v, task-id: %v", task.Task.Type, task.Task.ID)
// if tried times >= 3, it's probably due to circuit proving panic
log.Error("zk proving panic for task", "task-type", task.Task.Type, "task-id", task.Task.ID)
return r.submitErr(task, message.ProofFailurePanic, errors.New("zk proving panic for task"))
}
// fetchTaskFromCoordinator fetches a new task from the server
@@ -329,10 +322,43 @@ func (r *Prover) submitProof(msg *message.ProofDetail) error {
// send the submit request
if err := r.coordinatorClient.SubmitProof(r.ctx, req); err != nil {
if !errors.Is(errors.Unwrap(err), client.ConnectErr) {
if deleteErr := r.stack.Delete(msg.ID); deleteErr != nil {
log.Error("prover stack pop failed", "task_type", msg.Type, "task_id", msg.ID, "err", deleteErr)
}
}
return fmt.Errorf("error submitting proof: %v", err)
}
log.Info("proof submitted successfully", "task-id", msg.ID, "task-type", msg.Type, "task-status", msg.Status, "err", msg.Error)
return nil
}
func (r *Prover) submitErr(task *store.ProvingTask, proofFailureType message.ProofFailureType, err error) error {
// prepare the submit request
req := &client.SubmitProofRequest{
TaskID: task.Task.ID,
TaskType: int(task.Task.Type),
Status: int(message.StatusProofError),
Proof: "",
FailureType: int(proofFailureType),
FailureMsg: err.Error(),
}
// send the submit request
if submitErr := r.coordinatorClient.SubmitProof(r.ctx, req); submitErr != nil {
if !errors.Is(errors.Unwrap(submitErr), client.ConnectErr) {
if deleteErr := r.stack.Delete(task.Task.ID); deleteErr != nil {
log.Error("prover stack pop failed", "task_type", task.Task.Type, "task_id", task.Task.ID, "err", deleteErr)
}
}
return fmt.Errorf("error submitting proof: %v", submitErr)
}
log.Info("proof submitted report failure successfully",
"task-id", task.Task.ID, "task-type", task.Task.Type,
"task-status", message.StatusProofError, "err", err)
return nil
}
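
Taken together, proveAndSubmit now retries a task for up to two recorded attempts and, from the third attempt on, assumes the circuit panicked and reports the failure instead of proving again. The following is a simplified, self-contained sketch of that control flow; provingTask and both callbacks are stand-ins, not the repository's actual types.

```go
package main

import (
	"errors"
	"fmt"
)

// provingTask is a simplified stand-in for store.ProvingTask: only the
// retry counter matters for this sketch.
type provingTask struct {
	ID    string
	Times int
}

// proveOrReport mirrors the control flow in proveAndSubmit above: for up to
// two prior attempts the task is proven again, and from the third attempt on
// it is assumed the circuit panicked, so a failure is reported instead.
func proveOrReport(t *provingTask, prove func() error, reportFailure func(error) error) error {
	if t.Times <= 2 {
		t.Times++ // corresponds to stack.UpdateTimes in the real code
		if err := prove(); err != nil {
			return reportFailure(err)
		}
		return nil // a successful proof would be submitted here
	}
	return reportFailure(errors.New("zk proving panic for task"))
}

func main() {
	task := &provingTask{ID: "task-1", Times: 3}
	_ = proveOrReport(task,
		func() error { return nil },
		func(err error) error {
			fmt.Println("reporting failure for", task.ID+":", err)
			return nil
		})
}
```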