Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 23:18:07 -05:00)

Compare commits (44 commits)
| SHA1 |
|---|
| b734282a44 |
| 30bbfc0229 |
| 60bea74444 |
| ae2e010324 |
| 767a2cbfdf |
| 74e5de156e |
| becfd41b0e |
| 8542a176ed |
| ea40517527 |
| 1f151f6ae5 |
| ca76835712 |
| 247dd24a8f |
| b006eaa33e |
| ee4d7c3549 |
| 3c3f56b9b0 |
| 95121093c8 |
| b85a109fd3 |
| c091081f70 |
| e42397867b |
| 9a5ad83121 |
| ba90865ec6 |
| 66f3b42d24 |
| a9a6b7464a |
| 24898602de |
| 623213a67a |
| 8e8a9c0351 |
| 974930f051 |
| 9654d76356 |
| 373e98ff3c |
| 34c3b91fd7 |
| 3adf077070 |
| 2a4d6e05a1 |
| 0ee99024bb |
| 0a3401772e |
| 69d2d2b7c2 |
| e86399b5d4 |
| b1e3b28dc6 |
| 0a9641be70 |
| 4a3056667e |
| a78c8cf1df |
| d68c6b7757 |
| e6fe25f10f |
| 251c706c04 |
| 55658c6f52 |
.github/scripts/bump_version_dot_go.mjs (vendored, new file, 37 lines)

@@ -0,0 +1,37 @@
import { URL } from "url";
import { readFileSync, writeFileSync } from "fs";

const versionFilePath = new URL(
  "../../common/version/version.go",
  import.meta.url
).pathname;

const versionFileContent = readFileSync(versionFilePath, { encoding: "utf-8" });

const currentVersion = versionFileContent.match(
  /var tag = "(?<version>v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+))"/
);

try {
  parseInt(currentVersion.groups.major);
  parseInt(currentVersion.groups.minor);
  parseInt(currentVersion.groups.patch);
} catch (err) {
  console.error(new Error("Failed to parse version in version.go file"));
  throw err;
}

// prettier-ignore
const newVersion = `v${currentVersion.groups.major}.${currentVersion.groups.minor}.${parseInt(currentVersion.groups.patch) + 1}`;

console.log(
  `Bump version from ${currentVersion.groups.version} to ${newVersion}`
);

writeFileSync(
  versionFilePath,
  versionFileContent.replace(
    `var tag = "${currentVersion.groups.version}"`,
    `var tag = "${newVersion}"`
  )
);
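For reference, the regex in this script assumes common/version/version.go declares the tag as a quoted package-level var. A minimal sketch of that shape (the version value shown is illustrative, not the repo's current one):

```go
package version

// tag is matched by the bump script's regex and rewritten in place.
var tag = "v4.1.0"
```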
.github/workflows/bump_version.yml (vendored, new file, 55 lines)

@@ -0,0 +1,55 @@
name: Bump Version

on:
  pull_request:
    branches: [develop]
    types:
      - opened
      - reopened
      - synchronize
      - ready_for_review

jobs:
  try-to-bump:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: check diff
        id: check_diff
        run: |
          set -euo pipefail

          # fetch develop branch so that we can diff against later
          git fetch origin develop

          echo 'checking version changes in diff...'

          # check if version changed in version.go
          # note: the grep will fail if you use \d instead of [0-9]
          # the trailing `&& true` keeps `set -e` from aborting the step when
          # grep finds no match, while $? still records the pipeline's exit code
          git diff HEAD..origin/develop --text --no-ext-diff --unified=0 --no-prefix common/version/version.go | grep -E '^\+var tag = "v[0-9]+\.[0-9]+\.[0-9]+"$' && true

          exit_code=$?

          # auto bump if version is not bumped manually
          echo '> require auto version bump?'

          if [ $exit_code -eq 0 ]; then
            echo '> no, already bumped'
            echo "result=no-bump" >> "$GITHUB_OUTPUT"
          else
            echo '> yes'
            echo "result=bump" >> "$GITHUB_OUTPUT"
          fi
      - name: Install Node.js 16
        if: steps.check_diff.outputs.result == 'bump'
        uses: actions/setup-node@v3
        with:
          node-version: 16
      - name: bump version in common/version/version.go
        if: steps.check_diff.outputs.result == 'bump'
        run: node .github/scripts/bump_version_dot_go.mjs
      - uses: stefanzweifel/git-auto-commit-action@3ea6ae190baf489ba007f7c92608f33ce20ef04a
        if: steps.check_diff.outputs.result == 'bump'
        with:
          commit_message: "chore: auto version bump [bot]"
Makefile (16 lines changed)

@@ -1,5 +1,7 @@
 .PHONY: check update dev_docker build_test_docker run_test_docker clean
 
+L2GETH_TAG=scroll-v4.3.34
+
 help: ## Display this help message
 	@grep -h \
 		-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
@@ -15,14 +17,14 @@ lint: ## The code's format and security checks.
 
 update: ## update dependencies
 	go work sync
-	cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
+	cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
 	cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
-	cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
-	cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
-	cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
-	cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
-	cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
-	cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
+	cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
 	goimports -local $(PWD)/bridge/ -w .
 	goimports -local $(PWD)/bridge-history-api/ -w .
 	goimports -local $(PWD)/common/ -w .
@@ -95,6 +95,9 @@ func (b *BatchInfoFetcher) fetchBatchInfo() error {
 	} else {
 		startHeight = latestBatchHeight + 1
 	}
+	if startHeight < b.batchInfoStartNumber {
+		startHeight = b.batchInfoStartNumber
+	}
 	for from := startHeight; number >= from; from += fetchLimit {
 		to := from + fetchLimit - 1
 		// number - confirmation can never less than 0 since the for loop condition
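The three added lines clamp the scan's starting height to a configured floor before the windowed fetch loop runs. A self-contained sketch of the same clamp-and-paginate pattern (names are illustrative, not the repo's API):

```go
package fetcher

// fetchRange pages through [start, latest] in windows of size limit, never
// starting below floor, mirroring the batchInfoStartNumber guard above.
func fetchRange(start, floor, latest, limit uint64, fetch func(from, to uint64) error) error {
	if start < floor {
		start = floor // clamp to the configured start number
	}
	for from := start; from <= latest; from += limit {
		to := from + limit - 1
		if to > latest {
			to = latest // do not overshoot the confirmed head
		}
		if err := fetch(from, to); err != nil {
			return err
		}
	}
	return nil
}
```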
@@ -7,6 +7,7 @@ import (
 	"os/signal"
 	"time"
 
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/urfave/cli/v2"
@@ -59,8 +60,8 @@ func action(ctx *cli.Context) error {
 		}
 	}()
 
-	// Start metrics server.
-	metrics.Serve(subCtx, ctx)
+	registry := prometheus.DefaultRegisterer
+	metrics.Server(ctx, registry.(*prometheus.Registry))
 	l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
 	if err != nil {
 		log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
@@ -72,8 +73,12 @@ func action(ctx *cli.Context) error {
 		log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
 		return err
 	}
-	l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
-	l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
+
+	l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
+		cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
+
+	l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
+		cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
 
 	go utils.Loop(subCtx, 10*time.Second, func() {
 		if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
|
||||
"os/signal"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/urfave/cli/v2"
|
||||
@@ -61,8 +62,8 @@ func action(ctx *cli.Context) error {
|
||||
}
|
||||
}()
|
||||
|
||||
// Start metrics server.
|
||||
metrics.Serve(subCtx, ctx)
|
||||
registry := prometheus.DefaultRegisterer
|
||||
metrics.Server(ctx, registry.(*prometheus.Registry))
|
||||
|
||||
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
|
||||
if err != nil {
|
||||
@@ -78,14 +79,14 @@ func action(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
|
||||
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
|
||||
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
|
||||
|
||||
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
|
||||
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
|
||||
if err != nil {
|
||||
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
|
||||
return err
|
||||
}
|
||||
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */)
|
||||
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */, registry)
|
||||
if err != nil {
|
||||
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
|
||||
return err
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"os/signal"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
@@ -59,10 +60,10 @@ func action(ctx *cli.Context) error {
|
||||
}
|
||||
}()
|
||||
|
||||
// Start metrics server.
|
||||
metrics.Serve(subCtx, ctx)
|
||||
registry := prometheus.DefaultRegisterer
|
||||
metrics.Server(ctx, registry.(*prometheus.Registry))
|
||||
|
||||
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
|
||||
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
|
||||
if err != nil {
|
||||
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
|
||||
return err
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
 	"os/signal"
 	"time"
 
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/urfave/cli/v2"
@@ -62,8 +63,8 @@ func action(ctx *cli.Context) error {
 		}
 	}()
 
-	// Start metrics server.
-	metrics.Serve(subCtx, ctx)
+	registry := prometheus.DefaultRegisterer
+	metrics.Server(ctx, registry.(*prometheus.Registry))
 
 	// Init l2geth connection
 	l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
@@ -73,26 +74,26 @@ func action(ctx *cli.Context) error {
 	}
 
 	initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
-	l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis)
+	l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis, registry)
 	if err != nil {
 		log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
 		return err
 	}
 
-	chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db)
+	chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db, registry)
 	if err != nil {
 		log.Error("failed to create chunkProposer", "config file", cfgFile, "error", err)
 		return err
 	}
 
-	batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db)
+	batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db, registry)
 	if err != nil {
 		log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
 		return err
 	}
 
 	l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
-		cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
+		cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
 
 	// Watcher loop to fetch missing blocks
 	go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
@@ -110,7 +111,7 @@ func action(ctx *cli.Context) error {
 
 	go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)
 
-	go utils.Loop(subCtx, 60*time.Second, l2relayer.ProcessCommittedBatches)
+	go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)
 
 	// Finish start all rollup relayer functions.
 	log.Info("Start rollup-relayer successfully")
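utils.Loop drives each of these goroutines on a fixed period (note the commit cadence tightening from 60s to 15s above). A sketch of the semantics the entrypoints rely on; the repo's actual utils.Loop may differ in detail:

```go
package utils

import (
	"context"
	"time"
)

// Loop calls fn every interval until ctx is cancelled.
func Loop(ctx context.Context, interval time.Duration, fn func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			fn()
		}
	}
}
```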
@@ -5,6 +5,7 @@ go 1.19
 require (
 	github.com/agiledragon/gomonkey/v2 v2.9.0
 	github.com/orcaman/concurrent-map/v2 v2.0.1
+	github.com/prometheus/client_golang v1.14.0
 	github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28
 	github.com/smartystreets/goconvey v1.8.0
 	github.com/stretchr/testify v1.8.3
@@ -13,6 +14,7 @@ require (
 )
 
 require (
+	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/btcsuite/btcd v0.20.1-beta // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@@ -20,6 +22,7 @@ require (
 	github.com/deckarep/golang-set v1.8.0 // indirect
 	github.com/go-ole/go-ole v1.2.6 // indirect
 	github.com/go-stack/stack v1.8.1 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/gopherjs/gopherjs v1.17.2 // indirect
@@ -35,8 +38,12 @@ require (
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.19 // indirect
 	github.com/mattn/go-runewidth v0.0.14 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/prometheus/client_model v0.3.0 // indirect
+	github.com/prometheus/common v0.39.0 // indirect
+	github.com/prometheus/procfs v0.9.0 // indirect
 	github.com/rivo/uniseg v0.4.4 // indirect
 	github.com/rjeczalik/notify v0.9.1 // indirect
 	github.com/rogpeppe/go-internal v1.10.0 // indirect
@@ -54,7 +61,7 @@ require (
 	golang.org/x/sync v0.3.0 // indirect
 	golang.org/x/sys v0.11.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
-	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
@@ -2,6 +2,8 @@ github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.9.0 h1:PDiKKybR596O6FHW+RVSG0Z7uGCBNbmbUXh3uCNQ7Hc=
github.com/agiledragon/gomonkey/v2 v2.9.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -31,8 +33,14 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -62,11 +70,8 @@ github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlT
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
@@ -77,6 +82,8 @@ github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APP
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
@@ -90,6 +97,14 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
@@ -140,6 +155,7 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
@@ -157,9 +173,13 @@ golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
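These go.mod and go.sum hunks are the mechanical result of importing github.com/prometheus/client_golang: running `go get github.com/prometheus/client_golang@v1.14.0` followed by `go mod tidy` in the module records the new direct requirement plus its transitive modules (perks, client_model, common, procfs, protobuf) and their checksums.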
@@ -6,15 +6,13 @@ import (
 	"fmt"
 	"math/big"
 
-	// not sure if this will make problems when relay with l1geth
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/log"
-	gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
 	"gorm.io/gorm"
 
-	"scroll-tech/common/metrics"
 	"scroll-tech/common/types"
 
 	bridgeAbi "scroll-tech/bridge/abi"
@@ -23,11 +21,6 @@ import (
 	"scroll-tech/bridge/internal/orm"
 )
 
-var (
-	bridgeL1MsgsRelayedTotalCounter          = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/total", metrics.ScrollRegistry)
-	bridgeL1MsgsRelayedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
-)
-
 // Layer1Relayer is responsible for
 // 1. fetch pending L1Message from db
 // 2. relay pending message to layer 2 node
@@ -54,17 +47,18 @@ type Layer1Relayer struct {
 
 	l1MessageOrm *orm.L1Message
 	l1BlockOrm   *orm.L1Block
+	metrics      *l1RelayerMetrics
 }
 
 // NewLayer1Relayer will return a new instance of Layer1RelayerClient
-func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
-	messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey)
+func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, reg prometheus.Registerer) (*Layer1Relayer, error) {
+	messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey, "l1_relayer", "message_sender", reg)
 	if err != nil {
 		addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
 		return nil, fmt.Errorf("new message sender failed for address %s, err: %v", addr.Hex(), err)
 	}
 
-	gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey)
+	gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l1_relayer", "gas_oracle_sender", reg)
 	if err != nil {
 		addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
 		return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %v", addr.Hex(), err)
@@ -86,6 +80,7 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
 	}
 
 	l1Relayer := &Layer1Relayer{
+		cfg:          cfg,
 		ctx:          ctx,
 		l1MessageOrm: orm.NewL1Message(db),
 		l1BlockOrm:   orm.NewL1Block(db),
@@ -100,10 +95,10 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
 
 		minGasPrice:  minGasPrice,
 		gasPriceDiff: gasPriceDiff,
-
-		cfg: cfg,
 	}
 
+	l1Relayer.metrics = initL1RelayerMetrics(reg)
+
 	go l1Relayer.handleConfirmLoop(ctx)
 	return l1Relayer, nil
 }
@@ -123,7 +118,9 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
 
 	for _, msg := range msgs {
 		tmpMsg := msg
+		r.metrics.bridgeL1RelayedMsgsTotal.Inc()
 		if err = r.processSavedEvent(&tmpMsg); err != nil {
+			r.metrics.bridgeL1RelayedMsgsFailureTotal.Inc()
 			if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
 				log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
 			}
@@ -145,7 +142,6 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
 	if err != nil {
 		return err
 	}
-	bridgeL1MsgsRelayedTotalCounter.Inc(1)
 	log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash)
 
 	err = r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
@@ -157,6 +153,7 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
 
 // ProcessGasPriceOracle imports gas price to layer2
 func (r *Layer1Relayer) ProcessGasPriceOracle() {
+	r.metrics.bridgeL1RelayerGasPriceOraclerRunTotal.Inc()
 	latestBlockHeight, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
 	if err != nil {
 		log.Warn("Failed to fetch latest L1 block height from db", "err", err)
@@ -201,6 +198,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
 			return
 		}
 		r.lastGasPrice = block.BaseFee
+		r.metrics.bridgeL1RelayerLastGasPrice.Set(float64(r.lastGasPrice))
 		log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee)
 	}
 }
@@ -212,7 +210,7 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
 		case <-ctx.Done():
 			return
 		case cfm := <-r.messageSender.ConfirmChan():
-			bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
+			r.metrics.bridgeL1MsgsRelayedConfirmedTotal.Inc()
 			if !cfm.IsSuccessful {
 				err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
 				if err != nil {
@@ -228,6 +226,7 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
 				log.Info("transaction confirmed in layer2", "confirmation", cfm)
 			}
 		case cfm := <-r.gasOracleSender.ConfirmChan():
+			r.metrics.bridgeL1GasOraclerConfirmedTotal.Inc()
 			if !cfm.IsSuccessful {
 				// @discuss: maybe make it pending again?
 				err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
bridge/internal/controller/relayer/l1_relayer_metrics.go (new file, 54 lines)

@@ -0,0 +1,54 @@
package relayer

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type l1RelayerMetrics struct {
	bridgeL1RelayedMsgsTotal               prometheus.Counter
	bridgeL1RelayedMsgsFailureTotal        prometheus.Counter
	bridgeL1RelayerGasPriceOraclerRunTotal prometheus.Counter
	bridgeL1RelayerLastGasPrice            prometheus.Gauge
	bridgeL1MsgsRelayedConfirmedTotal      prometheus.Counter
	bridgeL1GasOraclerConfirmedTotal       prometheus.Counter
}

var (
	initL1RelayerMetricOnce sync.Once
	l1RelayerMetric         *l1RelayerMetrics
)

func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
	initL1RelayerMetricOnce.Do(func() {
		l1RelayerMetric = &l1RelayerMetrics{
			bridgeL1RelayedMsgsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer1_msg_relayed_total",
				Help: "The total number of the l1 relayed message.",
			}),
			bridgeL1RelayedMsgsFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer1_msg_relayed_failure_total",
				Help: "The total number of the l1 relayed failure message.",
			}),
			bridgeL1MsgsRelayedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer1_relayed_confirmed_total",
				Help: "The total number of layer1 relayed confirmed",
			}),
			bridgeL1RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer1_gas_price_oracler_total",
				Help: "The total number of layer1 gas price oracler run total",
			}),
			bridgeL1RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
				Name: "bridge_layer1_gas_price_latest_gas_price",
				Help: "The latest gas price of bridge relayer l1",
			}),
			bridgeL1GasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer1_gas_oracler_confirmed_total",
				Help: "The total number of layer1 gas oracler confirmed",
			}),
		}
	})
	return l1RelayerMetric
}
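initL1RelayerMetrics is guarded by sync.Once, so a process builds exactly one metrics set no matter how many relayers are constructed; that avoids Prometheus duplicate-registration panics, at the cost that a second relayer handed a different registry would silently reuse the first set. The bare pattern, as a generic sketch:

```go
package metricsonce

import "sync"

// Collector stands in for a struct of counters and gauges.
type Collector struct{}

var (
	once     sync.Once
	instance *Collector
)

// get lazily builds the singleton; every caller after the first receives the
// same value, and later build arguments are ignored (the trade-off noted above).
func get(build func() *Collector) *Collector {
	once.Do(func() { instance = build() })
	return instance
}
```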
@@ -62,7 +62,7 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
 func testCreateNewL1Relayer(t *testing.T) {
 	db := setupL1RelayerDB(t)
 	defer database.CloseDB(db)
-	relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
+	relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig, nil)
 	assert.NoError(t, err)
 	assert.NotNil(t, relayer)
 }
@@ -72,7 +72,7 @@ func testL1RelayerProcessSaveEvents(t *testing.T) {
 	defer database.CloseDB(db)
 	l1MessageOrm := orm.NewL1Message(db)
 	l1Cfg := cfg.L1Config
-	relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
+	relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
 	assert.NoError(t, err)
 	assert.NotNil(t, relayer)
 	assert.NoError(t, l1MessageOrm.SaveL1Messages(context.Background(), templateL1Message))
@@ -99,7 +99,7 @@ func testL1RelayerMsgConfirm(t *testing.T) {
 	l1Cfg := cfg.L1Config
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
+	l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
 	assert.NoError(t, err)
 
 	// Simulate message confirmations.
@@ -138,7 +138,7 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
 	l1Cfg := cfg.L1Config
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
+	l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
 	assert.NoError(t, err)
 
 	// Simulate message confirmations.
@@ -168,7 +168,7 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
 	l1Cfg := cfg.L1Config
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
+	l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
 	assert.NoError(t, err)
 	assert.NotNil(t, l1Relayer)
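All of these tests pass nil for the new registry argument. That is safe because promauto.With(nil) builds collectors without registering them with any Registerer, as this small standalone check illustrates:

```go
package metricscheck

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func TestNilRegistererIsSafe(t *testing.T) {
	// With a nil Registerer, promauto creates the counter but skips registration.
	c := promauto.With(nil).NewCounter(prometheus.CounterOpts{Name: "x_total", Help: "x"})
	c.Inc() // no panic, no global registry touched
}
```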
@@ -8,16 +8,15 @@ import (
 	"sync"
 	"time"
 
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"
 	gethTypes "github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
-	gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
 	"gorm.io/gorm"
 
-	"scroll-tech/common/metrics"
 	"scroll-tech/common/types"
 
 	bridgeAbi "scroll-tech/bridge/abi"
@@ -26,13 +25,6 @@ import (
 	"scroll-tech/bridge/internal/orm"
 )
 
-var (
-	bridgeL2BatchesFinalizedTotalCounter          = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/total", metrics.ScrollRegistry)
-	bridgeL2BatchesCommittedTotalCounter          = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
-	bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
-	bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
-)
-
 // Layer2Relayer is responsible for
 // 1. Committing and finalizing L2 blocks on L1
 // 2. Relaying messages from L2 to L1
@@ -78,29 +70,30 @@ type Layer2Relayer struct {
 	// A list of processing batch finalization.
 	// key(string): confirmation ID, value(string): batch hash.
 	processingFinalization sync.Map
+
+	metrics *l2RelayerMetrics
 }
 
 // NewLayer2Relayer will return a new instance of Layer2RelayerClient
-func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool) (*Layer2Relayer, error) {
-	messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey)
+func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool, reg prometheus.Registerer) (*Layer2Relayer, error) {
+	messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey, "l2_relayer", "message_sender", reg)
 	if err != nil {
 		addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
 		return nil, fmt.Errorf("new message sender failed for address %s, err: %w", addr.Hex(), err)
 	}
 
-	commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey)
+	commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey, "l2_relayer", "commit_sender", reg)
 	if err != nil {
 		addr := crypto.PubkeyToAddress(cfg.CommitSenderPrivateKey.PublicKey)
 		return nil, fmt.Errorf("new commit sender failed for address %s, err: %w", addr.Hex(), err)
 	}
 
-	finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderPrivateKey)
+	finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderPrivateKey, "l2_relayer", "finalize_sender", reg)
 	if err != nil {
 		addr := crypto.PubkeyToAddress(cfg.FinalizeSenderPrivateKey.PublicKey)
 		return nil, fmt.Errorf("new finalize sender failed for address %s, err: %w", addr.Hex(), err)
 	}
 
-	gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey)
+	gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l2_relayer", "gas_oracle_sender", reg)
 	if err != nil {
 		addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
 		return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %w", addr.Hex(), err)
@@ -158,6 +151,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
 			return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
 		}
 	}
+	layer2Relayer.metrics = initL2RelayerMetrics(reg)
 
 	go layer2Relayer.handleConfirmLoop(ctx)
 	return layer2Relayer, nil
@@ -275,6 +269,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
 
 // ProcessGasPriceOracle imports gas price to layer1
 func (r *Layer2Relayer) ProcessGasPriceOracle() {
+	r.metrics.bridgeL2RelayerGasPriceOraclerRunTotal.Inc()
 	batch, err := r.batchOrm.GetLatestBatch(r.ctx)
 	if batch == nil || err != nil {
 		log.Error("Failed to GetLatestBatch", "batch", batch, "err", err)
@@ -312,6 +307,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
 			return
 		}
 		r.lastGasPrice = suggestGasPriceUint64
+		r.metrics.bridgeL2RelayerLastGasPrice.Set(float64(r.lastGasPrice))
 		log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
 	}
 }
@@ -320,12 +316,13 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
 // ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
 func (r *Layer2Relayer) ProcessPendingBatches() {
 	// get pending batches from database in ascending order by their index.
-	pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 10)
+	pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 1)
 	if err != nil {
 		log.Error("Failed to fetch pending L2 batches", "err", err)
 		return
 	}
 	for _, batch := range pendingBatches {
+		r.metrics.bridgeL2RelayerProcessPendingBatchTotal.Inc()
 		// get current header and parent header.
 		currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader)
 		if err != nil {
@@ -400,7 +397,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
 			log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err)
 			return
 		}
-		bridgeL2BatchesCommittedTotalCounter.Inc(1)
+		r.metrics.bridgeL2RelayerProcessPendingBatchSuccessTotal.Inc()
 		r.processingCommitment.Store(txID, batch.Hash)
 		log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
 	}
@@ -424,6 +421,8 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
 		return
 	}
 
+	r.metrics.bridgeL2RelayerProcessCommittedBatchesTotal.Inc()
+
 	batch := batches[0]
 	hash := batch.Hash
 	status := types.ProvingStatus(batch.ProvingStatus)
@@ -437,6 +436,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
 		return
 	case types.ProvingTaskVerified:
 		log.Info("Start to roll up zk proof", "hash", hash)
+		r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
 
 		var parentBatchStateRoot string
 		if batch.Index > 0 {
@@ -495,7 +495,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
 			}
 			return
 		}
-		bridgeL2BatchesFinalizedTotalCounter.Inc(1)
 		log.Info("finalizeBatchWithProof in layer1", "index", batch.Index, "batch hash", batch.Hash, "tx hash", hash)
 
 		// record and sync with db, @todo handle db error
@@ -506,6 +505,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
 				"tx hash", finalizeTxHash.String(), "err", err)
 		}
 		r.processingFinalization.Store(txID, hash)
+		r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
 
 	case types.ProvingTaskFailed:
 		// We were unable to prove this batch. There are two possibilities:
@@ -550,7 +550,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
 				"batch hash", batchHash.(string),
 				"tx hash", confirmation.TxHash.String(), "err", err)
 		}
-		bridgeL2BatchesCommittedConfirmedTotalCounter.Inc(1)
+		r.metrics.bridgeL2BatchesCommittedConfirmedTotal.Inc()
 		r.processingCommitment.Delete(confirmation.ID)
 	}
 
@@ -572,7 +572,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
 				"batch hash", batchHash.(string),
 				"tx hash", confirmation.TxHash.String(), "err", err)
 		}
-		bridgeL2BatchesFinalizedConfirmedTotalCounter.Inc(1)
+		r.metrics.bridgeL2BatchesFinalizedConfirmedTotal.Inc()
 		r.processingFinalization.Delete(confirmation.ID)
 	}
 	log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
@@ -590,6 +590,7 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
 		case confirmation := <-r.finalizeSender.ConfirmChan():
 			r.handleConfirmation(confirmation)
 		case cfm := <-r.gasOracleSender.ConfirmChan():
+			r.metrics.bridgeL2BatchesGasOraclerConfirmedTotal.Inc()
 			if !cfm.IsSuccessful {
 				// @discuss: maybe make it pending again?
 				err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
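handleConfirmLoop fans several senders' confirmation channels into a single select loop. A stripped-down sketch of the pattern (hypothetical minimal types; the repo's sender.Confirmation carries more fields):

```go
package fanin

import "context"

// confirmation is a hypothetical minimal stand-in for sender.Confirmation.
type confirmation struct {
	ID           string
	IsSuccessful bool
}

// confirmLoop drains confirmations from multiple senders until ctx ends,
// dispatching each to a shared handler, as the relayer does above.
func confirmLoop(ctx context.Context, commitCh, finalizeCh <-chan confirmation, onConfirm func(confirmation)) {
	for {
		select {
		case <-ctx.Done():
			return
		case c := <-commitCh:
			onConfirm(c)
		case c := <-finalizeCh:
			onConfirm(c)
		}
	}
}
```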
bridge/internal/controller/relayer/l2_relayer_metrics.go (new file, 74 lines)

@@ -0,0 +1,74 @@
package relayer

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type l2RelayerMetrics struct {
	bridgeL2RelayerProcessPendingBatchTotal                     prometheus.Counter
	bridgeL2RelayerProcessPendingBatchSuccessTotal              prometheus.Counter
	bridgeL2RelayerGasPriceOraclerRunTotal                      prometheus.Counter
	bridgeL2RelayerLastGasPrice                                 prometheus.Gauge
	bridgeL2RelayerProcessCommittedBatchesTotal                 prometheus.Counter
	bridgeL2RelayerProcessCommittedBatchesFinalizedTotal        prometheus.Counter
	bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal prometheus.Counter
	bridgeL2BatchesCommittedConfirmedTotal                      prometheus.Counter
	bridgeL2BatchesFinalizedConfirmedTotal                      prometheus.Counter
	bridgeL2BatchesGasOraclerConfirmedTotal                     prometheus.Counter
}

var (
	initL2RelayerMetricOnce sync.Once
	l2RelayerMetric         *l2RelayerMetrics
)

func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
	initL2RelayerMetricOnce.Do(func() {
		l2RelayerMetric = &l2RelayerMetrics{
			bridgeL2RelayerProcessPendingBatchTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_pending_batch_total",
				Help: "The total number of layer2 process pending batch",
			}),
			bridgeL2RelayerProcessPendingBatchSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_pending_batch_success_total",
				Help: "The total number of layer2 process pending success batch",
			}),
			bridgeL2RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_gas_price_oracler_total",
				Help: "The total number of layer2 gas price oracler run total",
			}),
			bridgeL2RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
				Name: "bridge_layer2_gas_price_latest_gas_price",
				Help: "The latest gas price of bridge relayer l2",
			}),
			bridgeL2RelayerProcessCommittedBatchesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_committed_batches_total",
				Help: "The total number of layer2 process committed batches run total",
			}),
			bridgeL2RelayerProcessCommittedBatchesFinalizedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_committed_batches_finalized_total",
				Help: "The total number of layer2 process committed batches finalized total",
			}),
			bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_committed_batches_finalized_success_total",
				Help: "The total number of layer2 process committed batches finalized success total",
			}),
			bridgeL2BatchesCommittedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_committed_batches_confirmed_total",
				Help: "The total number of layer2 process committed batches confirmed total",
			}),
			bridgeL2BatchesFinalizedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_finalized_batches_confirmed_total",
				Help: "The total number of layer2 process finalized batches confirmed total",
			}),
			bridgeL2BatchesGasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_gras_oracler_confirmed_total",
				Help: "The total number of layer2 gas oracler confirmed total",
			}),
		}
	})
	return l2RelayerMetric
}
@@ -35,7 +35,7 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
 func testCreateNewRelayer(t *testing.T) {
 	db := setupL2RelayerDB(t)
 	defer database.CloseDB(db)
-	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
+	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
 	assert.NoError(t, err)
 	assert.NotNil(t, relayer)
 }
@@ -45,7 +45,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
 	defer database.CloseDB(db)
 
 	l2Cfg := cfg.L2Config
-	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
+	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, nil)
 	assert.NoError(t, err)
 
 	l2BlockOrm := orm.NewL2Block(db)
@@ -73,7 +73,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
 	defer database.CloseDB(db)
 
 	l2Cfg := cfg.L2Config
-	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
+	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, nil)
 	assert.NoError(t, err)
 	batchOrm := orm.NewBatch(db)
 	batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
@@ -114,7 +114,7 @@ func testL2RelayerCommitConfirm(t *testing.T) {
 	l2Cfg := cfg.L2Config
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
+	l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
 	assert.NoError(t, err)
 
 	// Simulate message confirmations.
@@ -164,7 +164,7 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
 	l2Cfg := cfg.L2Config
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
+	l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
 	assert.NoError(t, err)
 
 	// Simulate message confirmations.
@@ -221,7 +221,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
 	l2Cfg := cfg.L2Config
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
+	l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
 	assert.NoError(t, err)
 
 	// Simulate message confirmations.
@@ -259,7 +259,7 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
 	db := setupL2RelayerDB(t)
 	defer database.CloseDB(db)
 
-	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
+	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
 	assert.NoError(t, err)
 	assert.NotNil(t, relayer)
bridge/internal/controller/sender/metrics.go (new file, 95 lines)

@@ -0,0 +1,95 @@
package sender

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type senderMetrics struct {
	senderCheckBalancerTotal                *prometheus.CounterVec
	senderCheckPendingTransactionTotal      *prometheus.CounterVec
	sendTransactionTotal                    *prometheus.CounterVec
	sendTransactionFailureFullTx            *prometheus.GaugeVec
	sendTransactionFailureRepeatTransaction *prometheus.CounterVec
	sendTransactionFailureGetFee            *prometheus.CounterVec
	sendTransactionFailureSendTx            *prometheus.CounterVec
	resubmitTransactionTotal                *prometheus.CounterVec
	currentPendingTxsNum                    *prometheus.GaugeVec
	currentGasFeeCap                        *prometheus.GaugeVec
	currentGasTipCap                        *prometheus.GaugeVec
	currentGasPrice                         *prometheus.GaugeVec
	currentGasLimit                         *prometheus.GaugeVec
	currentNonce                            *prometheus.GaugeVec
}

var (
	initSenderMetricOnce sync.Once
	sm                   *senderMetrics
)

func initSenderMetrics(reg prometheus.Registerer) *senderMetrics {
	initSenderMetricOnce.Do(func() {
		sm = &senderMetrics{
			sendTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
				Name: "bridge_sender_send_transaction_total",
				Help: "The total number of sending transaction.",
			}, []string{"service", "name"}),
			sendTransactionFailureFullTx: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
				Name: "bridge_sender_send_transaction_full_tx_failure_total",
				Help: "The total number of sending transaction failure for full size tx.",
			}, []string{"service", "name"}),
			sendTransactionFailureRepeatTransaction: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
				Name: "bridge_sender_send_transaction_repeat_transaction_failure_total",
				Help: "The total number of sending transaction failure for repeat transaction.",
			}, []string{"service", "name"}),
			sendTransactionFailureGetFee: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
				Name: "bridge_sender_send_transaction_get_fee_failure_total",
				Help: "The total number of sending transaction failure for getting fee.",
			}, []string{"service", "name"}),
			sendTransactionFailureSendTx: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
				Name: "bridge_sender_send_transaction_send_tx_failure_total",
				Help: "The total number of sending transaction failure for sending tx.",
			}, []string{"service", "name"}),
			resubmitTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
				Name: "bridge_sender_send_transaction_resubmit_send_transaction_total",
				Help: "The total number of resubmit transaction.",
			}, []string{"service", "name"}),
			currentPendingTxsNum: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
				Name: "bridge_sender_pending_tx_count",
				Help: "The pending tx count in the sender.",
			}, []string{"service", "name"}),
			currentGasFeeCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
				Name: "bridge_sender_gas_fee_cap",
				Help: "The gas fee of current transaction.",
			}, []string{"service", "name"}),
			currentGasTipCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
				Name: "bridge_sender_gas_tip_cap",
				Help: "The gas tip of current transaction.",
			}, []string{"service", "name"}),
			currentGasPrice: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
				Name: "bridge_sender_gas_price_cap",
				Help: "The gas price of current transaction.",
			}, []string{"service", "name"}),
			currentGasLimit: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
				Name: "bridge_sender_gas_limit",
				Help: "The gas limit of current transaction.",
			}, []string{"service", "name"}),
			currentNonce: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
				Name: "bridge_sender_nonce",
				Help: "The nonce of current transaction.",
			}, []string{"service", "name"}),
			senderCheckPendingTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
				Name: "bridge_sender_check_pending_transaction_total",
				Help: "The total number of check pending transaction.",
			}, []string{"service", "name"}),
			senderCheckBalancerTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
				Name: "bridge_sender_check_balancer_total",
				Help: "The total number of check balancer.",
			}, []string{"service", "name"}),
		}
	})

	return sm
}
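Every vector above is keyed by the {service, name} label pair, so one registered metric family serves all sender instances, each selecting its own child via WithLabelValues. A runnable illustration:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()
	sends := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
		Name: "demo_send_transaction_total",
		Help: "Sends per sender instance.",
	}, []string{"service", "name"})

	// Two sender instances share one metric family, separated only by labels.
	sends.WithLabelValues("l1_relayer", "message_sender").Inc()
	sends.WithLabelValues("l2_relayer", "commit_sender").Inc()

	mfs, _ := reg.Gather()
	fmt.Println(len(mfs), "metric family gathered") // 1
}
```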
@@ -11,6 +11,7 @@ import (
	"time"

	cmapV2 "github.com/orcaman/concurrent-map/v2"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
@@ -70,6 +71,8 @@ type Sender struct {
	client *ethclient.Client // The client to retrieve on chain data or send transaction.
	chainID *big.Int // The chain id of the endpoint
	ctx context.Context
	service string
	name string

	auth *bind.TransactOpts
	minBalance *big.Int
@@ -80,11 +83,13 @@ type Sender struct {
	confirmCh chan *Confirmation

	stopCh chan struct{}

	metrics *senderMetrics
}

// NewSender returns a new instance of transaction sender
// txConfirmationCh is used to notify confirmed transaction
func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey) (*Sender, error) {
func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey, service, name string, reg prometheus.Registerer) (*Sender, error) {
	client, err := ethclient.Dial(config.Endpoint)
	if err != nil {
		return nil, fmt.Errorf("failed to dial eth client, err: %w", err)
@@ -135,7 +140,10 @@ func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.Pri
		baseFeePerGas: baseFeePerGas,
		pendingTxs: cmapV2.New[*PendingTransaction](),
		stopCh: make(chan struct{}),
		name: name,
		service: service,
	}
	sender.metrics = initSenderMetrics(reg)

	go sender.loop(ctx)
@@ -183,10 +191,15 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val

// SendTransaction send a signed L2tL1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (common.Hash, error) {
	s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()
	if s.IsFull() {
		s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(1)
		return common.Hash{}, ErrFullPending
	}

	s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(0)
	if ok := s.pendingTxs.SetIfAbsent(ID, nil); !ok {
		s.metrics.sendTransactionFailureRepeatTransaction.WithLabelValues(s.service, s.name).Inc()
		return common.Hash{}, fmt.Errorf("repeat transaction ID: %s", ID)
	}

@@ -195,10 +208,20 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
		tx *types.Transaction
		err error
	)

	defer func() {
		if err != nil {
			s.pendingTxs.Remove(ID) // release the ID on failure
		}
	}()

	if feeData, err = s.getFeeData(s.auth, target, value, data, minGasLimit); err != nil {
		s.metrics.sendTransactionFailureGetFee.WithLabelValues(s.service, s.name).Inc()
		return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
	}

	if tx, err = s.createAndSendTx(s.auth, feeData, target, value, data, nil); err != nil {
		s.metrics.sendTransactionFailureSendTx.WithLabelValues(s.service, s.name).Inc()
		return common.Hash{}, fmt.Errorf("failed to create and send transaction, err: %w", err)
	}

@@ -287,6 +310,20 @@ func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, targ
		return nil, err
	}

	if feeData.gasTipCap != nil {
		s.metrics.currentGasTipCap.WithLabelValues(s.service, s.name).Set(float64(feeData.gasTipCap.Uint64()))
	}

	if feeData.gasFeeCap != nil {
		s.metrics.currentGasFeeCap.WithLabelValues(s.service, s.name).Set(float64(feeData.gasFeeCap.Uint64()))
	}

	if feeData.gasPrice != nil {
		s.metrics.currentGasPrice.WithLabelValues(s.service, s.name).Set(float64(feeData.gasPrice.Uint64()))
	}

	s.metrics.currentGasLimit.WithLabelValues(s.service, s.name).Set(float64(feeData.gasLimit))

	// update nonce when it is not from resubmit
	if overrideNonce == nil {
		auth.Nonce = big.NewInt(int64(nonce + 1))
@@ -355,6 +392,7 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
	}

	nonce := tx.Nonce()
	s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()
	return s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
}

@@ -465,6 +503,7 @@ func (s *Sender) loop(ctx context.Context) {
	for {
		select {
		case <-checkTick.C:
			s.metrics.senderCheckPendingTransactionTotal.WithLabelValues(s.service, s.name).Inc()
			header, err := s.client.HeaderByNumber(s.ctx, nil)
			if err != nil {
				log.Error("failed to get latest head", "err", err)
@@ -479,9 +518,10 @@ func (s *Sender) loop(ctx context.Context) {

			s.checkPendingTransaction(header, confirmed)
		case <-checkBalanceTicker.C:
			s.metrics.senderCheckBalancerTotal.WithLabelValues(s.service, s.name).Inc()
			// Check and set balance.
			if err := s.checkBalance(ctx); err != nil {
				log.Error("check balance, err: %w", err)
				log.Error("check balance error", "err", err)
			}
		case <-ctx.Done():
			return
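One detail in SendTransaction above: sendTransactionFailureFullTx is a gauge used as a boolean flag, set to 1 while the pending queue is full and reset to 0 once a send gets through, so dashboards see the current condition rather than an ever-growing count. A minimal sketch of that flag pattern, with illustrative names:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var queueFull = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "example_queue_full",
	Help: "1 while the pending queue is at capacity, 0 otherwise.",
})

func trySend(pending, limit int) error {
	if pending >= limit {
		queueFull.Set(1) // flag on: the condition currently holds
		return fmt.Errorf("pending queue full (%d/%d)", pending, limit)
	}
	queueFull.Set(0) // flag off as soon as a send gets through
	return nil
}

func main() {
	fmt.Println(trySend(8, 8)) // error, gauge reads 1
	fmt.Println(trySend(3, 8)) // nil, gauge reads 0
}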
@@ -70,7 +70,7 @@ func testNewSender(t *testing.T) {
	// exit by Stop()
	cfgCopy1 := *cfg.L1Config.RelayerConfig.SenderConfig
	cfgCopy1.TxType = txType
	newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey)
	newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey, "test", "test", nil)
	assert.NoError(t, err)
	newSender1.Stop()

@@ -78,7 +78,7 @@ func testNewSender(t *testing.T) {
	cfgCopy2 := *cfg.L1Config.RelayerConfig.SenderConfig
	cfgCopy2.TxType = txType
	subCtx, cancel := context.WithCancel(context.Background())
	_, err = NewSender(subCtx, &cfgCopy2, privateKey)
	_, err = NewSender(subCtx, &cfgCopy2, privateKey, "test", "test", nil)
	assert.NoError(t, err)
	cancel()
}
@@ -90,7 +90,7 @@ func testPendLimit(t *testing.T) {
	cfgCopy.TxType = txType
	cfgCopy.Confirmations = rpc.LatestBlockNumber
	cfgCopy.PendingLimit = 2
	newSender, err := NewSender(context.Background(), &cfgCopy, privateKey)
	newSender, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
	assert.NoError(t, err)

	for i := 0; i < 2*newSender.PendingLimit(); i++ {
@@ -107,7 +107,7 @@ func testMinGasLimit(t *testing.T) {
	cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
	cfgCopy.TxType = txType
	cfgCopy.Confirmations = rpc.LatestBlockNumber
	newSender, err := NewSender(context.Background(), &cfgCopy, privateKey)
	newSender, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
	assert.NoError(t, err)

	client, err := ethclient.Dial(cfgCopy.Endpoint)
@@ -135,7 +135,7 @@ func testResubmitTransaction(t *testing.T) {
	for _, txType := range txTypes {
		cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
		cfgCopy.TxType = txType
		s, err := NewSender(context.Background(), &cfgCopy, privateKey)
		s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
		assert.NoError(t, err)
		tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
		feeData, err := s.getFeeData(s.auth, &common.Address{}, big.NewInt(0), nil, 0)
@@ -151,7 +151,7 @@ func testResubmitTransactionWithRisingBaseFee(t *testing.T) {

	cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
	cfgCopy.TxType = txType
	s, err := NewSender(context.Background(), &cfgCopy, privateKey)
	s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
	assert.NoError(t, err)
	tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
	s.baseFeePerGas = 1000
@@ -186,7 +186,7 @@ func testCheckPendingTransaction(t *testing.T) {
	for _, txType := range txTypes {
		cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
		cfgCopy.TxType = txType
		s, err := NewSender(context.Background(), &cfgCopy, privateKey)
		s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
		assert.NoError(t, err)

		header := &types.Header{Number: big.NewInt(100), BaseFee: big.NewInt(100)}
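Every updated call site in the tests above passes nil for the new reg prometheus.Registerer parameter. With promauto this is valid: a nil registerer yields a factory whose metrics are created but registered nowhere, so tests can construct senders repeatedly without duplicate-registration panics. A small sketch of that behavior, with an illustrative metric name:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	// With a nil registerer the counter is created but never registered,
	// which is why the tests can pass nil across repeated NewSender calls.
	c := promauto.With(nil).NewCounter(prometheus.CounterOpts{
		Name: "bridge_example_unregistered_total",
		Help: "Counter created without a registerer, as in the tests above.",
	})
	c.Inc()
	fmt.Println(c.Desc()) // the descriptor exists even though nothing gathers it
}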
@@ -5,6 +5,8 @@ import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

@@ -29,10 +31,20 @@ type BatchProposer struct {
	minChunkNumPerBatch uint64
	batchTimeoutSec uint64
	gasCostIncreaseMultiplier float64

	batchProposerCircleTotal prometheus.Counter
	proposeBatchFailureTotal prometheus.Counter
	proposeBatchUpdateInfoTotal prometheus.Counter
	proposeBatchUpdateInfoFailureTotal prometheus.Counter
	totalL1CommitGas prometheus.Gauge
	totalL1CommitCalldataSize prometheus.Gauge
	batchChunksNum prometheus.Gauge
	batchFirstChunkTimeoutReached prometheus.Counter
	batchChunksSuperposeNotEnoughTotal prometheus.Counter
}

// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB) *BatchProposer {
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
	return &BatchProposer{
		ctx: ctx,
		db: db,
@@ -45,22 +57,63 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
		minChunkNumPerBatch: cfg.MinChunkNumPerBatch,
		batchTimeoutSec: cfg.BatchTimeoutSec,
		gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,

		batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "bridge_propose_batch_circle_total",
			Help: "Total number of propose batch total.",
		}),
		proposeBatchFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "bridge_propose_batch_failure_circle_total",
			Help: "Total number of propose batch total.",
		}),
		proposeBatchUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "bridge_propose_batch_update_info_total",
			Help: "Total number of propose batch update info total.",
		}),
		proposeBatchUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "bridge_propose_batch_update_info_failure_total",
			Help: "Total number of propose batch update info failure total.",
		}),
		totalL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
			Name: "bridge_propose_batch_total_l1_commit_gas",
			Help: "The total l1 commit gas",
		}),
		totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
			Name: "bridge_propose_batch_total_l1_call_data_size",
			Help: "The total l1 call data size",
		}),
		batchChunksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
			Name: "bridge_propose_batch_chunks_number",
			Help: "The number of chunks in the batch",
		}),
		batchFirstChunkTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "bridge_propose_batch_first_chunk_timeout_reached_total",
			Help: "Total times of batch's first chunk timeout reached",
		}),
		batchChunksSuperposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "bridge_propose_batch_chunks_superpose_not_enough_total",
			Help: "Total number of batch chunk superpose not enough",
		}),
	}
}

// TryProposeBatch tries to propose a new batches.
func (p *BatchProposer) TryProposeBatch() {
	p.batchProposerCircleTotal.Inc()
	dbChunks, err := p.proposeBatchChunks()
	if err != nil {
		p.proposeBatchFailureTotal.Inc()
		log.Error("proposeBatchChunks failed", "err", err)
		return
	}
	if err := p.updateBatchInfoInDB(dbChunks); err != nil {
		p.proposeBatchUpdateInfoFailureTotal.Inc()
		log.Error("update batch info in db failed", "err", err)
	}
}

func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
	p.proposeBatchUpdateInfoTotal.Inc()
	numChunks := len(dbChunks)
	if numChunks <= 0 {
		return nil
@@ -77,10 +130,12 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
	err = p.db.Transaction(func(dbTX *gorm.DB) error {
		batch, dbErr := p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
		if dbErr != nil {
			log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure", "error", "start chunk index", startChunkIndex, "end chunk index", endChunkIndex, dbErr)
			return dbErr
		}
		dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batch.Hash, dbTX)
		if dbErr != nil {
			log.Warn("BatchProposer.UpdateBatchHashInRange update the chunk's batch hash failure", "hash", batch.Hash, "error", dbErr)
			return dbErr
		}
		return nil
@@ -132,6 +187,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
	// batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
	totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)

	p.totalL1CommitGas.Set(float64(totalL1CommitGas))
	// Check if the first chunk breaks hard limits.
	// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
	if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
@@ -144,6 +200,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
		)
	}

	p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
	if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
		return nil, fmt.Errorf(
			"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
@@ -174,6 +231,8 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
		}
	}

	p.batchChunksNum.Set(float64(len(dbChunks)))

	var hasChunkTimeout bool
	currentTimeSec := uint64(time.Now().Unix())
	if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
@@ -183,12 +242,14 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
			"chunk outdated time threshold", currentTimeSec,
		)
		hasChunkTimeout = true
		p.batchFirstChunkTimeoutReached.Inc()
	}

	if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
		log.Warn("The chunk number of the batch is less than the minimum limit",
			"chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
		)
		p.batchChunksSuperposeNotEnoughTotal.Inc()
		return nil, nil
	}
	return dbChunks, nil
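The header-size comment in proposeBatchChunks leans on the standard integer-ceiling trick: (totalL1MessagePopped+255)/256 computes ceil(l1MessagePopped/256) without floating point. A worked sketch of the arithmetic follows; the keccakGas helper here follows the EVM KECCAK256 cost schedule (30 gas base plus 6 per 32-byte word) and is only an assumed stand-in for the repo's getKeccakGas:

package main

import "fmt"

// ceilDiv computes ceil(a/b) with integers, the same trick as
// (totalL1MessagePopped+255)/256 in the diff above.
func ceilDiv(a, b uint64) uint64 {
	return (a + b - 1) / b
}

// keccakGas uses the EVM cost schedule for KECCAK256 (30 base + 6 per
// 32-byte word); assumed to match what getKeccakGas encodes.
func keccakGas(dataLen uint64) uint64 {
	return 30 + 6*ceilDiv(dataLen, 32)
}

func main() {
	// batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
	for _, popped := range []uint64{0, 1, 256, 257} {
		headerSize := 89 + 32*((popped+255)/256)
		fmt.Printf("l1MessagePopped=%d -> header=%d bytes, keccak gas=%d\n",
			popped, headerSize, keccakGas(headerSize))
	}
}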
@@ -30,7 +30,7 @@ func testBatchProposer(t *testing.T) {
		MinL1CommitCalldataSizePerChunk: 0,
		MaxRowConsumptionPerChunk: 1048319,
		ChunkTimeoutSec: 300,
	}, db)
	}, db, nil)
	cp.TryProposeChunk()

	bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
@@ -39,7 +39,7 @@ func testBatchProposer(t *testing.T) {
		MaxL1CommitCalldataSizePerBatch: 1000000,
		MinChunkNumPerBatch: 1,
		BatchTimeoutSec: 300,
	}, db)
	}, db, nil)
	bp.TryProposeBatch()

	chunkOrm := orm.NewChunk(db)
@@ -6,6 +6,8 @@ import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	gethTypes "github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"
@@ -57,10 +59,23 @@ type ChunkProposer struct {
	maxRowConsumptionPerChunk uint64
	chunkTimeoutSec uint64
	gasCostIncreaseMultiplier float64

	chunkProposerCircleTotal prometheus.Counter
	proposeChunkFailureTotal prometheus.Counter
	proposeChunkUpdateInfoTotal prometheus.Counter
	proposeChunkUpdateInfoFailureTotal prometheus.Counter
	chunkL2TxNum prometheus.Gauge
	chunkEstimateL1CommitGas prometheus.Gauge
	totalL1CommitCalldataSize prometheus.Gauge
	totalTxGasUsed prometheus.Gauge
	maxTxConsumption prometheus.Gauge
	chunkBlocksNum prometheus.Gauge
	chunkBlockTimeoutReached prometheus.Counter
	chunkBlocksSuperposeNotEnoughTotal prometheus.Counter
}

// NewChunkProposer creates a new ChunkProposer instance.
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB) *ChunkProposer {
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
	return &ChunkProposer{
		ctx: ctx,
		db: db,
@@ -74,18 +89,70 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
		maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
		chunkTimeoutSec: cfg.ChunkTimeoutSec,
		gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,

		chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "bridge_propose_chunk_circle_total",
			Help: "Total number of propose chunk total.",
		}),
		proposeChunkFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "bridge_propose_chunk_failure_circle_total",
			Help: "Total number of propose chunk failure total.",
		}),
		proposeChunkUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "bridge_propose_chunk_update_info_total",
			Help: "Total number of propose chunk update info total.",
		}),
		proposeChunkUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "bridge_propose_chunk_update_info_failure_total",
			Help: "Total number of propose chunk update info failure total.",
		}),
		chunkL2TxNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
			Name: "bridge_propose_chunk_l2_tx_num",
			Help: "The chunk l2 tx num",
		}),
		chunkEstimateL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
			Name: "bridge_propose_chunk_estimate_l1_commit_gas",
			Help: "The chunk estimate l1 commit gas",
		}),
		totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
			Name: "bridge_propose_chunk_total_l1_commit_call_data_size",
			Help: "The total l1 commit call data size",
		}),
		totalTxGasUsed: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
			Name: "bridge_propose_chunk_total_tx_gas_used",
			Help: "The total tx gas used",
		}),
		maxTxConsumption: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
			Name: "bridge_propose_chunk_max_tx_consumption",
			Help: "The max tx consumption",
		}),
		chunkBlocksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
			Name: "bridge_propose_chunk_chunk_block_number",
			Help: "The number of blocks in the chunk",
		}),
		chunkBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "bridge_propose_chunk_first_block_timeout_reached_total",
			Help: "Total times of chunk's first block timeout reached",
		}),
		chunkBlocksSuperposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "bridge_propose_chunk_blocks_superpose_not_enough_total",
			Help: "Total number of chunk block superpose not enough",
		}),
	}
}

// TryProposeChunk tries to propose a new chunk.
func (p *ChunkProposer) TryProposeChunk() {
	p.chunkProposerCircleTotal.Inc()
	proposedChunk, err := p.proposeChunk()
	if err != nil {
		p.proposeChunkFailureTotal.Inc()
		log.Error("propose new chunk failed", "err", err)
		return
	}

	if err := p.updateChunkInfoInDB(proposedChunk); err != nil {
		p.proposeChunkUpdateInfoFailureTotal.Inc()
		log.Error("update chunk info in orm failed", "err", err)
	}
}
@@ -95,9 +162,11 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
		return nil
	}

	p.proposeChunkUpdateInfoTotal.Inc()
	err := p.db.Transaction(func(dbTX *gorm.DB) error {
		dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, dbTX)
		if err != nil {
			log.Warn("ChunkProposer.InsertChunk failed", "chunk hash", chunk.Hash)
			return err
		}
		if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
@@ -131,6 +200,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
		return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
	}

	p.chunkL2TxNum.Set(float64(totalL2TxNum))
	// Check if the first block breaks hard limits.
	// If so, it indicates there are bugs in sequencer, manual fix is needed.
	if totalL2TxNum > p.maxL2TxNumPerChunk {
@@ -142,6 +212,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
		)
	}

	p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
	if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
		return nil, fmt.Errorf(
			"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
@@ -151,6 +222,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
		)
	}

	p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
	if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
		return nil, fmt.Errorf(
			"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
@@ -160,6 +232,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
		)
	}

	p.totalTxGasUsed.Set(float64(totalTxGasUsed))
	// Check if the first block breaks any soft limits.
	if totalTxGasUsed > p.maxTxGasPerChunk {
		log.Warn(
@@ -169,7 +242,10 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
			"max gas limit", p.maxTxGasPerChunk,
		)
	}
	if max := crc.max(); max > p.maxRowConsumptionPerChunk {

	max := crc.max()
	p.maxTxConsumption.Set(float64(max))
	if max > p.maxRowConsumptionPerChunk {
		return nil, fmt.Errorf(
			"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
			firstBlock.Header.Number,
@@ -200,6 +276,8 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
		}
	}

	p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))

	var hasBlockTimeout bool
	currentTimeSec := uint64(time.Now().Unix())
	if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
@@ -208,6 +286,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
			"block timestamp", blocks[0].Header.Time,
			"block outdated time threshold", currentTimeSec,
		)
		p.chunkBlockTimeoutReached.Inc()
		hasBlockTimeout = true
	}

@@ -216,6 +295,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
			"totalL1CommitCalldataSize", totalL1CommitCalldataSize,
			"minL1CommitCalldataSizePerChunk", p.minL1CommitCalldataSizePerChunk,
		)
		p.chunkBlocksSuperposeNotEnoughTotal.Inc()
		return nil, nil
	}
	return chunk, nil
@@ -30,7 +30,7 @@ func testChunkProposer(t *testing.T) {
		MinL1CommitCalldataSizePerChunk: 0,
		MaxRowConsumptionPerChunk: 1048319,
		ChunkTimeoutSec: 300,
	}, db)
	}, db, nil)
	cp.TryProposeChunk()

	expectedChunk := &types.Chunk{
@@ -62,7 +62,7 @@ func testChunkProposerRowConsumption(t *testing.T) {
		MinL1CommitCalldataSizePerChunk: 0,
		MaxRowConsumptionPerChunk: 0, // !
		ChunkTimeoutSec: 300,
	}, db)
	}, db, nil)
	cp.TryProposeChunk()

	chunkOrm := orm.NewChunk(db)
@@ -4,6 +4,7 @@ import (
	"context"
	"math/big"

	"github.com/prometheus/client_golang/prometheus"
	geth "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/common"
@@ -11,11 +12,9 @@ import (
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/log"
	gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
	"github.com/scroll-tech/go-ethereum/rpc"
	"gorm.io/gorm"

	"scroll-tech/common/metrics"
	"scroll-tech/common/types"

	bridgeAbi "scroll-tech/bridge/abi"
@@ -23,12 +22,6 @@ import (
	"scroll-tech/bridge/internal/utils"
)

var (
	bridgeL1MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
	bridgeL1MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
	bridgeL1MsgsRollupEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
)

type rollupEvent struct {
	batchHash common.Hash
	txHash common.Hash
@@ -59,10 +52,12 @@ type L1WatcherClient struct {
	processedMsgHeight uint64
	// The height of the block that the watcher has retrieved header rlp
	processedBlockHeight uint64

	metrics *l1WatcherMetrics
}

// NewL1WatcherClient returns a new instance of L1WatcherClient.
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB) *L1WatcherClient {
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
	l1MessageOrm := orm.NewL1Message(db)
	savedHeight, err := l1MessageOrm.GetLayer1LatestWatchedHeight()
	if err != nil {
@@ -102,6 +97,7 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig

		processedMsgHeight: uint64(savedHeight),
		processedBlockHeight: savedL1BlockHeight,
		metrics: initL1WatcherMetrics(reg),
	}
}

@@ -125,6 +121,7 @@ func (w *L1WatcherClient) SetConfirmations(confirmations rpc.BlockNumber) {

// FetchBlockHeader pull latest L1 blocks and save in DB
func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
	w.metrics.l1WatcherFetchBlockHeaderTotal.Inc()
	fromBlock := int64(w.processedBlockHeight) + 1
	toBlock := int64(blockHeight)
	if toBlock < fromBlock {
@@ -171,6 +168,7 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {

	// update processed height
	w.processedBlockHeight = uint64(toBlock)
	w.metrics.l1WatcherFetchBlockHeaderProcessedBlockHeight.Set(float64(w.processedBlockHeight))
	return nil
}

@@ -189,6 +187,7 @@ func (w *L1WatcherClient) FetchContractEvent() error {
	toBlock := int64(blockHeight)

	for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
		w.metrics.l1WatcherFetchContractEventTotal.Inc()
		to := from + contractEventsBlocksFetchLimit - 1

		if to > toBlock {
@@ -220,9 +219,10 @@ func (w *L1WatcherClient) FetchContractEvent() error {
		}
		if len(logs) == 0 {
			w.processedMsgHeight = uint64(to)
			bridgeL1MsgsSyncHeightGauge.Update(to)
			w.metrics.l1WatcherFetchContractEventProcessedBlockHeight.Set(float64(to))
			continue
		}

		log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))

		sentMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
@@ -232,8 +232,8 @@ func (w *L1WatcherClient) FetchContractEvent() error {
		}
		sentMessageCount := int64(len(sentMessageEvents))
		rollupEventCount := int64(len(rollupEvents))
		bridgeL1MsgsSentEventsTotalCounter.Inc(sentMessageCount)
		bridgeL1MsgsRollupEventsTotalCounter.Inc(rollupEventCount)
		w.metrics.l1WatcherFetchContractEventSentEventsTotal.Add(float64(sentMessageCount))
		w.metrics.l1WatcherFetchContractEventRollupEventsTotal.Add(float64(rollupEventCount))
		log.Info("L1 events types", "SentMessageCount", sentMessageCount, "RollupEventCount", rollupEventCount)

		// use rollup event to update rollup results db status
@@ -273,7 +273,8 @@ func (w *L1WatcherClient) FetchContractEvent() error {
		}

		w.processedMsgHeight = uint64(to)
		bridgeL1MsgsSyncHeightGauge.Update(to)
		w.metrics.l1WatcherFetchContractEventSuccessTotal.Inc()
		w.metrics.l1WatcherFetchContractEventProcessedBlockHeight.Set(float64(w.processedMsgHeight))
	}

	return nil
59
bridge/internal/controller/watcher/l1_watcher_metrics.go
Normal file
@@ -0,0 +1,59 @@
package watcher

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type l1WatcherMetrics struct {
	l1WatcherFetchBlockHeaderTotal prometheus.Counter
	l1WatcherFetchBlockHeaderProcessedBlockHeight prometheus.Gauge
	l1WatcherFetchContractEventTotal prometheus.Counter
	l1WatcherFetchContractEventSuccessTotal prometheus.Counter
	l1WatcherFetchContractEventProcessedBlockHeight prometheus.Gauge
	l1WatcherFetchContractEventSentEventsTotal prometheus.Counter
	l1WatcherFetchContractEventRollupEventsTotal prometheus.Counter
}

var (
	initL1WatcherMetricOnce sync.Once
	l1WatcherMetric *l1WatcherMetrics
)

func initL1WatcherMetrics(reg prometheus.Registerer) *l1WatcherMetrics {
	initL1WatcherMetricOnce.Do(func() {
		l1WatcherMetric = &l1WatcherMetrics{
			l1WatcherFetchBlockHeaderTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_l1_watcher_fetch_block_header_total",
				Help: "The total number of l1 watcher fetch block header total",
			}),
			l1WatcherFetchBlockHeaderProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
				Name: "bridge_l1_watcher_fetch_block_header_processed_block_height",
				Help: "The current processed block height of l1 watcher fetch block header",
			}),
			l1WatcherFetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_l1_watcher_fetch_block_contract_event_total",
				Help: "The total number of l1 watcher fetch contract event total",
			}),
			l1WatcherFetchContractEventSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_l1_watcher_fetch_block_contract_event_success_total",
				Help: "The total number of l1 watcher fetch contract event success total",
			}),
			l1WatcherFetchContractEventProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
				Name: "bridge_l1_watcher_fetch_block_contract_event_processed_block_height",
				Help: "The current processed block height of l1 watcher fetch contract event",
			}),
			l1WatcherFetchContractEventSentEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_l1_watcher_fetch_block_contract_event_sent_event_total",
				Help: "The current processed block height of l1 watcher fetch contract sent event",
			}),
			l1WatcherFetchContractEventRollupEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_l1_watcher_fetch_block_contract_event_rollup_event_total",
				Help: "The current processed block height of l1 watcher fetch contract rollup event",
			}),
		}
	})
	return l1WatcherMetric
}
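The sync.Once guard in this new file is load-bearing: promauto registers eagerly and panics if the same metric name is registered twice on one registry, and initL1WatcherMetrics may run for every constructed watcher. A compressed sketch of the same init-once singleton shape, with illustrative names:

package main

import (
	"fmt"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type exampleMetrics struct {
	fetchTotal prometheus.Counter
}

var (
	initOnce sync.Once
	shared   *exampleMetrics
)

func initExampleMetrics(reg prometheus.Registerer) *exampleMetrics {
	initOnce.Do(func() {
		shared = &exampleMetrics{
			fetchTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "example_fetch_total",
				Help: "Registered exactly once, however many callers init.",
			}),
		}
	})
	return shared
}

func main() {
	reg := prometheus.NewRegistry()
	a := initExampleMetrics(reg)
	b := initExampleMetrics(reg) // no second registration, no panic
	fmt.Println(a == b)          // true: both callers share one instance
}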
@@ -30,7 +30,8 @@ func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
	client, err := ethclient.Dial(base.L1gethImg.Endpoint())
	assert.NoError(t, err)
	l1Cfg := cfg.L1Config
	watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
	watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress,
		l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil)
	assert.NoError(t, watcher.FetchContractEvent())
	return watcher, db
}
@@ -5,6 +5,7 @@ import (
	"fmt"
	"math/big"

	"github.com/prometheus/client_golang/prometheus"
	geth "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/common"
@@ -13,11 +14,9 @@ import (
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/event"
	"github.com/scroll-tech/go-ethereum/log"
	gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
	"github.com/scroll-tech/go-ethereum/rpc"
	"gorm.io/gorm"

	"scroll-tech/common/metrics"
	"scroll-tech/common/types"

	bridgeAbi "scroll-tech/bridge/abi"
@@ -25,14 +24,6 @@ import (
	"scroll-tech/bridge/internal/utils"
)

// Metrics
var (
	bridgeL2MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
	bridgeL2BlocksFetchedHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
	bridgeL2BlocksFetchedGapGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
	bridgeL2MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
)

// L2WatcherClient provide APIs which support others to subscribe to various event from l2geth
type L2WatcherClient struct {
	ctx context.Context
@@ -56,10 +47,12 @@ type L2WatcherClient struct {
	processedMsgHeight uint64

	stopped uint64

	metrics *l2WatcherMetrics
}

// NewL2WatcherClient take a l2geth instance to generate a l2watcherclient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB) *L2WatcherClient {
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB, reg prometheus.Registerer) *L2WatcherClient {
	l1MessageOrm := orm.NewL1Message(db)
	var savedHeight uint64
	l1msg, err := l1MessageOrm.GetLayer1LatestMessageWithLayer2Hash()
@@ -93,6 +86,7 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
		withdrawTrieRootSlot: withdrawTrieRootSlot,

		stopped: 0,
		metrics: initL2WatcherMetrics(reg),
	}

	return &w
@@ -102,6 +96,7 @@ const blockTracesFetchLimit = uint64(10)

// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
	w.metrics.fetchRunningMissingBlocksTotal.Inc()
	heightInDB, err := w.l2BlockOrm.GetL2BlocksLatestHeight(w.ctx)
	if err != nil {
		log.Error("failed to GetL2BlocksLatestHeight", "err", err)
@@ -120,8 +115,8 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
			log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
			return
		}
		bridgeL2BlocksFetchedHeightGauge.Update(int64(to))
		bridgeL2BlocksFetchedGapGauge.Update(int64(blockHeight - to))
		w.metrics.fetchRunningMissingBlocksHeight.Set(float64(to))
		w.metrics.bridgeL2BlocksFetchedGap.Set(float64(blockHeight - to))
	}
}

@@ -199,6 +194,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
		log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
	}()

	w.metrics.fetchContractEventTotal.Inc()
	blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.Client, w.confirmations)
	if err != nil {
		log.Error("failed to get block number", "err", err)
@@ -238,7 +234,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
		}
		if len(logs) == 0 {
			w.processedMsgHeight = uint64(to)
			bridgeL2MsgsSyncHeightGauge.Update(to)
			w.metrics.fetchContractEventHeight.Set(float64(to))
			continue
		}
		log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
@@ -250,7 +246,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
		}

		relayedMessageCount := int64(len(relayedMessageEvents))
		bridgeL2MsgsRelayedEventsTotalCounter.Inc(relayedMessageCount)
		w.metrics.bridgeL2MsgsRelayedEventsTotal.Add(float64(relayedMessageCount))
		log.Info("L2 events types", "RelayedMessageCount", relayedMessageCount)

		// Update relayed message first to make sure we don't forget to update submited message.
@@ -269,7 +265,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
		}

		w.processedMsgHeight = uint64(to)
		bridgeL2MsgsSyncHeightGauge.Update(to)
		w.metrics.fetchContractEventHeight.Set(float64(to))
	}
}
54
bridge/internal/controller/watcher/l2_watcher_metrics.go
Normal file
@@ -0,0 +1,54 @@
package watcher

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type l2WatcherMetrics struct {
	fetchRunningMissingBlocksTotal prometheus.Counter
	fetchRunningMissingBlocksHeight prometheus.Gauge
	fetchContractEventTotal prometheus.Counter
	fetchContractEventHeight prometheus.Gauge
	bridgeL2MsgsRelayedEventsTotal prometheus.Counter
	bridgeL2BlocksFetchedGap prometheus.Gauge
}

var (
	initL2WatcherMetricOnce sync.Once
	l2WatcherMetric *l2WatcherMetrics
)

func initL2WatcherMetrics(reg prometheus.Registerer) *l2WatcherMetrics {
	initL2WatcherMetricOnce.Do(func() {
		l2WatcherMetric = &l2WatcherMetrics{
			fetchRunningMissingBlocksTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_l2_watcher_fetch_running_missing_blocks_total",
				Help: "The total number of l2 watcher fetch running missing blocks",
			}),
			fetchRunningMissingBlocksHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
				Name: "bridge_l2_watcher_fetch_running_missing_blocks_height",
				Help: "The total number of l2 watcher fetch running missing blocks height",
			}),
			fetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_l2_watcher_fetch_contract_events_total",
				Help: "The total number of l2 watcher fetch contract events",
			}),
			fetchContractEventHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
				Name: "bridge_l2_watcher_fetch_contract_height",
				Help: "The total number of l2 watcher fetch contract height",
			}),
			bridgeL2MsgsRelayedEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_l2_watcher_msg_relayed_events_total",
				Help: "The total number of l2 watcher msg relayed event",
			}),
			bridgeL2BlocksFetchedGap: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
				Name: "bridge_l2_watcher_blocks_fetched_gap",
				Help: "The gap of l2 fetch",
			}),
		}
	})
	return l2WatcherMetric
}
@@ -34,7 +34,8 @@ import (
func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {
	db := setupDB(t)
	l2cfg := cfg.L2Config
	watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
	watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress,
		l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db, nil)
	return watcher, db
}

@@ -50,7 +51,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {

	l1cfg := cfg.L1Config
	l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
	newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKey)
	newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKey, "test", "test", nil)
	assert.NoError(t, err)

	// Create several transactions and commit to block
@@ -95,7 +96,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {

func prepareWatcherClient(l2Cli *ethclient.Client, db *gorm.DB, contractAddr common.Address) *L2WatcherClient {
	confirmations := rpc.LatestBlockNumber
	return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
	return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db, nil)
}

func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {

@@ -26,13 +26,14 @@ func testImportL1GasPrice(t *testing.T) {
	l1Cfg := bridgeApp.Config.L1Config

	// Create L1Relayer
	l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
	l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
	assert.NoError(t, err)

	// Create L1Watcher
	startHeight, err := l1Client.BlockNumber(context.Background())
	assert.NoError(t, err)
	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0,
		l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)

	// fetch new blocks
	number, err := l1Client.BlockNumber(context.Background())
@@ -67,7 +68,7 @@ func testImportL2GasPrice(t *testing.T) {
	prepareContracts(t)

	l2Cfg := bridgeApp.Config.L2Config
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, nil)
	assert.NoError(t, err)

	// add fake chunk

@@ -29,14 +29,16 @@ func testRelayL1MessageSucceed(t *testing.T) {
	l2Cfg := bridgeApp.Config.L2Config

	// Create L1Relayer
	l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
	l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
	assert.NoError(t, err)
	// Create L1Watcher
	confirmations := rpc.LatestBlockNumber
	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress,
		l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)

	// Create L2Watcher
	l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
	l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress,
		l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db, nil)

	// send message through l1 messenger contract
	nonce, err := l1MessengerInstance.MessageNonce(&bind.CallOpts{})

@@ -5,7 +5,6 @@ import (
	"math/big"
	"testing"

	"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
	"github.com/scroll-tech/go-ethereum/common"
	gethTypes "github.com/scroll-tech/go-ethereum/core/types"
	"github.com/stretchr/testify/assert"
@@ -13,6 +12,7 @@ import (
	"scroll-tech/common/database"
	"scroll-tech/common/types"
	"scroll-tech/common/types/message"
	"scroll-tech/common/utils"

	"scroll-tech/bridge/internal/config"
	"scroll-tech/bridge/internal/controller/relayer"
@@ -28,12 +28,13 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {

	// Create L2Relayer
	l2Cfg := bridgeApp.Config.L2Config
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, nil)
	assert.NoError(t, err)

	// Create L1Watcher
	l1Cfg := bridgeApp.Config.L1Config
	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress,
		l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)

	// add some blocks to db
	var wrappedBlocks []*types.WrappedBlock
@@ -64,7 +65,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
		MinL1CommitCalldataSizePerChunk: 0,
		MaxRowConsumptionPerChunk: 1048319,
		ChunkTimeoutSec: 300,
	}, db)
	}, db, nil)
	cp.TryProposeChunk()

	chunkOrm := orm.NewChunk(db)
@@ -78,7 +79,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
		MaxL1CommitCalldataSizePerBatch: 1000000,
		MinChunkNumPerBatch: 1,
		BatchTimeoutSec: 300,
	}, db)
	}, db, nil)
	bp.TryProposeBatch()

	l2Relayer.ProcessPendingBatches()
@@ -91,20 +92,22 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
	assert.NotEmpty(t, batch.CommitTxHash)
	assert.Equal(t, types.RollupCommitting, types.RollupStatus(batch.RollupStatus))

	assert.NoError(t, err)
	commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(batch.CommitTxHash))
	assert.NoError(t, err)
	commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
	assert.NoError(t, err)
	assert.Equal(t, len(commitTxReceipt.Logs), 1)
	success := utils.TryTimes(30, func() bool {
		var receipt *gethTypes.Receipt
		receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
		return err == nil && receipt.Status == 1
	})
	assert.True(t, success)

	// fetch rollup events
	err = l1Watcher.FetchContractEvent()
	assert.NoError(t, err)
	statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
	assert.NoError(t, err)
	assert.Equal(t, 1, len(statuses))
	assert.Equal(t, types.RollupCommitted, statuses[0])
	success = utils.TryTimes(30, func() bool {
		err = l1Watcher.FetchContractEvent()
		assert.NoError(t, err)
		var statuses []types.RollupStatus
		statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
		return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
	})
	assert.True(t, success)

	// add dummy proof
	proof := &message.BatchProof{
@@ -118,7 +121,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
	// process committed batch and check status
	l2Relayer.ProcessCommittedBatches()

	statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
	statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
	assert.NoError(t, err)
	assert.Equal(t, 1, len(statuses))
	assert.Equal(t, types.RollupFinalizing, statuses[0])
@@ -128,17 +131,20 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
	assert.NotNil(t, batch)
	assert.NotEmpty(t, batch.FinalizeTxHash)

	finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(batch.FinalizeTxHash))
	assert.NoError(t, err)
	finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
	assert.NoError(t, err)
	assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
	success = utils.TryTimes(30, func() bool {
		var receipt *gethTypes.Receipt
		receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
		return err == nil && receipt.Status == 1
	})
	assert.True(t, success)

	// fetch rollup events
	err = l1Watcher.FetchContractEvent()
	assert.NoError(t, err)
	statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
	assert.NoError(t, err)
	assert.Equal(t, 1, len(statuses))
	assert.Equal(t, types.RollupFinalized, statuses[0])
	success = utils.TryTimes(30, func() bool {
		err = l1Watcher.FetchContractEvent()
		assert.NoError(t, err)
		var statuses []types.RollupStatus
		statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
		return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
	})
	assert.True(t, success)
}
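The integration-test changes above replace bind.WaitMined with polling via utils.TryTimes, re-checking the receipt (and the watcher-driven DB status) until it sticks; polling tolerates transient RPC errors that would make a single blocking wait flaky. Below is a sketch of a TryTimes-style helper; the retry count matches the call sites, but the 500ms interval is an assumption about the real helper:

package main

import (
	"fmt"
	"time"
)

// tryTimes retries fn up to n times, pausing between attempts, and
// reports whether it ever returned true. This mirrors how the tests
// use scroll-tech's utils.TryTimes; the interval here is assumed.
func tryTimes(n int, fn func() bool) bool {
	for i := 0; i < n; i++ {
		if fn() {
			return true
		}
		time.Sleep(500 * time.Millisecond)
	}
	return false
}

func main() {
	attempts := 0
	ok := tryTimes(30, func() bool {
		attempts++
		return attempts >= 3 // stand-in for "receipt exists and Status == 1"
	})
	fmt.Println(ok, attempts) // true 3
}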
@@ -5,6 +5,7 @@ go 1.19
require (
	github.com/bits-and-blooms/bitset v1.7.0
	github.com/docker/docker v23.0.6+incompatible
	github.com/gin-contrib/pprof v1.4.0
	github.com/gin-gonic/gin v1.9.1
	github.com/jmoiron/sqlx v1.3.5
	github.com/lib/pq v1.10.9

@@ -114,8 +114,11 @@ github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/pprof v1.4.0 h1:XxiBSf5jWZ5i16lNOPbMTVdgHBdhfGRD5PZ1LWazzvg=
github.com/gin-contrib/pprof v1.4.0/go.mod h1:RrehPJasUVBPK6yTUwOl8/NP6i0vbUgmxtis+Z5KE90=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
@@ -133,11 +136,15 @@ github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k=
github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@@ -148,6 +155,7 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
@@ -293,6 +301,7 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
@@ -312,6 +321,7 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
@@ -372,6 +382,7 @@ github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
@@ -415,6 +426,7 @@ github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeC
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
@@ -449,6 +461,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
@@ -467,6 +480,8 @@ github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
|
||||
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
|
||||
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
|
||||
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
|
||||
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
|
||||
@@ -501,6 +516,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
|
||||
@@ -616,6 +632,8 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -634,6 +652,7 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
|
||||
@@ -726,6 +745,7 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
@@ -748,6 +768,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
|
||||
|
||||
common/libzkp/impl/Cargo.lock (generated, 41 changes)
@@ -32,7 +32,7 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
dependencies = [
 "ark-std",
 "env_logger 0.10.0",
@@ -433,7 +433,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
dependencies = [
 "eth-types",
 "ethers-core",
@@ -1049,7 +1049,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
dependencies = [
 "ethers-core",
 "ethers-signers",
@@ -1226,7 +1226,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
dependencies = [
 "eth-types",
 "geth-utils",
@@ -1439,7 +1439,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
dependencies = [
 "digest 0.7.6",
 "eth-types",
@@ -1479,7 +1479,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
dependencies = [
 "env_logger 0.9.3",
 "gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
@@ -1583,7 +1583,7 @@ dependencies = [
[[package]]
name = "halo2-base"
version = "0.2.2"
-source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
+source = "git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
dependencies = [
 "ff",
 "halo2_proofs",
@@ -1598,7 +1598,7 @@ dependencies = [
[[package]]
name = "halo2-ecc"
version = "0.2.2"
-source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
+source = "git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
dependencies = [
 "ff",
 "group",
@@ -1633,7 +1633,7 @@ dependencies = [
[[package]]
name = "halo2-mpt-circuits"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=v0.5#1c11b6c9b1245073a76c3ce7100b6798060f7cb8"
+source = "git+https://github.com/scroll-tech/mpt-circuit.git?tag=v0.5.1#2163a9c436ed85363c954ecf7e6e1044a1b991dc"
dependencies = [
 "ethers-core",
 "halo2_proofs",
@@ -1655,7 +1655,7 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.2.0"
-source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#4d8a405a97ce445c88fc8ec40f9a2dd0a661c697"
+source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#19de67c07a9b9b567580466763f93ebfbc3bb799"
dependencies = [
 "ark-std",
 "blake2b_simd",
@@ -2077,7 +2077,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
dependencies = [
 "env_logger 0.9.3",
 "eth-types",
@@ -2264,7 +2264,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
dependencies = [
 "eth-types",
 "ethers-core",
@@ -2279,7 +2279,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
dependencies = [
 "bus-mapping",
 "eth-types",
@@ -2754,8 +2754,8 @@ dependencies = [

[[package]]
name = "prover"
-version = "0.4.0"
-source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.13#0756ac68ee2f41c51f09a748cd28220290fe49ae"
+version = "0.6.4"
+source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.5#ccb3cd457080dcfdf8ae59416eb47ae9b6b6ddd6"
dependencies = [
 "aggregator",
 "anyhow",
@@ -3624,7 +3624,7 @@ checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"
[[package]]
name = "snark-verifier"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#12c306ec57849921e690221b10b8a08189868d4a"
+source = "git+https://github.com/scroll-tech//snark-verifier?tag=v0.1.1#11a09d4a37c31c659b29e2dac0ceb544a776ad7b"
dependencies = [
 "bytes",
 "ethereum-types 0.14.1",
@@ -3648,7 +3648,7 @@ dependencies = [
[[package]]
name = "snark-verifier-sdk"
version = "0.0.1"
-source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#12c306ec57849921e690221b10b8a08189868d4a"
+source = "git+https://github.com/scroll-tech//snark-verifier?tag=v0.1.1#11a09d4a37c31c659b29e2dac0ceb544a776ad7b"
dependencies = [
 "bincode",
 "env_logger 0.10.0",
@@ -4039,8 +4039,8 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"

[[package]]
name = "types"
-version = "0.4.0"
-source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.13#0756ac68ee2f41c51f09a748cd28220290fe49ae"
+version = "0.6.4"
+source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.5#ccb3cd457080dcfdf8ae59416eb47ae9b6b6ddd6"
dependencies = [
 "base64 0.13.1",
 "blake2",
@@ -4491,10 +4491,11 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
dependencies = [
 "array-init",
 "bus-mapping",
+ "either",
 "env_logger 0.9.3",
 "eth-types",
 "ethers-core",

common/libzkp/impl/Cargo.toml
@@ -19,9 +19,13 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
[patch."https://github.com/privacy-scaling-explorations/halo2curves.git"]
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }

+[patch."https://github.com/scroll-tech/snark-verifier"]
+snark-verifier = { git = "https://github.com/scroll-tech//snark-verifier", tag = "v0.1.1" }
+snark-verifier-sdk = { git = "https://github.com/scroll-tech//snark-verifier", tag = "v0.1.1" }
+
[dependencies]
-prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.13" }
-types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.13" }
+prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.5" }
+types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.5" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }

log = "0.4"
@@ -1,57 +0,0 @@
package metrics

import (
    "context"
    "net"
    "net/http"
    "strconv"

    "github.com/scroll-tech/go-ethereum/log"
    "github.com/scroll-tech/go-ethereum/metrics"
    "github.com/scroll-tech/go-ethereum/metrics/prometheus"
    "github.com/scroll-tech/go-ethereum/rpc"
    "github.com/urfave/cli/v2"

    "scroll-tech/common/utils"
)

var (
    // ScrollRegistry is used for scroll metrics.
    ScrollRegistry = metrics.NewRegistry()
)

// Serve starts the metrics server on the given address, will be closed when the given
// context is canceled.
func Serve(ctx context.Context, c *cli.Context) {
    if !c.Bool(utils.MetricsEnabled.Name) {
        return
    }

    address := net.JoinHostPort(
        c.String(utils.MetricsAddr.Name),
        strconv.Itoa(c.Int(utils.MetricsPort.Name)),
    )

    server := &http.Server{
        Addr:         address,
        Handler:      prometheus.Handler(ScrollRegistry),
        ReadTimeout:  rpc.DefaultHTTPTimeouts.ReadTimeout,
        WriteTimeout: rpc.DefaultHTTPTimeouts.WriteTimeout,
        IdleTimeout:  rpc.DefaultHTTPTimeouts.IdleTimeout,
    }

    go func() {
        <-ctx.Done()
        if err := server.Close(); err != nil {
            log.Error("Failed to close metrics server", "error", err)
        }
    }()

    log.Info("Starting metrics server", "address", address)

    go func() {
        if err := server.ListenAndServe(); err != nil {
            log.Error("start metrics server error", "error", err)
        }
    }()
}
common/metrics/middleware.go (new file, 18 lines)
@@ -0,0 +1,18 @@
package metrics

import (
    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"

    "scroll-tech/common/metrics/ginmetrics"
)

// Use registers the gin metrics middleware on the given router.
func Use(router *gin.Engine, metricsPrefix string, reg prometheus.Registerer) {
    m := ginmetrics.GetMonitor(reg)
    m.SetMetricPath("/metrics")
    m.SetMetricPrefix(metricsPrefix + "_")
    m.SetSlowTime(1)
    m.SetDuration([]float64{0.025, .05, .1, .5, 1, 5, 10})
    m.UseWithoutExposingEndpoint(router)
}
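For context, a minimal sketch of how this middleware might be wired into a service's API router. The `coordinator` prefix, the listen address, and the standalone `main` are illustrative assumptions; only `metrics.Use` and its signature come from the file above.

```go
package main

import (
    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"

    "scroll-tech/common/metrics"
)

func main() {
    // A dedicated registry; *prometheus.Registry satisfies prometheus.Registerer.
    reg := prometheus.NewRegistry()

    router := gin.New()
    router.Use(gin.Recovery())

    // Install the gin metrics middleware under an assumed "coordinator" prefix.
    // UseWithoutExposingEndpoint means this router records metrics but does not
    // serve /metrics itself; the separate metrics server below handles that.
    metrics.Use(router, "coordinator", reg)

    _ = router.Run(":8080") // illustrative listen address
}
```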
common/metrics/server.go (new file, 49 lines)
@@ -0,0 +1,49 @@
package metrics

import (
    "errors"
    "fmt"
    "net/http"
    "time"

    // enable the pprof
    _ "net/http/pprof"

    "github.com/gin-contrib/pprof"
    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/urfave/cli/v2"

    "scroll-tech/common/utils"
)

// Server starts the metrics server on the configured port; it runs until the
// process exits.
func Server(c *cli.Context, reg *prometheus.Registry) {
    if !c.Bool(utils.MetricsEnabled.Name) {
        return
    }

    r := gin.New()
    r.Use(gin.Recovery())
    pprof.Register(r)
    r.GET("/metrics", func(context *gin.Context) {
        promhttp.Handler().ServeHTTP(context.Writer, context.Request)
    })

    address := fmt.Sprintf(":%s", c.String(utils.MetricsPort.Name))
    server := &http.Server{
        Addr:              address,
        Handler:           r,
        ReadHeaderTimeout: time.Minute,
    }
    log.Info("Starting metrics server", "address", address)

    go func() {
        if runServerErr := server.ListenAndServe(); runServerErr != nil && !errors.Is(runServerErr, http.ErrServerClosed) {
            log.Crit("run metrics http server failure", "error", runServerErr)
        }
    }()
}
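A hedged sketch of launching this server from a urfave/cli action. The app name and the flag names `metrics` and `metrics.port` are assumptions standing in for whatever `utils.MetricsEnabled` and `utils.MetricsPort` actually declare; only the `metrics.Server(c, reg)` call mirrors the file above.

```go
package main

import (
    "os"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/urfave/cli/v2"

    "scroll-tech/common/metrics"
)

func main() {
    app := &cli.App{
        Name: "example-service", // hypothetical service
        Flags: []cli.Flag{
            // Assumed names behind utils.MetricsEnabled / utils.MetricsPort.
            &cli.BoolFlag{Name: "metrics", Usage: "enable the metrics server"},
            &cli.StringFlag{Name: "metrics.port", Value: "6060"},
        },
        Action: func(c *cli.Context) error {
            reg := prometheus.NewRegistry()
            // No-op unless the metrics flag is set; otherwise serves
            // /metrics and pprof endpoints on the configured port.
            metrics.Server(c, reg)
            select {} // a real service would run its main loop here
        },
    }
    if err := app.Run(os.Args); err != nil {
        os.Exit(1)
    }
}
```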
@@ -13,6 +13,18 @@ import (
	"github.com/scroll-tech/go-ethereum/rlp"
)

+// ProofFailureType is the type of a proof failure.
+type ProofFailureType int
+
+const (
+	// ProofFailureUndefined is the undefined proof failure type.
+	ProofFailureUndefined ProofFailureType = iota
+	// ProofFailurePanic is a proof failure caused by a prover panic.
+	ProofFailurePanic
+	// ProofFailureNoPanic is a proof failure without a prover panic.
+	ProofFailureNoPanic
+)

// RespStatus represents status code from prover to scroll
type RespStatus uint32

@@ -250,12 +262,14 @@ type ChunkInfo struct {

// ChunkProof includes the proof info that is required for chunk verification and rollup.
type ChunkProof struct {
-	StorageTrace []byte     `json:"storage_trace"`
-	Protocol     []byte     `json:"protocol"`
-	Proof        []byte     `json:"proof"`
-	Instances    []byte     `json:"instances"`
-	Vk           []byte     `json:"vk"`
-	ChunkInfo    *ChunkInfo `json:"chunk_info,omitempty"` // cross-reference between coordinator computation and prover computation
+	StorageTrace []byte `json:"storage_trace"`
+	Protocol     []byte `json:"protocol"`
+	Proof        []byte `json:"proof"`
+	Instances    []byte `json:"instances"`
+	Vk           []byte `json:"vk"`
+	// cross-reference between coordinator computation and prover computation
+	ChunkInfo  *ChunkInfo `json:"chunk_info,omitempty"`
+	GitVersion string     `json:"git_version,omitempty"`
}

// BatchProof includes the proof info that is required for batch verification and rollup.
@@ -263,6 +277,8 @@ type BatchProof struct {
	Proof     []byte `json:"proof"`
	Instances []byte `json:"instances"`
	Vk        []byte `json:"vk"`
+	// cross-reference between coordinator computation and prover computation
+	GitVersion string `json:"git_version,omitempty"`
}

// SanityCheck checks whether a BatchProof is in a legal format
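To make the `omitempty` addition concrete, here is a self-contained sketch of how the new field serializes; the struct is re-declared locally (only the field layout comes from the diff above), and it shows that `git_version` stays out of the payload until a prover sets it:

```go
package main

import (
    "encoding/json"
    "fmt"
)

// Local copy of the BatchProof shape from the diff above.
type BatchProof struct {
    Proof      []byte `json:"proof"`
    Instances  []byte `json:"instances"`
    Vk         []byte `json:"vk"`
    GitVersion string `json:"git_version,omitempty"`
}

func main() {
    withoutVersion, _ := json.Marshal(BatchProof{Proof: []byte{0x01}})
    withVersion, _ := json.Marshal(BatchProof{Proof: []byte{0x01}, GitVersion: "v4.1.78"})
    fmt.Println(string(withoutVersion)) // no "git_version" key in the output
    fmt.Println(string(withVersion))    // includes "git_version":"v4.1.78"
}
```

Because the field is tagged `omitempty`, older provers that never populate it produce byte-identical payloads to before the change.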
@@ -108,7 +108,7 @@ func TestProofDetailHash(t *testing.T) {
	}
	hash, err := proofDetail.Hash()
	assert.NoError(t, err)
-	expectedHash := "72a00232c1fcb100b1b67e6d12cd449e5d2d890e3a66e50f4c23499d4990766f"
+	expectedHash := "d3b57cb84b0da8043373eeb3612806fb7248d6d1b6e089846ccf3ccce2d9f31c"
	assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}
@@ -6,7 +6,7 @@ import (
	"strings"
)

-var tag = "v4.1.38"
+var tag = "v4.1.78"

var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {
@@ -44,5 +44,5 @@ func CheckScrollProverVersion(proverVersion string) bool {
		return false
	}
	// compare the `scroll_prover` version
-	return remote[2] == local[2]
+	return remote[2] == local[2] || remote[2] == "8c439b1"
}
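A sketch of the relaxed comparison follows. The dash-separated layout with the scroll_prover segment at index 2 is an assumption (the diff does not show how `remote` and `local` are produced); only the final return expression mirrors the change above.

```go
package main

import (
    "fmt"
    "strings"
)

// checkScrollProverVersion mirrors the patched comparison: accept an exact
// scroll_prover match, or the grandfathered commit "8c439b1".
func checkScrollProverVersion(remoteVersion, localVersion string) bool {
    remote := strings.Split(remoteVersion, "-") // assumed dash-separated layout
    local := strings.Split(localVersion, "-")
    if len(remote) < 3 || len(local) < 3 {
        return false
    }
    return remote[2] == local[2] || remote[2] == "8c439b1"
}

func main() {
    // Hypothetical version layout: tag-commit-scrollProverCommit.
    fmt.Println(checkScrollProverVersion(
        "v4.1.78-b734282-8c439b1",
        "v4.1.78-b734282-0cafc84",
    )) // true via the "8c439b1" escape hatch
}
```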
@@ -260,6 +260,23 @@ function owner() external view returns (address)

*Returns the address of the current owner.*

#### Returns

| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |

### rateLimiter

```solidity
function rateLimiter() external view returns (address)
```

The address of the token rate limiter contract.

#### Returns

| Name | Type | Description |
@@ -354,6 +371,22 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |

### updateRateLimiter

```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```

Update the rate limiter contract.

*This function can only be called by the contract owner.*

#### Parameters

| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of the new rate limiter contract. |

### updateTokenMapping

```solidity
@@ -530,6 +563,23 @@ Emitted when some ERC1155 token is refunded.
| tokenId | uint256 | undefined |
| amount | uint256 | undefined |

### UpdateRateLimiter

```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```

Emitted when the owner updates the rate limiter contract.

#### Parameters

| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

### UpdateTokenMapping

```solidity
@@ -227,6 +227,23 @@ function owner() external view returns (address)

*Returns the address of the current owner.*

#### Returns

| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |

### rateLimiter

```solidity
function rateLimiter() external view returns (address)
```

The address of the token rate limiter contract.

#### Returns

| Name | Type | Description |
@@ -299,6 +316,22 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |

### updateRateLimiter

```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```

Update the rate limiter contract.

*This function can only be called by the contract owner.*

#### Parameters

| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of the new rate limiter contract. |

### updateTokenMapping

```solidity
@@ -469,6 +502,23 @@ Emitted when some ERC721 token is refunded.
| recipient `indexed` | address | undefined |
| tokenId | uint256 | undefined |

### UpdateRateLimiter

```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```

Emitted when the owner updates the rate limiter contract.

#### Parameters

| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

### UpdateTokenMapping

```solidity
@@ -239,6 +239,23 @@ Mapping from queue index to previous replay queue index.
|---|---|---|
| _0 | uint256 | undefined |

### rateLimiter

```solidity
function rateLimiter() external view returns (address)
```

The address of the ETH rate limiter contract.

#### Returns

| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |

### relayMessageWithProof

```solidity
@@ -436,6 +453,22 @@ Update max replay times.
|---|---|---|
| _newMaxReplayTimes | uint256 | The new max replay times. |

### updateRateLimiter

```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```

Update the rate limiter contract.

*This function can only be called by the contract owner.*

#### Parameters

| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of the new rate limiter contract. |

### xDomainMessageSender

```solidity
@@ -609,5 +642,22 @@ Emitted when the maximum number of times each message can be replayed is updated
| oldMaxReplayTimes | uint256 | undefined |
| newMaxReplayTimes | uint256 | undefined |

### UpdateRateLimiter

```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```

Emitted when the owner updates the rate limiter contract.

#### Parameters

| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
@@ -225,6 +225,23 @@ function owner() external view returns (address)

*Returns the address of the current owner.*

#### Returns

| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |

### rateLimiter

```solidity
function rateLimiter() external view returns (address)
```

The address of the token rate limiter contract.

#### Returns

| Name | Type | Description |
@@ -275,6 +292,22 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |

### updateRateLimiter

```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```

Update the rate limiter contract.

*This function can only be called by the contract owner.*

#### Parameters

| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of the new rate limiter contract. |

## Events
@@ -372,5 +405,22 @@ Emitted when some ERC20 token is refunded.
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |

### UpdateRateLimiter

```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```

Emitted when the owner updates the rate limiter contract.

#### Parameters

| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
@@ -223,6 +223,23 @@ function owner() external view returns (address)

*Returns the address of the current owner.*

#### Returns

| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |

### rateLimiter

```solidity
function rateLimiter() external view returns (address)
```

The address of the token rate limiter contract.

#### Returns

| Name | Type | Description |
@@ -273,6 +290,22 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |

### updateRateLimiter

```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```

Update the rate limiter contract.

*This function can only be called by the contract owner.*

#### Parameters

| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of the new rate limiter contract. |

## Events
@@ -370,5 +403,22 @@ Emitted when some ERC20 token is refunded.
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |

### UpdateRateLimiter

```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```

Emitted when the owner updates the rate limiter contract.

#### Parameters

| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
@@ -205,6 +205,23 @@ function owner() external view returns (address)

*Returns the address of the current owner.*

#### Returns

| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |

### rateLimiter

```solidity
function rateLimiter() external view returns (address)
```

The address of the token rate limiter contract.

#### Returns

| Name | Type | Description |
@@ -299,6 +316,22 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |

### updateRateLimiter

```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```

Update the rate limiter contract.

*This function can only be called by the contract owner.*

#### Parameters

| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of the new rate limiter contract. |

### updateTokenMapping

```solidity
@@ -455,6 +488,23 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |

### UpdateRateLimiter

```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```

Emitted when the owner updates the rate limiter contract.

#### Parameters

| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

### UpdateTokenMapping

```solidity
@@ -174,6 +174,23 @@ function owner() external view returns (address)

*Returns the address of the current owner.*

#### Returns

| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |

### rateLimiter

```solidity
function rateLimiter() external view returns (address)
```

The address of the token rate limiter contract.

#### Returns

| Name | Type | Description |
@@ -246,6 +263,22 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |

### updateRateLimiter

```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```

Update the rate limiter contract.

*This function can only be called by the contract owner.*

#### Parameters

| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of the new rate limiter contract. |

### updateTokenMapping

```solidity
@@ -397,6 +430,23 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |

### UpdateRateLimiter

```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```

Emitted when the owner updates the rate limiter contract.

#### Parameters

| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

### UpdateTokenMapping

```solidity
@@ -194,6 +194,23 @@ function paused() external view returns (bool)
|---|---|---|
| _0 | bool | undefined |

### rateLimiter

```solidity
function rateLimiter() external view returns (address)
```

The address of the ETH rate limiter contract.

#### Returns

| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |

### relayMessage

```solidity
@@ -328,6 +345,22 @@ Update max failed execution times.
|---|---|---|
| _newMaxFailedExecutionTimes | uint256 | The new max failed execution times. |

### updateRateLimiter

```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```

Update the rate limiter contract.

*This function can only be called by the contract owner.*

#### Parameters

| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of the new rate limiter contract. |

### xDomainMessageSender

```solidity
@@ -501,5 +534,22 @@ Emitted when the maximum number of times each message can fail in L2 is updated.
| oldMaxFailedExecutionTimes | uint256 | undefined |
| newMaxFailedExecutionTimes | uint256 | undefined |

### UpdateRateLimiter

```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```

Emitted when the owner updates the rate limiter contract.

#### Parameters

| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
@@ -139,6 +139,23 @@ function owner() external view returns (address)

*Returns the address of the current owner.*

#### Returns

| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |

### rateLimiter

```solidity
function rateLimiter() external view returns (address)
```

The address of the token rate limiter contract.

#### Returns

| Name | Type | Description |
@@ -206,6 +223,22 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |

### updateRateLimiter

```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```

Update the rate limiter contract.

*This function can only be called by the contract owner.*

#### Parameters

| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of the new rate limiter contract. |

### withdrawERC20

```solidity
@@ -321,6 +354,23 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |

### UpdateRateLimiter

```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```

Emitted when the owner updates the rate limiter contract.

#### Parameters

| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

### WithdrawERC20

```solidity
@@ -172,6 +172,23 @@ function owner() external view returns (address)

*Returns the address of the current owner.*

#### Returns

| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |

### rateLimiter

```solidity
function rateLimiter() external view returns (address)
```

The address of the token rate limiter contract.

#### Returns

| Name | Type | Description |
@@ -222,6 +239,22 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |

### updateRateLimiter

```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```

Update the rate limiter contract.

*This function can only be called by the contract owner.*

#### Parameters

| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of the new rate limiter contract. |

### withdrawERC20

```solidity
@@ -337,6 +370,23 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |

### UpdateRateLimiter

```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```

Emitted when the owner updates the rate limiter contract.

#### Parameters

| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

### WithdrawERC20

```solidity
@@ -149,7 +149,7 @@ contract L1ScrollMessenger is ScrollMessengerBase, IL1ScrollMessenger {

        // @note check more `_to` address to avoid attack in the future when we add more gateways.
        require(_to != messageQueue, "Forbid to call message queue");
-        require(_to != address(this), "Forbid to call self");
+        _validateTargetAddress(_to);

        // @note This usually will never happen, just in case.
        require(_from != xDomainMessageSender, "Invalid message sender");
@@ -312,6 +312,8 @@ contract L1ScrollMessenger is ScrollMessengerBase, IL1ScrollMessenger {
        uint256 _gasLimit,
        address _refundAddress
    ) internal nonReentrant {
+        _addUsedAmount(_value);
+
        address _messageQueue = messageQueue; // gas saving
        address _counterpart = counterpart; // gas saving
@@ -161,6 +161,9 @@ abstract contract L1ERC20Gateway is IL1ERC20Gateway, IMessageDropCallback, Scrol
        // ignore weird fee on transfer token
        require(_amount > 0, "deposit zero amount");

+        // rate limit
+        _addUsedAmount(_token, _amount);
+
        return (_from, _amount, _data);
    }
@@ -124,6 +124,8 @@ contract L1ETHGateway is ScrollGatewayBase, IL1ETHGateway, IMessageDropCallback
            (_from, _data) = abi.decode(_data, (address, bytes));
        }

+        // @note no rate limit here, since ETH is limited in messenger
+
        // 2. Generate message passed to L1ScrollMessenger.
        bytes memory _message = abi.encodeCall(IL2ETHGateway.finalizeDepositETH, (_from, _to, _amount, _data));
@@ -571,6 +571,10 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
                _totalL1MessagesPoppedInBatch += 1;
                _totalL1MessagesPoppedOverall += 1;
            }
+
+            // check the last L1 message is not skipped; _totalL1MessagesPoppedInBatch must be > 0
+            uint256 rem = (_totalL1MessagesPoppedInBatch - 1) & 0xff;
+            require(((_bitmap >> rem) & 1) == 0, "cannot skip last L1 message");
        }

        return _ptr;
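The new `require` reads one bit out of the current 256-bit skip bitmap: bit `(n - 1) mod 256`, where `n` is the number of L1 messages popped in the batch so far. A small self-contained sketch of the same check (the function name and `big.Int` representation are illustrative; only the bit arithmetic mirrors the diff):

```go
package main

import (
    "fmt"
    "math/big"
)

// lastMessageSkipped mirrors the new check: bit (n-1) & 0xff of the current
// 256-bit skip bitmap tells whether the batch's last popped L1 message was
// marked as skipped, which the contract now rejects.
func lastMessageSkipped(bitmap *big.Int, poppedInBatch uint64) bool {
    rem := int((poppedInBatch - 1) & 0xff)
    return bitmap.Bit(rem) == 1
}

func main() {
    bitmap := new(big.Int).SetBit(new(big.Int), 3, 1) // message index 3 skipped
    fmt.Println(lastMessageSkipped(bitmap, 4))        // true: this batch would revert
    fmt.Println(lastMessageSkipped(bitmap, 5))        // false: last message not skipped
}
```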
@@ -137,6 +137,7 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
        uint256 _gasLimit
    ) internal nonReentrant {
        require(msg.value == _value, "msg.value mismatch");
+        _addUsedAmount(_value);

        uint256 _nonce = L2MessageQueue(messageQueue).nextMessageIndex();
        bytes32 _xDomainCalldataHash = keccak256(_encodeXDomainCalldata(msg.sender, _to, _value, _nonce, _message));
@@ -165,7 +166,7 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
    ) internal {
        // @note check more `_to` address to avoid attack in the future when we add more gateways.
        require(_to != messageQueue, "Forbid to call message queue");
-        require(_to != address(this), "Forbid to call self");
+        _validateTargetAddress(_to);

        // @note This usually will never happen, just in case.
        require(_from != xDomainMessageSender, "Invalid message sender");
@@ -126,6 +126,9 @@ contract L2CustomERC20Gateway is L2ERC20Gateway {
            (_from, _data) = abi.decode(_data, (address, bytes));
        }

+        // rate limit
+        _addUsedAmount(_token, _amount);
+
        // 2. Burn token.
        IScrollERC20Upgradeable(_token).burn(_from, _amount);
@@ -98,6 +98,8 @@ contract L2ETHGateway is ScrollGatewayBase, IL2ETHGateway {
            (_from, _data) = abi.decode(_data, (address, bytes));
        }

+        // @note no rate limit here, since ETH is limited in messenger
+
        bytes memory _message = abi.encodeCall(IL1ETHGateway.finalizeWithdrawETH, (_from, _to, _amount, _data));
        IL2ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, _amount, _message, _gasLimit);
@@ -140,6 +140,9 @@ contract L2StandardERC20Gateway is L2ERC20Gateway {
        address _l1Token = tokenMapping[_token];
        require(_l1Token != address(0), "no corresponding l1 token");

+        // rate limit
+        _addUsedAmount(_token, _amount);
+
        // 2. Burn token.
        IScrollERC20Upgradeable(_token).burn(_from, _amount);
@@ -116,6 +116,9 @@ contract L2WETHGateway is L2ERC20Gateway {
            (_from, _data) = abi.decode(_data, (address, bytes));
        }

+        // rate limit
+        _addUsedAmount(_token, _amount);
+
        // 2. Transfer token into this contract.
        IERC20Upgradeable(_token).safeTransferFrom(_from, address(this), _amount);
        IWETH(_token).withdraw(_amount);
@@ -7,6 +7,7 @@ import {PausableUpgradeable} from "@openzeppelin/contracts-upgradeable/security/
import {ReentrancyGuardUpgradeable} from "@openzeppelin/contracts-upgradeable/security/ReentrancyGuardUpgradeable.sol";

import {ScrollConstants} from "./constants/ScrollConstants.sol";
+import {IETHRateLimiter} from "../rate-limiter/IETHRateLimiter.sol";
import {IScrollMessenger} from "./IScrollMessenger.sol";

// solhint-disable var-name-mixedcase
@@ -26,6 +27,11 @@ abstract contract ScrollMessengerBase is
    /// @param _newFeeVault The address of new fee vault contract.
    event UpdateFeeVault(address _oldFeeVault, address _newFeeVault);

+    /// @notice Emitted when the owner updates the rate limiter contract.
+    /// @param _oldRateLimiter The address of the old rate limiter contract.
+    /// @param _newRateLimiter The address of the new rate limiter contract.
+    event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter);
+
    /*************
     * Variables *
     *************/
@@ -39,8 +45,11 @@ abstract contract ScrollMessengerBase is
    /// @notice The address of fee vault, collecting cross domain messaging fee.
    address public feeVault;

+    /// @notice The address of the ETH rate limiter contract.
+    address public rateLimiter;
+
    /// @dev The storage slots for future usage.
-    uint256[47] private __gap;
+    uint256[46] private __gap;

    /**********************
     * Function Modifiers *
@@ -89,6 +98,16 @@ abstract contract ScrollMessengerBase is
        emit UpdateFeeVault(_oldFeeVault, _newFeeVault);
    }

+    /// @notice Update the rate limiter contract.
+    /// @dev This function can only be called by the contract owner.
+    /// @param _newRateLimiter The address of the new rate limiter contract.
+    function updateRateLimiter(address _newRateLimiter) external onlyOwner {
+        address _oldRateLimiter = rateLimiter;
+
+        rateLimiter = _newRateLimiter;
+        emit UpdateRateLimiter(_oldRateLimiter, _newRateLimiter);
+    }
+
    /// @notice Pause the contract.
    /// @dev This function can only be called by the contract owner.
    /// @param _status The pause status to update.
@@ -128,4 +147,27 @@ abstract contract ScrollMessengerBase is
            _message
        );
    }

+    /// @dev Internal function to increase ETH usage for the given `_sender`.
+    /// @param _amount The amount of ETH used.
+    function _addUsedAmount(uint256 _amount) internal {
+        if (_amount == 0) return;
+
+        address _rateLimiter = rateLimiter;
+        if (_rateLimiter != address(0)) {
+            IETHRateLimiter(_rateLimiter).addUsedAmount(_amount);
+        }
+    }
+
+    /// @dev Internal function to check that the `_target` address is allowed, to avoid attacks.
+    /// @param _target The target address to check.
+    function _validateTargetAddress(address _target) internal view {
+        // @note check more `_target` addresses to avoid attacks in the future when we add more external contracts.
+
+        address _rateLimiter = rateLimiter;
+        if (_rateLimiter != address(0)) {
+            require(_target != _rateLimiter, "Forbid to call rate limiter");
+        }
+        require(_target != address(this), "Forbid to call self");
+    }
}
@@ -9,8 +9,18 @@ import {IScrollGateway} from "./IScrollGateway.sol";
import {IScrollMessenger} from "../IScrollMessenger.sol";
import {IScrollGatewayCallback} from "../callbacks/IScrollGatewayCallback.sol";
import {ScrollConstants} from "../constants/ScrollConstants.sol";
+import {ITokenRateLimiter} from "../../rate-limiter/ITokenRateLimiter.sol";

abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgradeable, IScrollGateway {
+    /**********
+     * Events *
+     **********/
+
+    /// @notice Emitted when the owner updates the rate limiter contract.
+    /// @param _oldRateLimiter The address of the old rate limiter contract.
+    /// @param _newRateLimiter The address of the new rate limiter contract.
+    event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter);
+
    /*************
     * Variables *
     *************/
@@ -24,8 +34,11 @@ abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgrad
    /// @inheritdoc IScrollGateway
    address public override messenger;

+    /// @notice The address of the token rate limiter contract.
+    address public rateLimiter;
+
    /// @dev The storage slots for future usage.
-    uint256[47] private __gap;
+    uint256[46] private __gap;

    /**********************
     * Function Modifiers *
@@ -72,6 +85,20 @@ abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgrad
        }
    }

+    /************************
+     * Restricted Functions *
+     ************************/
+
+    /// @notice Update the rate limiter contract.
+    /// @dev This function can only be called by the contract owner.
+    /// @param _newRateLimiter The address of the new rate limiter contract.
+    function updateRateLimiter(address _newRateLimiter) external onlyOwner {
+        address _oldRateLimiter = rateLimiter;
+
+        rateLimiter = _newRateLimiter;
+        emit UpdateRateLimiter(_oldRateLimiter, _newRateLimiter);
+    }
+
    /**********************
     * Internal Functions *
     **********************/
@@ -84,4 +111,16 @@ abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgrad
            IScrollGatewayCallback(_to).onScrollGatewayCallback(_data);
        }
    }

+    /// @dev Internal function to increase token usage for the given `_sender`.
+    /// @param _token The address of the token.
+    /// @param _amount The amount of token used.
+    function _addUsedAmount(address _token, uint256 _amount) internal {
+        if (_amount == 0) return;
+
+        address _rateLimiter = rateLimiter;
+        if (_rateLimiter != address(0)) {
+            ITokenRateLimiter(_rateLimiter).addUsedAmount(_token, _amount);
+        }
+    }
}
contracts/src/misc/ScrollOwner.sol (new file, 130 lines)
@@ -0,0 +1,130 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import {AccessControlEnumerable} from "@openzeppelin/contracts/access/AccessControlEnumerable.sol";
import {EnumerableSet} from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol";

// solhint-disable no-empty-blocks

contract ScrollOwner is AccessControlEnumerable {
    using EnumerableSet for EnumerableSet.Bytes32Set;

    /*************
     * Variables *
     *************/

    /// @notice Mapping from target address to selector to the list of accessible roles.
    mapping(address => mapping(bytes4 => EnumerableSet.Bytes32Set)) private targetAccess;

    /**********************
     * Function Modifiers *
     **********************/

    modifier hasAccess(
        address _target,
        bytes4 _selector,
        bytes32 _role
    ) {
        // admin has access to all methods
        require(_role == DEFAULT_ADMIN_ROLE || targetAccess[_target][_selector].contains(_role), "no access");
        _;
    }

    /***************
     * Constructor *
     ***************/

    constructor() {
        _grantRole(DEFAULT_ADMIN_ROLE, msg.sender);
    }

    /*************************
     * Public View Functions *
     *************************/

    /// @notice Return the list of roles that have access to the function.
    /// @param _target The address of the target contract.
    /// @param _selector The function selector to query.
    /// @return _roles The list of roles.
    function callableRoles(address _target, bytes4 _selector) external view returns (bytes32[] memory _roles) {
        EnumerableSet.Bytes32Set storage _lists = targetAccess[_target][_selector];
        _roles = new bytes32[](_lists.length());
        for (uint256 i = 0; i < _roles.length; i++) {
            _roles[i] = _lists.at(i);
        }
    }

    /*****************************
     * Public Mutating Functions *
     *****************************/

    /// @notice Perform a function call from an arbitrary role.
    /// @param _target The address of the target contract.
    /// @param _value The value to pass to the target contract.
    /// @param _data The calldata to pass to the target contract.
    /// @param _role The expected role of the caller.
    function execute(
        address _target,
        uint256 _value,
        bytes calldata _data,
        bytes32 _role
    ) public payable onlyRole(_role) hasAccess(_target, bytes4(_data[0:4]), _role) {
        _execute(_target, _value, _data);
    }

    // allow others to send ether to this contract.
    receive() external payable {}

    /************************
     * Restricted Functions *
     ************************/

    /// @notice Update the access to a target contract.
    /// @param _target The address of the target contract.
    /// @param _selectors The list of function selectors to update.
    /// @param _role The role to change.
    /// @param _status True to grant the role access, false to revoke it.
    function updateAccess(
        address _target,
        bytes4[] memory _selectors,
        bytes32 _role,
        bool _status
    ) external onlyRole(DEFAULT_ADMIN_ROLE) {
        if (_status) {
            for (uint256 i = 0; i < _selectors.length; i++) {
                targetAccess[_target][_selectors[i]].add(_role);
            }
        } else {
            for (uint256 i = 0; i < _selectors.length; i++) {
                targetAccess[_target][_selectors[i]].remove(_role);
            }
        }
    }

    /**********************
     * Internal Functions *
     **********************/

    /// @dev Internal function to call a contract. If the call reverts, the error is bubbled up.
    /// @param _target The address of the target contract.
    /// @param _value The value to pass to the target contract.
    /// @param _data The calldata to pass to the target contract.
    function _execute(
        address _target,
        uint256 _value,
        bytes calldata _data
    ) internal {
        // solhint-disable-next-line avoid-low-level-calls
        (bool success, ) = address(_target).call{value: _value}(_data);
        if (!success) {
            // solhint-disable-next-line no-inline-assembly
            assembly {
                let ptr := mload(0x40)
                let size := returndatasize()
                returndatacopy(ptr, 0, size)
                revert(ptr, size)
            }
        }
    }
}
116  contracts/src/rate-limiter/ETHRateLimiter.sol  Normal file
@@ -0,0 +1,116 @@
// SPDX-License-Identifier: MIT

pragma solidity =0.8.16;

import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
import {SafeCast} from "@openzeppelin/contracts/utils/math/SafeCast.sol";

import {IETHRateLimiter} from "./IETHRateLimiter.sol";

// solhint-disable func-name-mixedcase
// solhint-disable not-rely-on-time

contract ETHRateLimiter is Ownable, IETHRateLimiter {
    /***********
     * Structs *
     ***********/

    struct TokenAmount {
        // The timestamp when the amount is updated.
        uint48 lastUpdateTs;
        // The ETH limit in wei.
        uint104 limit;
        // The amount of ETH in current period.
        uint104 amount;
    }

    /*************
     * Constants *
     *************/

    /// @notice The period length in seconds.
    /// @dev The time frame for the `k`-th period is `[periodDuration * k, periodDuration * (k + 1))`.
    uint256 public immutable periodDuration;

    /// @notice The address of ETH spender.
    address public immutable spender;

    /*************
     * Variables *
     *************/

    /// @notice The token amount used in current period.
    TokenAmount public currentPeriod;

    /***************
     * Constructor *
     ***************/

    constructor(
        uint256 _periodDuration,
        address _spender,
        uint104 _totalLimit
    ) {
        if (_periodDuration == 0) {
            revert PeriodIsZero();
        }

        if (_totalLimit == 0) {
            revert TotalLimitIsZero();
        }

        periodDuration = _periodDuration;
        spender = _spender;

        currentPeriod.limit = _totalLimit;
    }

    /*****************************
     * Public Mutating Functions *
     *****************************/

    /// @inheritdoc IETHRateLimiter
    function addUsedAmount(uint256 _amount) external override {
        if (msg.sender != spender) {
            revert CallerNotSpender();
        }
        if (_amount == 0) return;

        uint256 _currentPeriodStart = (block.timestamp / periodDuration) * periodDuration;

        // check total limit
        uint256 _currentTotalAmount;
        TokenAmount memory _currentPeriod = currentPeriod;

        if (_currentPeriod.lastUpdateTs < _currentPeriodStart) {
            _currentTotalAmount = _amount;
        } else {
            _currentTotalAmount = _currentPeriod.amount + _amount;
        }
        if (_currentTotalAmount > _currentPeriod.limit) {
            revert ExceedTotalLimit();
        }

        _currentPeriod.lastUpdateTs = uint48(block.timestamp);
        _currentPeriod.amount = SafeCast.toUint104(_currentTotalAmount);

        currentPeriod = _currentPeriod;
    }

    /************************
     * Restricted Functions *
     ************************/

    /// @notice Update the total token amount limit.
    /// @param _newTotalLimit The new total limit.
    function updateTotalLimit(uint104 _newTotalLimit) external onlyOwner {
        if (_newTotalLimit == 0) {
            revert TotalLimitIsZero();
        }

        uint256 _oldTotalLimit = currentPeriod.limit;
        currentPeriod.limit = _newTotalLimit;

        emit UpdateTotalLimit(_oldTotalLimit, _newTotalLimit);
    }
}
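The `@dev` note above fixes the window semantics: usage is bucketed into fixed windows of `periodDuration` seconds, and any record whose `lastUpdateTs` predates the current window start is treated as zero, which is how `addUsedAmount` resets the counter without any separate maintenance call. A minimal sketch of the same arithmetic (the helper name is ours, not the contract's):

    // For periodDuration = 86400, t = 100000 lies in period k = 1,
    // i.e. the window [86400, 172800); periodStart(t) returns 86400.
    function periodStart(uint256 t, uint256 periodDuration) internal pure returns (uint256) {
        return (t / periodDuration) * periodDuration; // integer division floors
    }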
38  contracts/src/rate-limiter/IETHRateLimiter.sol  Normal file
@@ -0,0 +1,38 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.16;

interface IETHRateLimiter {
    /**********
     * Events *
     **********/

    /// @notice Emitted when the total limit is updated.
    /// @param oldTotalLimit The previous value of total limit before updating.
    /// @param newTotalLimit The current value of total limit after updating.
    event UpdateTotalLimit(uint256 oldTotalLimit, uint256 newTotalLimit);

    /**********
     * Errors *
     **********/

    /// @dev Thrown when the `periodDuration` is initialized to zero.
    error PeriodIsZero();

    /// @dev Thrown when the `totalAmount` is initialized to zero.
    error TotalLimitIsZero();

    /// @dev Thrown when an amount breaches the total limit in the period.
    error ExceedTotalLimit();

    /// @dev Thrown when the caller is not the spender.
    error CallerNotSpender();

    /*****************************
     * Public Mutating Functions *
     *****************************/

    /// @notice Request some ETH usage for `sender`.
    /// @param _amount The amount of ETH to use.
    function addUsedAmount(uint256 _amount) external;
}
38  contracts/src/rate-limiter/ITokenRateLimiter.sol  Normal file
@@ -0,0 +1,38 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.16;

interface ITokenRateLimiter {
    /**********
     * Events *
     **********/

    /// @notice Emitted when the total limit is updated.
    /// @param oldTotalLimit The previous value of total limit before updating.
    /// @param newTotalLimit The current value of total limit after updating.
    event UpdateTotalLimit(address indexed token, uint256 oldTotalLimit, uint256 newTotalLimit);

    /**********
     * Errors *
     **********/

    /// @dev Thrown when the `periodDuration` is initialized to zero.
    error PeriodIsZero();

    /// @dev Thrown when the `totalAmount` is initialized to zero.
    /// @param token The address of the token.
    error TotalLimitIsZero(address token);

    /// @dev Thrown when an amount breaches the total limit in the period.
    /// @param token The address of the token.
    error ExceedTotalLimit(address token);

    /*****************************
     * Public Mutating Functions *
     *****************************/

    /// @notice Request some token usage for `sender`.
    /// @param token The address of the token.
    /// @param amount The amount of token to use.
    function addUsedAmount(address token, uint256 amount) external;
}
106  contracts/src/rate-limiter/TokenRateLimiter.sol  Normal file
@@ -0,0 +1,106 @@
// SPDX-License-Identifier: MIT

pragma solidity =0.8.16;

import {AccessControlEnumerable} from "@openzeppelin/contracts/access/AccessControlEnumerable.sol";
import {SafeCast} from "@openzeppelin/contracts/utils/math/SafeCast.sol";

import {ITokenRateLimiter} from "./ITokenRateLimiter.sol";

// solhint-disable func-name-mixedcase
// solhint-disable not-rely-on-time

contract TokenRateLimiter is AccessControlEnumerable, ITokenRateLimiter {
    /***********
     * Structs *
     ***********/

    struct TokenAmount {
        // The timestamp when the amount is updated.
        uint48 lastUpdateTs;
        // The token limit.
        uint104 limit;
        // The amount of token in current period.
        uint104 amount;
    }

    /*************
     * Constants *
     *************/

    /// @notice The role for token spender.
    bytes32 public constant TOKEN_SPENDER_ROLE = keccak256("TOKEN_SPENDER_ROLE");

    /// @notice The period length in seconds.
    /// @dev The time frame for the `k`-th period is `[periodDuration * k, periodDuration * (k + 1))`.
    uint256 public immutable periodDuration;

    /*************
     * Variables *
     *************/

    /// @notice Mapping from token address to the total amounts used in current period and total token amount limit.
    mapping(address => TokenAmount) public currentPeriod;

    /// @dev The storage slots for future usage.
    uint256[49] private __gap;

    /***************
     * Constructor *
     ***************/

    constructor(uint256 _periodDuration) {
        if (_periodDuration == 0) {
            revert PeriodIsZero();
        }

        _setupRole(DEFAULT_ADMIN_ROLE, msg.sender);

        periodDuration = _periodDuration;
    }

    /*****************************
     * Public Mutating Functions *
     *****************************/

    /// @inheritdoc ITokenRateLimiter
    function addUsedAmount(address _token, uint256 _amount) external override onlyRole(TOKEN_SPENDER_ROLE) {
        if (_amount == 0) return;

        uint256 _currentPeriodStart = (block.timestamp / periodDuration) * periodDuration;

        // check total limit, `0` means no limit at all.
        uint256 _currentTotalAmount;
        TokenAmount memory _currentPeriod = currentPeriod[_token];
        if (_currentPeriod.lastUpdateTs < _currentPeriodStart) {
            _currentTotalAmount = _amount;
        } else {
            _currentTotalAmount = _currentPeriod.amount + _amount;
        }
        if (_currentPeriod.limit != 0 && _currentTotalAmount > _currentPeriod.limit) {
            revert ExceedTotalLimit(_token);
        }

        _currentPeriod.lastUpdateTs = uint48(block.timestamp);
        _currentPeriod.amount = SafeCast.toUint104(_currentTotalAmount);

        currentPeriod[_token] = _currentPeriod;
    }

    /************************
     * Restricted Functions *
     ************************/

    /// @notice Update the total token amount limit.
    /// @param _newTotalLimit The new total limit.
    function updateTotalLimit(address _token, uint104 _newTotalLimit) external onlyRole(DEFAULT_ADMIN_ROLE) {
        if (_newTotalLimit == 0) {
            revert TotalLimitIsZero(_token);
        }

        uint256 _oldTotalLimit = currentPeriod[_token].limit;
        currentPeriod[_token].limit = _newTotalLimit;

        emit UpdateTotalLimit(_token, _oldTotalLimit, _newTotalLimit);
    }
}
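`TOKEN_SPENDER_ROLE` is simply the keccak-256 hash of its own name, which is why the raw literal `0x267f05081a073059ae452e6ac77ec189636e43e41051d4c3ec760734b3d173cb` appears in TokenRateLimiter.t.sol further down: the test's expected AccessControl revert message and its `grantRole` call both use that hash. Callers can derive it instead of hard-coding it:

    // Equivalent to the 0x267f05... literal used in the tests below.
    bytes32 role = keccak256("TOKEN_SPENDER_ROLE");
    // limiter.grantRole(role, spender);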
75  contracts/src/test/ETHRateLimiter.t.sol  Normal file
@@ -0,0 +1,75 @@
// SPDX-License-Identifier: MIT

pragma solidity =0.8.16;

import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";

import {ETHRateLimiter} from "../rate-limiter/ETHRateLimiter.sol";
import {IETHRateLimiter} from "../rate-limiter/IETHRateLimiter.sol";

contract ETHRateLimiterTest is DSTestPlus {
    event UpdateTotalLimit(uint256 oldTotalLimit, uint256 newTotalLimit);

    ETHRateLimiter private limiter;

    function setUp() public {
        hevm.warp(86400);
        limiter = new ETHRateLimiter(86400, address(this), 100 ether);
    }

    function testUpdateTotalLimit(uint104 _newTotalLimit) external {
        hevm.assume(_newTotalLimit > 0);

        // not owner, revert
        hevm.startPrank(address(1));
        hevm.expectRevert("Ownable: caller is not the owner");
        limiter.updateTotalLimit(_newTotalLimit);
        hevm.stopPrank();

        // zero revert
        hevm.expectRevert(IETHRateLimiter.TotalLimitIsZero.selector);
        limiter.updateTotalLimit(0);

        // success
        hevm.expectEmit(false, false, false, true);
        emit UpdateTotalLimit(100 ether, _newTotalLimit);
        limiter.updateTotalLimit(_newTotalLimit);
        (, uint104 _totalLimit, ) = limiter.currentPeriod();
        assertEq(_totalLimit, _newTotalLimit);
    }

    function testAddUsedAmount() external {
        // non-spender, revert
        hevm.startPrank(address(1));
        hevm.expectRevert(IETHRateLimiter.CallerNotSpender.selector);
        limiter.addUsedAmount(0);
        hevm.stopPrank();

        // exceed total limit on first call
        hevm.expectRevert(IETHRateLimiter.ExceedTotalLimit.selector);
        limiter.addUsedAmount(100 ether + 1);
        _checkTotalCurrentPeriodAmount(0);

        // exceed total limit on second call
        limiter.addUsedAmount(50 ether);
        _checkTotalCurrentPeriodAmount(50 ether);
        hevm.expectRevert(IETHRateLimiter.ExceedTotalLimit.selector);
        limiter.addUsedAmount(50 ether + 1);
        _checkTotalCurrentPeriodAmount(50 ether);

        // one period passed
        hevm.warp(86400 * 2);
        limiter.addUsedAmount(1 ether);
        _checkTotalCurrentPeriodAmount(1 ether);

        // exceed
        hevm.expectRevert(IETHRateLimiter.ExceedTotalLimit.selector);
        limiter.addUsedAmount(99 ether + 1);
        _checkTotalCurrentPeriodAmount(1 ether);
    }

    function _checkTotalCurrentPeriodAmount(uint256 expected) internal {
        (, , uint256 totalAmount) = limiter.currentPeriod();
        assertEq(totalAmount, expected);
    }
}
@@ -42,6 +42,26 @@ contract L1ScrollMessengerTest is L1GatewayTestBase {
         l1Messenger.relayMessageWithProof(address(this), address(messageQueue), 0, 0, new bytes(0), proof);
     }
 
+    function testForbidCallRateLimiterFromL2() external {
+        l1Messenger.updateRateLimiter(address(1));
+        bytes32 _xDomainCalldataHash = keccak256(
+            abi.encodeWithSignature(
+                "relayMessage(address,address,uint256,uint256,bytes)",
+                address(this),
+                address(1),
+                0,
+                0,
+                new bytes(0)
+            )
+        );
+        prepareL2MessageRoot(_xDomainCalldataHash);
+        IL1ScrollMessenger.L2MessageProof memory proof;
+        proof.batchIndex = rollup.lastFinalizedBatchIndex();
+
+        hevm.expectRevert("Forbid to call rate limiter");
+        l1Messenger.relayMessageWithProof(address(this), address(1), 0, 0, new bytes(0), proof);
+    }
+
     function testForbidCallSelfFromL2() external {
         bytes32 _xDomainCalldataHash = keccak256(
             abi.encodeWithSignature(
@@ -51,10 +51,15 @@ contract L2ScrollMessengerTest is DSTestPlus {
     }
 
     function testForbidCallFromL1() external {
+        l2Messenger.updateRateLimiter(address(1));
+
         hevm.startPrank(AddressAliasHelper.applyL1ToL2Alias(address(l1Messenger)));
         hevm.expectRevert("Forbid to call message queue");
         l2Messenger.relayMessage(address(this), address(l2MessageQueue), 0, 0, new bytes(0));
 
+        hevm.expectRevert("Forbid to call rate limiter");
+        l2Messenger.relayMessage(address(this), address(1), 0, 0, new bytes(0));
+
         hevm.expectRevert("Forbid to call self");
         l2Messenger.relayMessage(address(this), address(l2Messenger), 0, 0, new bytes(0));
         hevm.stopPrank();
@@ -52,6 +52,11 @@ contract ScrollChainTest is DSTestPlus {
     function testCommitBatch() public {
         bytes memory batchHeader0 = new bytes(89);
 
+        // import 10 L1 messages
+        for (uint256 i = 0; i < 10; i++) {
+            messageQueue.appendCrossDomainMessage(address(this), 1000000, new bytes(0));
+        }
+
         // import genesis batch first
         assembly {
             mstore(add(batchHeader0, add(0x20, 25)), 1)
@@ -106,13 +111,24 @@ contract ScrollChainTest is DSTestPlus {
         hevm.expectRevert("invalid chunk length");
         rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
 
-        // num txs less than num L1 msgs, revert
+        // cannot skip last L1 message, revert
         chunk0 = new bytes(1 + 60);
         bytes memory bitmap = new bytes(32);
         chunk0[0] = bytes1(uint8(1)); // one block in this chunk
         chunk0[58] = bytes1(uint8(1)); // numTransactions = 1
-        chunk0[60] = bytes1(uint8(3)); // numL1Messages = 3
-        bitmap[31] = bytes1(uint8(7));
+        chunk0[60] = bytes1(uint8(1)); // numL1Messages = 1
+        bitmap[31] = bytes1(uint8(1));
         chunks[0] = chunk0;
-        hevm.expectRevert("num txs less than num L1 msgs");
+        hevm.expectRevert("cannot skip last L1 message");
         rollup.commitBatch(0, batchHeader0, chunks, bitmap);
+
+        // num txs less than num L1 msgs, revert
+        chunk0 = new bytes(1 + 60);
+        bitmap = new bytes(32);
+        chunk0[0] = bytes1(uint8(1)); // one block in this chunk
+        chunk0[58] = bytes1(uint8(1)); // numTransactions = 1
+        chunk0[60] = bytes1(uint8(3)); // numL1Messages = 3
+        bitmap[31] = bytes1(uint8(3));
+        chunks[0] = chunk0;
+        hevm.expectRevert("num txs less than num L1 msgs");
+        rollup.commitBatch(0, batchHeader0, chunks, bitmap);
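The bitmap constants decode as follows: bit `i` of the skip bitmap marks L1 message `i` of the chunk as skipped, with `bitmap[31]` holding the lowest-order byte of the 256-bit word. So the fixture change from `uint8(7)` (binary 111, skipping all three messages including the last) to `uint8(3)` (binary 011, skipping only the first two) is what keeps the second fixture on the "num txs less than num L1 msgs" path now that skipping the last message is rejected outright. A sketch of the decoding, assuming this bit convention:

    // Sketch: is L1 message `i` skipped? Reads bit i of a 256-bit bitmap word.
    function isSkipped(uint256 bitmapWord, uint256 i) internal pure returns (bool) {
        return (bitmapWord >> i) & 1 == 1;
    }
    // isSkipped(7, 2) == true  -> old fixture skipped the last of 3 messages
    // isSkipped(3, 2) == false -> new fixture keeps the last message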
@@ -328,8 +344,8 @@ contract ScrollChainTest is DSTestPlus {
         // 2ac1dad3f3696e5581dfc10f2c7a7a8fc5b344285f7d332c7895a8825fca609a
         // 2. chunk1 has three blocks
         // 2.1 block0 has 5 tx, 3 L1 messages, no skips
-        // 2.2 block1 has 10 tx, 5 L1 messages, even is skipped.
-        // 2.2 block1 has 300 tx, 256 L1 messages, odd position is skipped.
+        // 2.2 block1 has 10 tx, 5 L1 messages, even is skipped, last is not skipped
+        // 2.2 block1 has 300 tx, 256 L1 messages, odd position is skipped, last is not skipped
         // => payload for chunk1
         // 0000000000000000
         // 0000000000000000
@@ -348,32 +364,32 @@ contract ScrollChainTest is DSTestPlus {
         // 012c
         // ... (some tx hashes)
         // => data hash for chunk2
-        // 0520f1fbe159af97fdf1d6cfcfe7605f99f7bfe3ed876e87b64250b1810df00b
+        // e1276f58354ab2372050bde30d8c970ccc3728c76e97f37deebeee83ecbf5705
         // => data hash for all chunks
-        // f52343299f6379fd15b20b23d51fc61b9b357b124be112686626b6278bcffa83
+        // 3c71d155351642d15f1542a1543ce423abeca1f8939100a0a34cdc3127b95f69
         // => payload for batch header
         // 00
         // 0000000000000002
         // 0000000000000108
         // 0000000000000109
-        // f52343299f6379fd15b20b23d51fc61b9b357b124be112686626b6278bcffa83
+        // 3c71d155351642d15f1542a1543ce423abeca1f8939100a0a34cdc3127b95f69
         // cef70bf80683c4d9b8b2813e90c314e8c56648e231300b8cfed9d666b0caf14e
         // aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa800000000000000000000000000000000000000000000000000000000000000aa
         // => hash for batch header
-        // 2231cf185a5c07931584f970738b4cd2ae4fb39e2d90853b26746c7616ea71b9
+        // 03a9cdcb9d582251acf60937db006ec99f3505fd4751b7c1f92c9a8ef413e873
         bytes memory batchHeader2 = new bytes(89 + 32 + 32);
         assembly {
             mstore(add(batchHeader2, 0x20), 0) // version
             mstore(add(batchHeader2, add(0x20, 1)), shl(192, 2)) // batchIndex = 2
             mstore(add(batchHeader2, add(0x20, 9)), shl(192, 264)) // l1MessagePopped = 264
             mstore(add(batchHeader2, add(0x20, 17)), shl(192, 265)) // totalL1MessagePopped = 265
-            mstore(add(batchHeader2, add(0x20, 25)), 0xf52343299f6379fd15b20b23d51fc61b9b357b124be112686626b6278bcffa83) // dataHash
+            mstore(add(batchHeader2, add(0x20, 25)), 0x3c71d155351642d15f1542a1543ce423abeca1f8939100a0a34cdc3127b95f69) // dataHash
             mstore(add(batchHeader2, add(0x20, 57)), batchHash1) // parentBatchHash
             mstore(
                 add(batchHeader2, add(0x20, 89)),
-                77194726158210796949047323339125271902179989777093709359638389338608753093288
+                77194726158210796949047323339125271902179989777093709359638389338608753093160
             ) // bitmap0
-            mstore(add(batchHeader2, add(0x20, 121)), 170) // bitmap1
+            mstore(add(batchHeader2, add(0x20, 121)), 42) // bitmap1
         }
         chunk0 = new bytes(1 + 60 + 3 * 5);
         assembly {
@@ -408,17 +424,17 @@ contract ScrollChainTest is DSTestPlus {
         assembly {
             mstore(
                 add(bitmap, add(0x20, 0)),
-                77194726158210796949047323339125271902179989777093709359638389338608753093288
+                77194726158210796949047323339125271902179989777093709359638389338608753093160
             ) // bitmap0
-            mstore(add(bitmap, add(0x20, 32)), 170) // bitmap1
+            mstore(add(bitmap, add(0x20, 32)), 42) // bitmap1
         }
 
         hevm.expectEmit(true, true, false, true);
-        emit CommitBatch(2, bytes32(0x2231cf185a5c07931584f970738b4cd2ae4fb39e2d90853b26746c7616ea71b9));
+        emit CommitBatch(2, bytes32(0x03a9cdcb9d582251acf60937db006ec99f3505fd4751b7c1f92c9a8ef413e873));
         rollup.commitBatch(0, batchHeader1, chunks, bitmap);
         assertBoolEq(rollup.isBatchFinalized(2), false);
         bytes32 batchHash2 = rollup.committedBatches(2);
-        assertEq(batchHash2, bytes32(0x2231cf185a5c07931584f970738b4cd2ae4fb39e2d90853b26746c7616ea71b9));
+        assertEq(batchHash2, bytes32(0x03a9cdcb9d582251acf60937db006ec99f3505fd4751b7c1f92c9a8ef413e873));
 
         // verify committed batch correctly
         hevm.expectEmit(true, true, false, true);
@@ -441,7 +457,7 @@ contract ScrollChainTest is DSTestPlus {
         }
         // 4 ~ 9, even is nonzero, odd is zero
         for (uint256 i = 4; i < 9; i++) {
-            if (i % 2 == 1) {
+            if (i % 2 == 1 || i == 8) {
                 assertEq(messageQueue.getCrossDomainMessage(i), bytes32(0));
             } else {
                 assertGt(uint256(messageQueue.getCrossDomainMessage(i)), 0);
@@ -449,7 +465,7 @@ contract ScrollChainTest is DSTestPlus {
         }
         // 9 ~ 265, even is nonzero, odd is zero
         for (uint256 i = 9; i < 265; i++) {
-            if (i % 2 == 1) {
+            if (i % 2 == 1 || i == 264) {
                 assertEq(messageQueue.getCrossDomainMessage(i), bytes32(0));
             } else {
                 assertGt(uint256(messageQueue.getCrossDomainMessage(i)), 0);
87  contracts/src/test/ScrollOwner.t.sol  Normal file
@@ -0,0 +1,87 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";

import {ScrollOwner} from "../misc/ScrollOwner.sol";

contract ScrollOwnerTest is DSTestPlus {
    event Call();

    ScrollOwner private owner;

    function setUp() public {
        owner = new ScrollOwner();
    }

    function testUpdateAccess() external {
        // not admin, revert
        hevm.startPrank(address(1));
        hevm.expectRevert(
            "AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x0000000000000000000000000000000000000000000000000000000000000000"
        );
        owner.updateAccess(address(0), new bytes4[](0), bytes32(0), true);
        hevm.stopPrank();

        bytes4[] memory _selectors;
        bytes32[] memory _roles;

        // add access then remove access
        _roles = owner.callableRoles(address(this), ScrollOwnerTest.revertOnCall.selector);
        assertEq(0, _roles.length);
        _selectors = new bytes4[](1);
        _selectors[0] = ScrollOwnerTest.revertOnCall.selector;
        owner.updateAccess(address(this), _selectors, bytes32(uint256(1)), true);
        _roles = owner.callableRoles(address(this), ScrollOwnerTest.revertOnCall.selector);
        assertEq(1, _roles.length);
        assertEq(_roles[0], bytes32(uint256(1)));
        owner.updateAccess(address(this), _selectors, bytes32(uint256(1)), false);
        _roles = owner.callableRoles(address(this), ScrollOwnerTest.revertOnCall.selector);
        assertEq(0, _roles.length);
    }

    function testAdminExecute() external {
        // call with revert
        hevm.expectRevert("Called");
        owner.execute(address(this), 0, abi.encodeWithSelector(ScrollOwnerTest.revertOnCall.selector), bytes32(0));

        // call with emit
        hevm.expectEmit(false, false, false, true);
        emit Call();
        owner.execute(address(this), 0, abi.encodeWithSelector(ScrollOwnerTest.emitOnCall.selector), bytes32(0));
    }

    function testExecute(bytes32 _role) external {
        hevm.assume(_role != bytes32(0));

        bytes4[] memory _selectors = new bytes4[](2);
        _selectors[0] = ScrollOwnerTest.revertOnCall.selector;
        _selectors[1] = ScrollOwnerTest.emitOnCall.selector;

        owner.grantRole(_role, address(this));

        // no access, revert
        hevm.expectRevert("no access");
        owner.execute(address(this), 0, abi.encodeWithSelector(ScrollOwnerTest.revertOnCall.selector), _role);

        owner.updateAccess(address(this), _selectors, _role, true);

        // call with revert
        hevm.expectRevert("Called");
        owner.execute(address(this), 0, abi.encodeWithSelector(ScrollOwnerTest.revertOnCall.selector), _role);

        // call with emit
        hevm.expectEmit(false, false, false, true);
        emit Call();
        owner.execute(address(this), 0, abi.encodeWithSelector(ScrollOwnerTest.emitOnCall.selector), _role);
    }

    function revertOnCall() external pure {
        revert("Called");
    }

    function emitOnCall() external {
        emit Call();
    }
}
82  contracts/src/test/TokenRateLimiter.t.sol  Normal file
@@ -0,0 +1,82 @@
// SPDX-License-Identifier: MIT

pragma solidity =0.8.16;

import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";

import {TokenRateLimiter} from "../rate-limiter/TokenRateLimiter.sol";
import {ITokenRateLimiter} from "../rate-limiter/ITokenRateLimiter.sol";

contract TokenRateLimiterTest is DSTestPlus {
    event UpdateTotalLimit(address indexed token, uint256 oldTotalLimit, uint256 newTotalLimit);

    TokenRateLimiter private limiter;

    function setUp() public {
        hevm.warp(86400);
        limiter = new TokenRateLimiter(86400);
    }

    function testUpdateTotalLimit(address _token, uint104 _newTotalLimit) external {
        hevm.assume(_newTotalLimit > 0);

        // not admin, revert
        hevm.startPrank(address(1));
        hevm.expectRevert(
            "AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x0000000000000000000000000000000000000000000000000000000000000000"
        );
        limiter.updateTotalLimit(_token, _newTotalLimit);
        hevm.stopPrank();

        // zero revert
        hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.TotalLimitIsZero.selector, _token));
        limiter.updateTotalLimit(_token, 0);

        // success
        hevm.expectEmit(true, false, false, true);
        emit UpdateTotalLimit(_token, 0 ether, _newTotalLimit);
        limiter.updateTotalLimit(_token, _newTotalLimit);
        (, uint104 _totalLimit, ) = limiter.currentPeriod(_token);
        assertEq(_totalLimit, _newTotalLimit);
    }

    function testAddUsedAmount(address _token) external {
        // non-spender, revert
        hevm.startPrank(address(1));
        hevm.expectRevert(
            "AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x267f05081a073059ae452e6ac77ec189636e43e41051d4c3ec760734b3d173cb"
        );
        limiter.addUsedAmount(_token, 0);
        hevm.stopPrank();

        limiter.grantRole(bytes32(0x267f05081a073059ae452e6ac77ec189636e43e41051d4c3ec760734b3d173cb), address(this));
        limiter.updateTotalLimit(_token, 100 ether);

        // exceed total limit on first call
        hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.ExceedTotalLimit.selector, _token));
        limiter.addUsedAmount(_token, 100 ether + 1);
        _checkTotalCurrentPeriodAmount(_token, 0);

        // exceed total limit on second call
        limiter.addUsedAmount(_token, 50 ether);
        _checkTotalCurrentPeriodAmount(_token, 50 ether);
        hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.ExceedTotalLimit.selector, _token));
        limiter.addUsedAmount(_token, 50 ether + 1);
        _checkTotalCurrentPeriodAmount(_token, 50 ether);

        // one period passed
        hevm.warp(86400 * 2);
        limiter.addUsedAmount(_token, 1 ether);
        _checkTotalCurrentPeriodAmount(_token, 1 ether);

        // exceed
        hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.ExceedTotalLimit.selector, _token));
        limiter.addUsedAmount(_token, 99 ether + 1);
        _checkTotalCurrentPeriodAmount(_token, 1 ether);
    }

    function _checkTotalCurrentPeriodAmount(address token, uint256 expected) internal {
        (, , uint256 totalAmount) = limiter.currentPeriod(token);
        assertEq(totalAmount, expected);
    }
}
@@ -9,15 +9,14 @@ import (
 	"os/signal"
 	"time"
 
 	// enable the pprof
 	_ "net/http/pprof"
 
 	"github.com/gin-gonic/gin"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/urfave/cli/v2"
+	"gorm.io/gorm"
 
 	"scroll-tech/common/database"
 	"scroll-tech/common/metrics"
 	"scroll-tech/common/utils"
 	"scroll-tech/common/version"
@@ -59,6 +58,7 @@ func action(ctx *cli.Context) error {
 	}
 
 	registry := prometheus.DefaultRegisterer
+	metrics.Server(ctx, registry.(*prometheus.Registry))
 
 	proofCollector := cron.NewCollector(subCtx, db, cfg, registry)
 	defer func() {
@@ -69,21 +69,12 @@ func action(ctx *cli.Context) error {
 		}
 	}()
 
-	router := gin.New()
-	api.InitController(cfg, db, registry)
-	route.Route(router, cfg, registry)
-	port := ctx.String(httpPortFlag.Name)
-	srv := &http.Server{
-		Addr:              fmt.Sprintf(":%s", port),
-		Handler:           router,
-		ReadHeaderTimeout: time.Minute,
-	}
+	apiSrv := apiServer(ctx, cfg, db, registry)
 
-	go func() {
-		if runServerErr := srv.ListenAndServe(); err != nil && !errors.Is(runServerErr, http.ErrServerClosed) {
-			log.Crit("run coordinator http server failure", "error", runServerErr)
-		}
-	}()
 	log.Info(
 		"coordinator start successfully",
 		"version", version.Version,
 	)
 
 	// Catch CTRL-C to ensure a graceful shutdown.
 	interrupt := make(chan os.Signal, 1)
@@ -95,7 +86,7 @@ func action(ctx *cli.Context) error {
 
 	closeCtx, cancelExit := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancelExit()
-	if err = srv.Shutdown(closeCtx); err != nil {
+	if err = apiSrv.Shutdown(closeCtx); err != nil {
 		log.Warn("shutdown coordinator server failure", "error", err)
 		return nil
 	}
@@ -105,6 +96,25 @@ func action(ctx *cli.Context) error {
 	return nil
 }
 
+func apiServer(ctx *cli.Context, cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) *http.Server {
+	router := gin.New()
+	api.InitController(cfg, db, reg)
+	route.Route(router, cfg, reg)
+	port := ctx.String(httpPortFlag.Name)
+	srv := &http.Server{
+		Addr:              fmt.Sprintf(":%s", port),
+		Handler:           router,
+		ReadHeaderTimeout: time.Minute,
+	}
+
+	go func() {
+		if runServerErr := srv.ListenAndServe(); runServerErr != nil && !errors.Is(runServerErr, http.ErrServerClosed) {
+			log.Crit("run coordinator http server failure", "error", runServerErr)
+		}
+	}()
+	return srv
+}
+
 // Run coordinator.
 func Run() {
 	// RunApp the coordinator.
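One behavioral fix rides along with this refactor: the removed `go func` in the old `action` body tested the outer `err` variable, which is unrelated to the serve call, instead of the `runServerErr` returned by `ListenAndServe`, so a failed listener could die silently; the extracted `apiServer` above checks the right variable. The corrected pattern in isolation, as a minimal sketch using the standard-library logger rather than the project's:

    package main

    import (
    	"errors"
    	"log"
    	"net/http"
    )

    func main() {
    	srv := &http.Server{Addr: ":8080", Handler: http.NewServeMux()}
    	go func() {
    		// Check the error returned by ListenAndServe itself; testing an
    		// unrelated outer variable hides listener failures. ErrServerClosed
    		// (returned after a graceful Shutdown) is deliberately ignored.
    		if serveErr := srv.ListenAndServe(); serveErr != nil && !errors.Is(serveErr, http.ErrServerClosed) {
    			log.Fatalf("run http server failure: %v", serveErr)
    		}
    	}()
    	select {} // block forever; a real service waits on a signal channel instead
    }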
@@ -81,11 +81,12 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
 	}
 	// Reset prover manager config for manager test cases.
 	cfg.ProverManager = &coordinatorConfig.ProverManager{
-		ProversPerSession:  1,
-		Verifier:           &coordinatorConfig.VerifierConfig{MockMode: true},
-		CollectionTimeSec:  60,
-		SessionAttempts:    10,
-		MaxVerifierWorkers: 4,
+		ProversPerSession:      1,
+		Verifier:               &coordinatorConfig.VerifierConfig{MockMode: true},
+		BatchCollectionTimeSec: 60,
+		ChunkCollectionTimeSec: 60,
+		SessionAttempts:        10,
+		MaxVerifierWorkers:     4,
 	}
 	cfg.DB.DSN = base.DBImg.Endpoint()
 	cfg.L2.ChainID = 111
@@ -2,7 +2,8 @@
 	"prover_manager": {
 		"provers_per_session": 1,
 		"session_attempts": 5,
-		"collection_time_sec": 180,
+		"batch_collection_time_sec": 180,
+		"chunk_collection_time_sec": 180,
 		"verifier": {
 			"mock_mode": true,
 			"params_path": "",
@@ -17,8 +17,10 @@ type ProverManager struct {
 	SessionAttempts uint8 `json:"session_attempts"`
 	// Zk verifier config.
 	Verifier *VerifierConfig `json:"verifier"`
-	// Proof collection time (in seconds).
-	CollectionTimeSec int `json:"collection_time_sec"`
+	// BatchCollectionTimeSec batch proof collection time (in seconds).
+	BatchCollectionTimeSec int `json:"batch_collection_time_sec"`
+	// ChunkCollectionTimeSec chunk proof collection time (in seconds).
+	ChunkCollectionTimeSec int `json:"chunk_collection_time_sec"`
 	// Max number of workers in verifier worker pool
 	MaxVerifierWorkers int `json:"max_verifier_workers"`
 }
@@ -15,7 +15,8 @@ func TestConfig(t *testing.T) {
 	"prover_manager": {
 		"provers_per_session": 1,
 		"session_attempts": 5,
-		"collection_time_sec": 180,
+		"batch_collection_time_sec": 180,
+		"chunk_collection_time_sec": 180,
 		"verifier": {
 			"mock_mode": true,
 			"params_path": "",
@@ -45,26 +45,28 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
 		},
 	}
 
-	switch message.ProofType(spp.TaskType) {
-	case message.ProofTypeChunk:
-		var tmpChunkProof message.ChunkProof
-		if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
-			nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
-			coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
-			return
-		}
-		proofMsg.ChunkProof = &tmpChunkProof
-	case message.ProofTypeBatch:
-		var tmpBatchProof message.BatchProof
-		if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
-			nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
-			coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
-			return
-		}
-		proofMsg.BatchProof = &tmpBatchProof
-	}
+	if spp.Status == int(message.StatusOk) {
+		switch message.ProofType(spp.TaskType) {
+		case message.ProofTypeChunk:
+			var tmpChunkProof message.ChunkProof
+			if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
+				nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
+				coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
+				return
+			}
+			proofMsg.ChunkProof = &tmpChunkProof
+		case message.ProofTypeBatch:
+			var tmpBatchProof message.BatchProof
+			if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
+				nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
+				coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
+				return
+			}
+			proofMsg.BatchProof = &tmpBatchProof
+		}
+	}
 
-	if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg); err != nil {
+	if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg, spp); err != nil {
 		nerr := fmt.Errorf("handle zk proof failure, err:%w", err)
 		coodinatorType.RenderJSON(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr, nil)
 		return
@@ -29,8 +29,10 @@ type Collector struct {
 	chunkOrm *orm.Chunk
 	batchOrm *orm.Batch
 
-	timeoutCheckerRunTotal prometheus.Counter
-	proverTaskTimeoutTotal prometheus.Counter
+	timeoutBatchCheckerRunTotal prometheus.Counter
+	batchProverTaskTimeoutTotal prometheus.Counter
+	timeoutChunkCheckerRunTotal prometheus.Counter
+	chunkProverTaskTimeoutTotal prometheus.Counter
 }
 
 // NewCollector create a collector to cron collect the data to send to prover
@@ -44,17 +46,26 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
 		chunkOrm: orm.NewChunk(db),
 		batchOrm: orm.NewBatch(db),
 
-		timeoutCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "coordinator_timeout_checker_run_total",
-			Help: "Total number of timeout checker run.",
+		timeoutBatchCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Name: "coordinator_batch_timeout_checker_run_total",
+			Help: "Total number of batch timeout checker run.",
 		}),
-		proverTaskTimeoutTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "coordinator_prover_task_timeout_total",
-			Help: "Total number of timeout prover task.",
+		batchProverTaskTimeoutTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Name: "coordinator_batch_prover_task_timeout_total",
+			Help: "Total number of batch timeout prover task.",
 		}),
+		timeoutChunkCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Name: "coordinator_chunk_timeout_checker_run_total",
+			Help: "Total number of chunk timeout checker run.",
+		}),
+		chunkProverTaskTimeoutTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Name: "coordinator_chunk_prover_task_timeout_total",
+			Help: "Total number of chunk timeout prover task.",
+		}),
 	}
 
-	go c.timeoutProofTask()
+	go c.timeoutBatchProofTask()
+	go c.timeoutChunkProofTask()
 
 	log.Info("Start coordinator successfully.")
@@ -69,10 +80,10 @@ func (c *Collector) Stop() {
 // timeoutTask cron check the send task is timeout. if timeout reached, restore the
 // chunk/batch task to unassigned. then the batch/chunk collector can retry it.
 
-func (c *Collector) timeoutProofTask() {
+func (c *Collector) timeoutBatchProofTask() {
 	defer func() {
 		if err := recover(); err != nil {
-			nerr := fmt.Errorf("timeout proof task panic error:%v", err)
+			nerr := fmt.Errorf("timeout batch proof task panic error:%v", err)
 			log.Warn(nerr.Error())
 		}
 	}()
@@ -81,52 +92,14 @@ func (c *Collector) timeoutBatchProofTask() {
 	for {
 		select {
 		case <-ticker.C:
-			c.timeoutCheckerRunTotal.Inc()
-			timeout := time.Duration(c.cfg.ProverManager.CollectionTimeSec) * time.Second
-			assignedProverTasks, err := c.proverTaskOrm.GetTimeoutAssignedProverTasks(c.ctx, 10, timeout)
+			c.timeoutBatchCheckerRunTotal.Inc()
+			timeout := time.Duration(c.cfg.ProverManager.BatchCollectionTimeSec) * time.Second
+			assignedProverTasks, err := c.proverTaskOrm.GetTimeoutAssignedProverTasks(c.ctx, 10, message.ProofTypeBatch, timeout)
 			if err != nil {
 				log.Error("get unassigned session info failure", "error", err)
 				break
 			}
-
-			// here not update the block batch proving status failed, because the collector loop will check
-			// the attempt times. if reach the times, the collector will set the block batch proving status.
-			for _, assignedProverTask := range assignedProverTasks {
-				c.proverTaskTimeoutTotal.Inc()
-				log.Warn("proof task have reach the timeout", "task id", assignedProverTask.TaskID,
-					"prover public key", assignedProverTask.ProverPublicKey, "prover name", assignedProverTask.ProverName, "task type", assignedProverTask.TaskType)
-				err = c.db.Transaction(func(tx *gorm.DB) error {
-					// update prover task proving status as ProverProofInvalid
-					if err = c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, message.ProofType(assignedProverTask.TaskType),
-						assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverProofInvalid, tx); err != nil {
-						log.Error("update prover task proving status failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
-						return err
-					}
-
-					// update prover task failure type
-					if err = c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, message.ProofType(assignedProverTask.TaskType),
-						assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverTaskFailureTypeTimeout, tx); err != nil {
-						log.Error("update prover task failure type failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
-						return err
-					}
-
-					// update the task to unassigned, let collector restart it
-					if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeChunk {
-						if err = c.chunkOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
-							log.Error("update chunk proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
-						}
-					}
-					if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeBatch {
-						if err = c.batchOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
-							log.Error("update batch proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
-						}
-					}
-					return nil
-				})
-				if err != nil {
-					log.Error("check task proof is timeout failure", "error", err)
-				}
-			}
+			c.check(assignedProverTasks, c.batchProverTaskTimeoutTotal)
 		case <-c.ctx.Done():
 			if c.ctx.Err() != nil {
 				log.Error("manager context canceled with error", "error", c.ctx.Err())
@@ -138,3 +111,81 @@ func (c *Collector) timeoutBatchProofTask() {
 		}
 	}
 }
+
+func (c *Collector) timeoutChunkProofTask() {
+	defer func() {
+		if err := recover(); err != nil {
+			nerr := fmt.Errorf("timeout proof chunk task panic error:%v", err)
+			log.Warn(nerr.Error())
+		}
+	}()
+
+	ticker := time.NewTicker(time.Second * 2)
+	for {
+		select {
+		case <-ticker.C:
+			c.timeoutChunkCheckerRunTotal.Inc()
+			timeout := time.Duration(c.cfg.ProverManager.ChunkCollectionTimeSec) * time.Second
+			assignedProverTasks, err := c.proverTaskOrm.GetTimeoutAssignedProverTasks(c.ctx, 10, message.ProofTypeChunk, timeout)
+			if err != nil {
+				log.Error("get unassigned session info failure", "error", err)
+				break
+			}
+			c.check(assignedProverTasks, c.chunkProverTaskTimeoutTotal)
+
+		case <-c.ctx.Done():
+			if c.ctx.Err() != nil {
+				log.Error("manager context canceled with error", "error", c.ctx.Err())
+			}
+			return
+		case <-c.stopTimeoutChan:
+			log.Info("the coordinator run loop exit")
+			return
+		}
+	}
+}
+
+func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout prometheus.Counter) {
+	// here not update the block batch proving status failed, because the collector loop will check
+	// the attempt times. if reach the times, the collector will set the block batch proving status.
+	for _, assignedProverTask := range assignedProverTasks {
+		if c.proverTaskOrm.TaskTimeoutMoreThanOnce(c.ctx, assignedProverTask.TaskID) {
+			log.Warn("Task timeout more than once", "taskType", message.ProofType(assignedProverTask.TaskType).String(), "hash", assignedProverTask.TaskID)
+		}
+
+		timeout.Inc()
+		log.Warn("proof task have reach the timeout", "task id", assignedProverTask.TaskID,
+			"prover public key", assignedProverTask.ProverPublicKey, "prover name", assignedProverTask.ProverName, "task type", assignedProverTask.TaskType)
+		err := c.db.Transaction(func(tx *gorm.DB) error {
+			// update prover task proving status as ProverProofInvalid
+			if err := c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, message.ProofType(assignedProverTask.TaskType),
+				assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverProofInvalid, tx); err != nil {
+				log.Error("update prover task proving status failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
+				return err
+			}
+
+			// update prover task failure type
+			if err := c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, message.ProofType(assignedProverTask.TaskType),
+				assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverTaskFailureTypeTimeout, tx); err != nil {
+				log.Error("update prover task failure type failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
+				return err
+			}
+
+			// update the task to unassigned, let collector restart it
+			if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeChunk {
+				if err := c.chunkOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
+					log.Error("update chunk proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
+				}
+			}
+			if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeBatch {
+				if err := c.batchOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
+					log.Error("update batch proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
+				}
+			}
+			return nil
+		})
+		if err != nil {
+			log.Error("check task proof is timeout failure", "error", err)
+		}
+	}
+}
@@ -69,7 +69,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 		return nil, fmt.Errorf("get prover version from context failed")
 	}
 	if !version.CheckScrollProverVersion(proverVersion.(string)) {
-		return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", proverVersion.(string), version.Version)
+		return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
 	}
 
 	isAssigned, err := bp.proverTaskOrm.IsProverAssigned(ctx, publicKey.(string))
@@ -93,7 +93,7 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, reg pro
 	proverTaskProveDuration: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
 		Name:    "coordinator_task_prove_duration_seconds",
 		Help:    "Time spend by prover prove task.",
-		Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 20, 30, 60},
+		Buckets: []float64{180, 300, 480, 600, 900, 1200, 1800},
 	}),
 	validateFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
 		Name: "coordinator_validate_failure_total",
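The old bucket layout was the sub-second style suited to request latencies; proving a task takes minutes, so in practice every observation fell past the 60s top bucket and the histogram carried no resolution. The new bounds (in seconds) bracket realistic proving times. A sketch of the intent (`reg` stands in for any prometheus.Registerer, as in the constructor above):

    // 3 min .. 30 min buckets; slower proofs land in the implicit +Inf bucket.
    proveDuration := promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
        Name:    "coordinator_task_prove_duration_seconds",
        Help:    "Time spend by prover prove task.",
        Buckets: []float64{180, 300, 480, 600, 900, 1200, 1800},
    })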
@@ -121,14 +121,18 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, reg pro
 // HandleZkProof handle a ZkProof submitted from a prover.
 // For now only proving/verifying error will lead to setting status as skipped.
 // db/unmarshal errors will not because they are errors on the business logic side.
-func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.ProofMsg) error {
+func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) error {
 	m.proofReceivedTotal.Inc()
 	pk := ctx.GetString(coordinatorType.PublicKey)
 	if len(pk) == 0 {
 		return fmt.Errorf("get public key from context failed")
 	}
+	pv := ctx.GetString(coordinatorType.ProverVersion)
+	if len(pv) == 0 {
+		return fmt.Errorf("get ProverVersion from context failed")
+	}
 
-	proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, proofMsg.ID, pk)
+	proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndProver(ctx, proofMsg.ID, pk, pv)
 	if proverTask == nil || err != nil {
 		log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
 		return ErrValidatorFailureProverTaskEmpty
@@ -137,15 +141,14 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
 	proofTime := time.Since(proverTask.CreatedAt)
 	proofTimeSec := uint64(proofTime.Seconds())
 
-	log.Info("handling zk proof", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
-		"prover pk", pk, "prove type", proverTask.TaskType, "proof time", proofTimeSec)
+	log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
+		"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec)
 
-	if err = m.validator(ctx, proverTask, pk, proofMsg); err != nil {
+	if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter); err != nil {
 		return err
 	}
 
-	proverVersion := ctx.GetString(coordinatorType.ProverVersion)
-	m.verifierTotal.WithLabelValues(proverVersion).Inc()
+	m.verifierTotal.WithLabelValues(pv).Inc()
 
 	var success bool
 	var verifyErr error
@@ -156,8 +159,8 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
 	}
 
 	if verifyErr != nil || !success {
-		m.verifierFailureTotal.WithLabelValues(proverVersion).Inc()
-		m.proofFailure(ctx, proofMsg.ID, pk, proofMsg)
+		m.verifierFailureTotal.WithLabelValues(pv).Inc()
+		m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
 
 		log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
 			"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
@@ -201,8 +204,13 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
 	return nil
 }
 
-func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
-	m.validateFailureTotal.Inc()
+func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) (err error) {
+	defer func() {
+		if err != nil {
+			m.validateFailureTotal.Inc()
+		}
+	}()
 
 	// Ensure this prover is eligible to participate in the prover task.
 	if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid {
 		m.validateFailureProverTaskSubmitTwice.Inc()
@@ -210,8 +218,12 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
 		// TODO: Defend invalid proof resubmissions by one of the following two methods:
 		// (i) slash the prover for each submission of invalid proof
 		// (ii) set the maximum failure retry times
-		log.Warn("the prover task cannot submit twice", "hash", proofMsg.ID, "prover pk", proverTask.ProverPublicKey,
-			"prover name", proverTask.ProverName, "proof type", proverTask.TaskType)
+		log.Warn(
+			"cannot submit valid proof for a prover task twice",
+			"taskType", proverTask.TaskType, "hash", proofMsg.ID,
+			"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
+			"proverPublicKey", proverTask.ProverPublicKey,
+		)
 		return ErrValidatorFailureProverTaskCannotSubmitTwice
 	}
 
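The switch to a named return plus `defer` matters for the metric: the old version bumped `validateFailureTotal` unconditionally at the top of `validator`, counting every submission (including valid ones) as a failure, while the new version increments only when an error actually escapes. The pattern in isolation, as a small sketch (`Task` and `failures` are stand-ins, not coordinator types):

    // Named return + defer: one increment covers every early-return error path.
    func validate(task *Task) (err error) {
    	defer func() {
    		if err != nil {
    			failures.Inc() // counted only when validation actually fails
    		}
    	}()
    	if task == nil {
    		return errors.New("empty task")
    	}
    	// ... further checks, each returning a non-nil err on failure ...
    	return nil
    }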
@@ -219,53 +231,51 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
|
||||
proofTimeSec := uint64(proofTime.Seconds())
|
||||
|
||||
if proofMsg.Status != message.StatusOk {
|
||||
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
|
||||
m.validateFailureProverTaskStatusNotOk.Inc()
|
||||
log.Info("proof generated by prover failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
|
||||
"prover pk", pk, "prove type", proofMsg.Type, "error", proofMsg.Error)
|
||||
|
||||
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsg.Type, proofMsg.ID, pk, types.ProverProofInvalid); updateErr != nil {
|
||||
log.Error("proof generated by prover failed update prover task proving status failure", "proof id", proofMsg.ID,
|
||||
"prover name", proverTask.ProverName, "prover pk", pk, "prove type", proofMsg.Type, "error", proofMsg.Error)
|
||||
}
|
||||
log.Info("proof generated by prover failed",
|
||||
"taskType", proofMsg.Type, "hash", proofMsg.ID,
|
||||
"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
|
||||
"proverPublicKey", pk, "failureType", proofParameter.FailureType, "failureMessage", proofParameter.FailureMsg)
|
||||
return ErrValidatorFailureProofMsgStatusNotOk
|
||||
}
|
||||
|
||||
// if prover task FailureType is SessionInfoFailureTimeout, the submit proof is timeout, need skip it
|
||||
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
|
||||
m.validateFailureProverTaskTimeout.Inc()
|
||||
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "proof type", proverTask.TaskType,
|
||||
"prover name", proverTask.ProverName, "prover public key", pk, "proof time", proofTimeSec)
|
||||
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType,
|
||||
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
|
||||
return ErrValidatorFailureProofTimeout
|
||||
}
|
||||
|
||||
// store the proof to prover task
|
||||
if updateTaskProofErr := m.updateProverTaskProof(ctx, pk, proofMsg); updateTaskProofErr != nil {
|
||||
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "public key", pk,
|
||||
"prover name", proverTask.ProverName, "error", updateTaskProofErr)
|
||||
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk,
|
||||
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
|
||||
}
|
||||
|
||||
// if the batch/chunk have proved and verifier success, need skip this submit proof
|
||||
if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
|
||||
m.validateFailureProverTaskHaveVerifier.Inc()
|
||||
log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
|
||||
"proof type", proverTask.TaskType, "prover name", proverTask.ProverName, "prover public key", pk)
|
||||
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
|
||||
return ErrValidatorFailureTaskHaveVerifiedSuccess
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
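Worth calling out in the hunk above: the new validator signature switches to a named error return so that a single deferred hook counts every failure path, instead of incrementing the counter unconditionally on entry. A minimal, self-contained sketch of that pattern follows; the plain integer counter and the status argument are illustrative stand-ins, not the coordinator's real types.

package main

import "fmt"

// failureTotal stands in for the validateFailureTotal Prometheus counter.
var failureTotal int

// validate mirrors the validator's shape: a named error return plus one
// deferred hook, so every early-return failure path is counted exactly once.
func validate(status int) (err error) {
    defer func() {
        if err != nil {
            failureTotal++ // the real code calls m.validateFailureTotal.Inc()
        }
    }()

    if status != 0 {
        return fmt.Errorf("status not ok: %d", status)
    }
    return nil
}

func main() {
    _ = validate(1) // counted
    _ = validate(0) // not counted
    fmt.Println("failures counted:", failureTotal) // prints 1
}
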
func (m *ProofReceiverLogic) proofFailure(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
    log.Info("proof failure update proof status", "hash", hash, "public key", pubKey,
        "proof type", proofMsg.Type.String(), "status", types.ProvingTaskFailed.String())

    if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskFailed, 0); err != nil {
        log.Error("failed to updated proof status ProvingTaskFailed", "hash", hash, "pubKey", pubKey, "error", err)
    }
}
//func (m *ProofReceiverLogic) proofFailure(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
//    log.Info("proof failure update proof status", "hash", hash, "public key", pubKey,
//        "proof type", proofMsg.Type.String(), "status", types.ProvingTaskFailed.String())
//
//    if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskFailed, 0); err != nil {
//        log.Error("failed to updated proof status ProvingTaskFailed", "hash", hash, "pubKey", pubKey, "error", err)
//    }
//}

func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
    log.Info("proof recover update proof status", "hash", hash, "public key", pubKey,
        "proof type", proofMsg.Type.String(), "status", types.ProvingTaskUnassigned.String())
    log.Info("proof recover update proof status", "hash", hash, "proverPublicKey", pubKey,
        "taskType", proofMsg.Type.String(), "status", types.ProvingTaskUnassigned.String())

    if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskUnassigned, 0); err != nil {
        log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", hash, "pubKey", pubKey, "error", err)
@@ -273,11 +283,11 @@ func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubK
    }

func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg, proofTimeSec uint64) error {
    log.Info("proof close task update proof status", "hash", hash, "public key", pubKey,
        "proof type", proofMsg.Type.String(), "status", types.ProvingTaskVerified.String())
    log.Info("proof close task update proof status", "hash", hash, "proverPublicKey", pubKey,
        "taskType", proofMsg.Type.String(), "status", types.ProvingTaskVerified.String())

    if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskVerified, proofTimeSec); err != nil {
        log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "pubKey", pubKey, "error", err)
        log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "proverPublicKey", pubKey, "error", err)
        return err
    }
    return nil
@@ -299,8 +309,8 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
    }

    // if the block batch has proof verified, so the failed status not update block batch proving status
    if status == types.ProvingTaskFailed && m.checkIsTaskSuccess(ctx, hash, proofMsg.Type) {
        log.Info("update proof status ProvingTaskFailed skip because other prover have prove success", "hash", hash, "public key", proverPublicKey)
    if m.checkIsTaskSuccess(ctx, hash, proofMsg.Type) {
        log.Info("update proof status skip because this chunk / batch has been verified", "hash", hash, "public key", proverPublicKey)
        return nil
    }

@@ -113,16 +113,18 @@ func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, hashes []string
    return proverTasks, nil
}

// GetProverTaskByTaskIDAndPubKey get prover task taskID and public key
func (o *ProverTask) GetProverTaskByTaskIDAndPubKey(ctx context.Context, taskID, proverPublicKey string) (*ProverTask, error) {
// GetProverTaskByTaskIDAndProver get prover task taskID and public key
func (o *ProverTask) GetProverTaskByTaskIDAndProver(ctx context.Context, taskID, proverPublicKey, proverVersion string) (*ProverTask, error) {
    db := o.db.WithContext(ctx)
    db = db.Model(&ProverTask{})
    db = db.Where("task_id", taskID).Where("prover_public_key", proverPublicKey)
    db = db.Where("task_id", taskID)
    db = db.Where("prover_public_key", proverPublicKey)
    db = db.Where("prover_version", proverVersion)

    var proverTask ProverTask
    err := db.First(&proverTask).Error
    if err != nil {
        return nil, fmt.Errorf("ProverTask.GetProverTaskByTaskIDAndPubKey err:%w, taskID:%s, pubukey:%s", err, taskID, proverPublicKey)
        return nil, fmt.Errorf("ProverTask.GetProverTaskByTaskIDAndProver err:%w, taskID:%s, pubkey:%s, prover_version:%s", err, taskID, proverPublicKey, proverVersion)
    }
    return &proverTask, nil
}
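
A side effect of wrapping the gorm error with %w above is that gorm.ErrRecordNotFound survives the wrap, so callers can distinguish "no matching row" from a genuine database failure. A hedged caller-side sketch; findProverTask is a hypothetical helper, not part of this diff:

package orm

import (
    "context"
    "errors"

    "gorm.io/gorm"
)

// findProverTask shows how errors.Is walks the fmt.Errorf %w chain built in
// GetProverTaskByTaskIDAndProver: a missing (task, prover, version) row can
// be treated as "no assignment yet" rather than a hard error.
func (o *ProverTask) findProverTask(ctx context.Context, taskID, pk, version string) (*ProverTask, error) {
    task, err := o.GetProverTaskByTaskIDAndProver(ctx, taskID, pk, version)
    if errors.Is(err, gorm.ErrRecordNotFound) {
        return nil, nil // no row for this prover version yet
    }
    if err != nil {
        return nil, err // genuine database failure
    }
    return task, nil
}
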
@@ -142,10 +144,11 @@ func (o *ProverTask) GetProvingStatusByTaskID(ctx context.Context, taskID string
}

// GetTimeoutAssignedProverTasks get the timeout and assigned proving_status prover task
func (o *ProverTask) GetTimeoutAssignedProverTasks(ctx context.Context, limit int, timeout time.Duration) ([]ProverTask, error) {
func (o *ProverTask) GetTimeoutAssignedProverTasks(ctx context.Context, limit int, taskType message.ProofType, timeout time.Duration) ([]ProverTask, error) {
    db := o.db.WithContext(ctx)
    db = db.Model(&ProverTask{})
    db = db.Where("proving_status", int(types.ProverAssigned))
    db = db.Where("task_type", int(taskType))
    db = db.Where("assigned_at < ?", utils.NowUTC().Add(-timeout))
    db = db.Limit(limit)

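The timeout filter above is just a cutoff on assigned_at: any task assigned earlier than now minus the timeout is reclaimable. The same comparison in plain Go, with illustrative durations:

package main

import (
    "fmt"
    "time"
)

func main() {
    timeout := 20 * time.Minute
    cutoff := time.Now().UTC().Add(-timeout) // mirrors utils.NowUTC().Add(-timeout)

    assignedAt := time.Now().UTC().Add(-45 * time.Minute)
    // Equivalent of the SQL predicate "assigned_at < ?" bound to the cutoff:
    fmt.Println("timed out:", assignedAt.Before(cutoff)) // true
}
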
@@ -157,6 +160,25 @@ func (o *ProverTask) GetTimeoutAssignedProverTasks(ctx context.Context, limit in
    return proverTasks, nil
}

// TaskTimeoutMoreThanOnce get the timeout twice task. a temp design
func (o *ProverTask) TaskTimeoutMoreThanOnce(ctx context.Context, taskID string) bool {
    db := o.db.WithContext(ctx)
    db = db.Model(&ProverTask{})
    db = db.Where("task_id", taskID)
    db = db.Where("proving_status", int(types.ProverProofInvalid))

    var count int64
    if err := db.Count(&count).Error; err != nil {
        return true
    }

    if count >= 1 {
        return true
    }

    return false
}

// SetProverTask updates or inserts a ProverTask record.
func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask, dbTX ...*gorm.DB) error {
    db := o.db.WithContext(ctx)
@@ -166,8 +188,8 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask,

    db = db.Model(&ProverTask{})
    db = db.Clauses(clause.OnConflict{
        Columns:   []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
        DoUpdates: clause.AssignmentColumns([]string{"prover_version", "proving_status", "failure_type", "assigned_at"}),
        Columns:   []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}, {Name: "prover_version"}},
        DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
    })

    if err := db.Create(&proverTask).Error; err != nil {

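Adding prover_version to the conflict target means one row per (task, prover, version) instead of per (task, prover): re-assigning a task to the same prover and binary version resets that row in place, while the same prover on a new version gets a fresh row. The matching unique-constraint change appears in the SQL schema further down. A stand-alone sketch of the clause; the SQL in the comment is the expected shape of what gorm emits for Postgres, not captured output:

package orm

import (
    "gorm.io/gorm"
    "gorm.io/gorm/clause"
)

// upsertProverTask isolates the OnConflict clause used above. Roughly:
//
//   INSERT INTO prover_task (...) VALUES (...)
//   ON CONFLICT (task_type, task_id, prover_public_key, prover_version)
//   DO UPDATE SET proving_status = excluded.proving_status,
//                 failure_type   = excluded.failure_type,
//                 assigned_at    = excluded.assigned_at;
func upsertProverTask(db *gorm.DB, task *ProverTask) error {
    return db.Model(&ProverTask{}).Clauses(clause.OnConflict{
        Columns: []clause.Column{
            {Name: "task_type"}, {Name: "task_id"},
            {Name: "prover_public_key"}, {Name: "prover_version"},
        },
        DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
    }).Create(task).Error
}
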
@@ -4,7 +4,7 @@ import (
    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"

    "scroll-tech/common/ginmetrics"
    "scroll-tech/common/metrics"

    "scroll-tech/coordinator/internal/config"
    "scroll-tech/coordinator/internal/controller/api"
@@ -15,22 +15,13 @@ import (
func Route(router *gin.Engine, cfg *config.Config, reg prometheus.Registerer) {
    router.Use(gin.Recovery())

    apiMetric(router, reg)
    metrics.Use(router, "coordinator", reg)

    r := router.Group("coordinator")

    v1(r, cfg)
}

func apiMetric(r *gin.Engine, reg prometheus.Registerer) {
    m := ginmetrics.GetMonitor(reg)
    m.SetMetricPath("/metrics")
    m.SetMetricPrefix("coordinator_")
    m.SetSlowTime(1)
    m.SetDuration([]float64{0.025, .05, .1, .5, 1, 5, 10})
    m.Use(r)
}

func v1(router *gin.RouterGroup, conf *config.Config) {
    r := router.Group("/v1")

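The deleted apiMetric body gives a good idea of what the shared helper replaces. A sketch of a plausible metrics.Use, assuming it folds the same ginmetrics boilerplate behind one call with the service name as the metric prefix; the real implementation lives in scroll-tech/common/metrics and may differ:

package metrics

import (
    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"

    "scroll-tech/common/ginmetrics"
)

// Use wires up the same monitor the removed apiMetric configured: metric
// path, per-service prefix, slow-request threshold, and latency buckets.
func Use(r *gin.Engine, serviceName string, reg prometheus.Registerer) {
    m := ginmetrics.GetMonitor(reg)
    m.SetMetricPath("/metrics")
    m.SetMetricPrefix(serviceName + "_")
    m.SetSlowTime(1)
    m.SetDuration([]float64{0.025, .05, .1, .5, 1, 5, 10})
    m.Use(r)
}
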
@@ -2,8 +2,10 @@ package types

// SubmitProofParameter the SubmitProof api request parameter
type SubmitProofParameter struct {
    TaskID   string `form:"task_id" json:"task_id" binding:"required"`
    TaskType int    `form:"task_type" json:"task_type" binding:"required"`
    Status   int    `form:"status" json:"status"`
    Proof    string `form:"proof" json:"proof"`
    TaskID      string `form:"task_id" json:"task_id" binding:"required"`
    TaskType    int    `form:"task_type" json:"task_type" binding:"required"`
    Status      int    `form:"status" json:"status"`
    Proof       string `form:"proof" json:"proof"`
    FailureType int    `form:"failure_type" json:"failure_type"`
    FailureMsg  string `form:"failure_msg" json:"failure_msg"`
}

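Only task_id and task_type carry binding:"required", so gin rejects submissions that omit them while the new failure fields stay optional. An illustrative handler sketch; the function name, response shape, and import path are assumptions, not the coordinator's actual controller:

package api

import (
    "net/http"

    "github.com/gin-gonic/gin"

    coordinatorType "scroll-tech/coordinator/internal/types"
)

// submitProof sketches the binding step: ShouldBind enforces the required
// tags, so a payload without task_id or task_type fails with 400 before any
// proof-receiver logic runs. failure_type / failure_msg simply arrive
// zero-valued when the prover succeeded.
func submitProof(c *gin.Context) {
    var p coordinatorType.SubmitProofParameter
    if err := c.ShouldBind(&p); err != nil {
        c.JSON(http.StatusBadRequest, gin.H{"errcode": 1, "errmsg": err.Error()})
        return
    }
    c.JSON(http.StatusOK, gin.H{"errcode": 0})
}

One caveat of binding:"required" on an int: a zero value counts as missing, which presumably only works here because valid task types are non-zero.
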
@@ -76,11 +76,12 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
        ChainID: 111,
    },
    ProverManager: &config.ProverManager{
        ProversPerSession:  proversPerSession,
        Verifier:           &config.VerifierConfig{MockMode: true},
        CollectionTimeSec:  10,
        MaxVerifierWorkers: 10,
        SessionAttempts:    5,
        ProversPerSession:      proversPerSession,
        Verifier:               &config.VerifierConfig{MockMode: true},
        BatchCollectionTimeSec: 10,
        ChunkCollectionTimeSec: 10,
        MaxVerifierWorkers:     10,
        SessionAttempts:        5,
    },
    Auth: &config.Auth{
        ChallengeExpireDurationSec: tokenTimeout,
@@ -316,7 +317,7 @@ func testInvalidProof(t *testing.T) {
    assert.NoError(t, err)
    batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
    assert.NoError(t, err)
    if chunkProofStatus == types.ProvingTaskFailed && batchProofStatus == types.ProvingTaskFailed {
    if chunkProofStatus == types.ProvingTaskUnassigned && batchProofStatus == types.ProvingTaskUnassigned {
        return
    }
case <-tickStop:
@@ -438,7 +439,7 @@ func testTimeoutProof(t *testing.T) {
    assert.Equal(t, batchProofStatus, types.ProvingTaskAssigned)

    // wait coordinator to reset the prover task proving status
    time.Sleep(time.Duration(conf.ProverManager.CollectionTimeSec*2) * time.Second)
    time.Sleep(time.Duration(conf.ProverManager.BatchCollectionTimeSec*2) * time.Second)

    // create second mock prover, that will send valid proof.
    chunkProver2 := newMockProver(t, "prover_test"+strconv.Itoa(2), coordinatorURL, message.ProofTypeChunk)

@@ -63,7 +63,7 @@ func testResetDB(t *testing.T) {
    cur, err := Current(pgDB.DB)
    assert.NoError(t, err)
    // total number of tables.
    assert.Equal(t, 7, int(cur))
    assert.Equal(t, 8, int(cur))
}

func testMigrate(t *testing.T) {

@@ -26,7 +26,7 @@ create table prover_task
    updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deleted_at TIMESTAMP(0) DEFAULT NULL,

    CONSTRAINT uk_tasktype_taskid_publickey UNIQUE (task_type, task_id, prover_public_key)
    CONSTRAINT uk_tasktype_taskid_publickey_version UNIQUE (task_type, task_id, prover_public_key, prover_version)
);

comment

@@ -0,0 +1,15 @@
-- +goose Up
-- +goose StatementBegin
drop index l1_message_hash_uindex;

create index l1_message_hash_index
    on l1_message (msg_hash) where deleted_at IS NULL;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop index l1_message_hash_index;

create unique index l1_message_hash_uindex
    on l1_message (msg_hash) where deleted_at IS NULL;
-- +goose StatementEnd
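
The migration above swaps the unique index on msg_hash for a plain partial index (still scoped to live rows via deleted_at IS NULL), so duplicate msg_hash values no longer violate a constraint; Down restores uniqueness. Assuming these files are driven by pressly/goose, as the -- +goose markers suggest, applying them programmatically looks like this sketch (DSN and directory are placeholders):

package main

import (
    "database/sql"
    "log"

    _ "github.com/lib/pq"
    "github.com/pressly/goose/v3"
)

func main() {
    // Placeholder DSN; point this at the real database.
    db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/scroll?sslmode=disable")
    if err != nil {
        log.Fatal(err)
    }
    // Runs every pending Up migration in the directory, including the
    // l1_message index swap above.
    if err := goose.Up(db, "./migrations"); err != nil {
        log.Fatal(err)
    }
}
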
16
go.work.sum
@@ -193,6 +193,8 @@ github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7 h1:6IrxszG5G+O7zhtkWxq6+unVvnrm1fqV2Pe+T95DUzw=
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7/go.mod h1:gFnFS95y8HstDP6P9pPwzrxOOC5TRDkwbM+ao15ChAI=
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 h1:DuBDHVjgGMPki7bAyh91+3cF1Vh34sAEdH8JQgbc2R0=
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
@@ -237,6 +239,8 @@ github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf h1:Yt+4K30SdjOkRoRRm3v
github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7 h1:kgvzE5wLsLa7XKfV85VZl40QXaMCaeFtHpPwJ8fhotY=
github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7/go.mod h1:yRkwfj0CBpOGre+TwBsqPV0IH0Pk73e4PXJOeNDboGs=
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE=
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7 h1:tYwu/z8Y0NkkzGEh3z21mSWggMg4LwLRFucLS7TjARg=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d h1:W1n4DvpzZGOISgp7wWNtraLcHtnmnTwBlJidqtMIuwQ=
@@ -275,6 +279,8 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILD
github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8=
github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732 h1:AB7YjNrzlVHsYz06zCULVV2zYCEft82P86dSmtwxKL0=
github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732/go.mod h1:o/XfIXWi4/GqbQirfRm5uTbXMG5NpqxkxblnbZ+QM9I=
github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b h1:vMT47RYsrftsHSTQhqXwC3BYflo38OLC3Y4LtXtLyU0=
github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b/go.mod h1:CDncRYVRSDqwakm282WEkjfaAj1hxU/v5RXxk5nXOiI=
github.com/getkin/kin-openapi v0.61.0 h1:6awGqF5nG5zkVpMsAih1QH4VgzS8phTxECUWIFo7zko=
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM=
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
@@ -298,6 +304,7 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk=
@@ -370,6 +377,8 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U=
github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg=
github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
@@ -460,12 +469,15 @@ github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jsternberg/zap-logfmt v1.0.0 h1:0Dz2s/eturmdUS34GM82JwNEdQ9hPoJgqptcEKcbpzY=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 h1:PJr+ZMXIecYc1Ey2zucXdR73SMBtgjPgwa31099IMv0=
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef h1:2jNeR4YUziVtswNP9sEFAI913cVrzH85T+8Q6LpYbT0=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM=
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c h1:AqsttAyEyIEsNz5WLRwuRwjiT5CMDUfLk6cFJDVPebs=
github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kataras/blocks v0.0.7 h1:cF3RDY/vxnSRezc7vLFlQFTYXG/yAr1o7WImJuZbzC4=
github.com/kataras/blocks v0.0.7/go.mod h1:UJIU97CluDo0f+zEjbnbkeMRlvYORtmc1304EeyXf4I=
github.com/kataras/golog v0.1.7 h1:0TY5tHn5L5DlRIikepcaRR/6oInIr9AiWsxzt0vvlBE=
@@ -704,6 +716,8 @@ go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4A
go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc=
go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -774,6 +788,8 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=

@@ -1,7 +1,6 @@
package app

import (
    "context"
    "fmt"
    "os"
    "os/signal"
@@ -11,7 +10,6 @@ import (
    "github.com/urfave/cli/v2"

    "scroll-tech/common/database"
    "scroll-tech/common/metrics"
    "scroll-tech/common/utils"
    "scroll-tech/common/version"

@@ -55,14 +53,6 @@ func action(ctx *cli.Context) error {
        }
    }()

    subCtx, cancel := context.WithCancel(ctx.Context)
    defer func() {
        cancel()
    }()

    // Start metrics server.
    metrics.Serve(subCtx, ctx)

    // init Prover Stats API
    port := ctx.String(httpPortFlag.Name)

@@ -94,8 +94,8 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask,

    db = db.Model(&ProverTask{})
    db = db.Clauses(clause.OnConflict{
        Columns:   []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
        DoUpdates: clause.AssignmentColumns([]string{"prover_version", "proving_status", "failure_type", "assigned_at"}),
        Columns:   []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}, {Name: "prover_version"}},
        DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
    })

    if err := db.Create(&proverTask).Error; err != nil {

@@ -175,21 +175,25 @@ func (c *CoordinatorClient) SubmitProof(ctx context.Context, req *SubmitProofReq
        Post("/coordinator/v1/submit_proof")

    if err != nil {
        return fmt.Errorf("submit proof request failed: %v", err)
        log.Error("submit proof request failed: %v", err)
        return fmt.Errorf("submit proof request failed: %w", ErrCoordinatorConnect)
    }

    if resp.StatusCode() != 200 {
        return fmt.Errorf("failed to submit proof, status code: %v", resp.StatusCode())
        log.Error("failed to submit proof, status code: %v", resp.StatusCode())
        return fmt.Errorf("failed to submit proof, status code not 200: %w", ErrCoordinatorConnect)
    }

    if result.ErrCode == types.ErrJWTTokenExpired {
        log.Info("JWT expired, attempting to re-login")
        if err := c.Login(ctx); err != nil {
            return fmt.Errorf("JWT expired, re-login failed: %v", err)
            log.Error("JWT expired, re-login failed: %v", err)
            return fmt.Errorf("JWT expired, re-login failed: %w", ErrCoordinatorConnect)
        }
        log.Info("re-login success")
        return c.SubmitProof(ctx, req)
    }

    if result.ErrCode != types.Success {
        return fmt.Errorf("error code: %v, error message: %v", result.ErrCode, result.ErrMsg)
    }

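The rewrite funnels every transport-level failure through the ErrCoordinatorConnect sentinel (declared in the client package below), so a caller can retry on connectivity problems while still failing fast on protocol errors. A self-contained sketch of that consumer-side pattern; the retry loop and the stub submit function are illustrative, not the prover's actual loop:

package main

import (
    "errors"
    "fmt"
    "time"
)

var errCoordinatorConnect = errors.New("connect coordinator error")

// submit stands in for CoordinatorClient.SubmitProof: connectivity failures
// are wrapped with %w around the sentinel, everything else returns as-is.
func submit(attempt int) error {
    if attempt < 3 {
        return fmt.Errorf("submit proof request failed: %w", errCoordinatorConnect)
    }
    return nil
}

func main() {
    for attempt := 1; ; attempt++ {
        err := submit(attempt)
        if errors.Is(err, errCoordinatorConnect) {
            time.Sleep(10 * time.Millisecond) // back off, then retry
            continue
        }
        fmt.Println("done after", attempt, "attempts, err:", err)
        return // nil, or a non-retryable error
    }
}
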
@@ -1,9 +1,14 @@
package client

import (
    "errors"

    "scroll-tech/common/types/message"
)

// ErrCoordinatorConnect connect to coordinator error
var ErrCoordinatorConnect = errors.New("connect coordinator error")

// ChallengeResponse defines the response structure for random API
type ChallengeResponse struct {
    ErrCode int `json:"errcode"`
@@ -53,10 +58,12 @@ type GetTaskResponse struct {

// SubmitProofRequest defines the request structure for the SubmitProof API.
type SubmitProofRequest struct {
    TaskID   string `json:"task_id"`
    TaskType int    `json:"task_type"`
    Status   int    `json:"status"`
    Proof    string `json:"proof"`
    TaskID      string `json:"task_id"`
    TaskType    int    `json:"task_type"`
    Status      int    `json:"status"`
    Proof       string `json:"proof"`
    FailureType int    `json:"failure_type,omitempty"`
    FailureMsg  string `json:"failure_msg,omitempty"`
}

// SubmitProofResponse defines the response structure for the SubmitProof API.

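Because the two new fields carry omitempty, a successful submission leaves failure_type and failure_msg out of the request body entirely. A runnable check of that marshaling behavior; the struct is re-declared locally just for the demo:

package main

import (
    "encoding/json"
    "fmt"
)

// Local copy of SubmitProofRequest, for the demo only.
type SubmitProofRequest struct {
    TaskID      string `json:"task_id"`
    TaskType    int    `json:"task_type"`
    Status      int    `json:"status"`
    Proof       string `json:"proof"`
    FailureType int    `json:"failure_type,omitempty"`
    FailureMsg  string `json:"failure_msg,omitempty"`
}

func main() {
    ok, _ := json.Marshal(SubmitProofRequest{TaskID: "0xabc", TaskType: 1, Proof: "..."})
    bad, _ := json.Marshal(SubmitProofRequest{TaskID: "0xabc", TaskType: 1, FailureType: 2, FailureMsg: "witness generation panicked"})
    fmt.Println(string(ok))  // failure_* keys omitted
    fmt.Println(string(bad)) // failure_type and failure_msg included
}
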
@@ -52,9 +52,11 @@ func action(ctx *cli.Context) error {
    r.Start()

    defer r.Stop()
    log.Info("prover start successfully",
    log.Info(
        "prover start successfully",
        "name", cfg.ProverName, "type", cfg.Core.ProofType,
        "publickey", r.PublicKey(), "version", version.Version)
        "publickey", r.PublicKey(), "version", version.Version,
    )

    // Catch CTRL-C to ensure a graceful shutdown.
    interrupt := make(chan os.Signal, 1)

Some files were not shown because too many files have changed in this diff.