Compare commits


39 Commits

Author SHA1 Message Date
Xinran
11cd211de4 Merge branch 'develop' into multi_verifier 2023-08-15 15:42:31 +08:00
Lawliet-Chan
141a05f2c6 fix lint 2023-08-15 13:15:58 +08:00
Lawliet-Chan
5955329cff fix lint 2023-08-15 11:34:14 +08:00
Lawliet-Chan
3292128919 fix lint 2023-08-15 11:26:25 +08:00
Lawliet-Chan
497fd783a3 fix 2023-08-15 11:23:42 +08:00
Lawliet-Chan
e7d4b10451 fix 2023-08-14 21:42:54 +08:00
Xinran
e7f868b467 Merge branch 'develop' into multi_verifier 2023-08-14 21:01:34 +08:00
Lawliet-Chan
8dd288c22d add logs 2023-08-14 20:57:48 +08:00
Lawliet-Chan
4dce379232 add skip 2023-08-14 19:17:44 +08:00
Lawliet-Chan
d8019b2dfb update scripts 2023-08-14 15:43:29 +08:00
Lawliet-Chan
cf2fdfda9f try 2023-08-14 15:15:31 +08:00
Xinran
02e6f77221 Update common/version/version.go
Co-authored-by: georgehao <haohongfan@gmail.com>
2023-08-14 15:11:02 +08:00
Xinran
3e3f39a3f2 Merge branch 'develop' into multi_verifier 2023-08-14 08:59:28 +08:00
Xinran
00f906f612 Merge branch 'develop' into multi_verifier 2023-08-10 21:51:36 +08:00
Xinran
7f86fe65da Merge branch 'develop' into multi_verifier 2023-08-10 17:59:04 +08:00
Lawliet-Chan
df3fac89ae fix conflicts 2023-08-10 17:14:13 +08:00
Xinran
0892f0f876 Merge branch 'develop' into multi_verifier 2023-08-10 17:13:36 +08:00
Lawliet-Chan
0b9d6d04fa bump 2023-08-10 16:39:52 +08:00
Xinran
0c12773b78 Merge branch 'develop' into multi_verifier 2023-08-10 16:37:15 +08:00
Lawliet-Chan
36a4964aa5 fix 2023-08-10 14:33:32 +08:00
Lawliet-Chan
cd4158c80d try fix 2023-08-10 13:44:04 +08:00
Lawliet-Chan
3e01d3de83 Revert "for debugging"
This reverts commit 5344a73c5b.
2023-08-10 13:43:25 +08:00
Lawliet-Chan
5344a73c5b for debugging 2023-08-10 13:37:50 +08:00
Lawliet-Chan
50829cae9c fix lint 2023-08-10 13:19:42 +08:00
Lawliet-Chan
2044a052ec get jwt proverVersion 2023-08-10 13:10:49 +08:00
Lawliet-Chan
1e2ce2d655 fix mock 2023-08-10 09:11:51 +08:00
Lawliet-Chan
66e802fb55 fix mock 2023-08-10 09:07:44 +08:00
Xinran
716d01fda5 Merge branch 'develop' into multi_verifier 2023-08-10 00:25:40 +08:00
Lawliet-Chan
399404172c bump 2023-08-10 00:22:35 +08:00
Xinran
364a8f8850 Merge branch 'develop' into multi_verifier 2023-08-10 00:08:23 +08:00
Lawliet-Chan
76ba845d0f add Cargo.lock 2023-08-10 00:08:04 +08:00
Lawliet-Chan
f729b8e0ab fix cargo.lock 2023-08-09 23:58:53 +08:00
Lawliet-Chan
e6b0c61878 fix old version 2023-08-09 23:32:39 +08:00
Lawliet-Chan
a7a65b60e5 fix old version 2023-08-09 23:06:49 +08:00
Lawliet-Chan
160287e6cc try fix 2023-08-09 22:29:35 +08:00
Lawliet-Chan
bdc1957f27 fix 2023-08-09 22:21:11 +08:00
Lawliet-Chan
8ee49ed515 fix 2023-08-09 21:40:38 +08:00
Lawliet-Chan
c90d1414d9 fix 2023-08-09 21:33:58 +08:00
Lawliet-Chan
87b1a7a63e multi verifier 2023-08-09 21:31:14 +08:00
133 changed files with 10070 additions and 3362 deletions

View File

@@ -1,37 +0,0 @@
import { URL } from "url";
import { readFileSync, writeFileSync } from "fs";

const versionFilePath = new URL(
  "../../common/version/version.go",
  import.meta.url
).pathname;

const versionFileContent = readFileSync(versionFilePath, { encoding: "utf-8" });
const currentVersion = versionFileContent.match(
  /var tag = "(?<version>v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+))"/
);

try {
  parseInt(currentVersion.groups.major);
  parseInt(currentVersion.groups.minor);
  parseInt(currentVersion.groups.patch);
} catch (err) {
  console.error(new Error("Failed to parse version in version.go file"));
  throw err;
}

// prettier-ignore
const newVersion = `v${currentVersion.groups.major}.${currentVersion.groups.minor}.${parseInt(currentVersion.groups.patch) + 1}`;

console.log(
  `Bump version from ${currentVersion.groups.version} to ${newVersion}`
);

writeFileSync(
  versionFilePath,
  versionFileContent.replace(
    `var tag = "${currentVersion.groups.version}"`,
    `var tag = "${newVersion}"`
  )
);
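
Note: the deleted script above only works if common/version/version.go keeps the exact tag-line shape its regex captures. A minimal sketch of that shape follows (the package layout and version value are assumptions for illustration, not taken from this compare):

// common/version/version.go, in the shape the bump script's regex expects:
// a single double-quoted line `var tag = "vMAJOR.MINOR.PATCH"`.
package version

// tag is rewritten in place by bump_version_dot_go.mjs (illustrative value).
var tag = "v1.2.3"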

View File

@@ -43,6 +43,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Lint
working-directory: 'bridge'
run: |
@@ -92,6 +94,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -1,61 +0,0 @@
name: Bump Version
on:
  pull_request:
    branches: [develop]
    types:
      - opened
      - reopened
      - synchronize
      - ready_for_review

jobs:
  try-to-bump:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          ref: ${{ github.head_ref }}
      - name: check diff
        id: check_diff
        run: |
          set -euo pipefail
          # fetch develop branch so that we can diff against it later
          git fetch origin develop
          echo 'checking version changes in diff...'
          # check if the version changed in version.go
          # note: the grep will fail if \d is used instead of [0-9]
          git diff HEAD..origin/develop --text --no-ext-diff --unified=0 --no-prefix common/version/version.go | grep -E '^\+var tag = "v[0-9]+\.[0-9]+\.[0-9]+"$' && true
          exit_code=$?
          # auto bump if the version was not bumped manually
          echo '> require auto version bump?'
          if [ $exit_code -eq 0 ]; then
            echo '> no, already bumped'
            echo "result=no-bump" >> "$GITHUB_OUTPUT"
          else
            echo '> yes'
            echo "result=bump" >> "$GITHUB_OUTPUT"
          fi
      - name: Install Node.js 16
        if: steps.check_diff.outputs.result == 'bump'
        uses: actions/setup-node@v3
        with:
          node-version: 16
      - name: bump version in common/version/version.go
        if: steps.check_diff.outputs.result == 'bump'
        run: node .github/scripts/bump_version_dot_go.mjs
      # Commits made by this Action do not trigger new Workflow runs
      - uses: stefanzweifel/git-auto-commit-action@3ea6ae190baf489ba007f7c92608f33ce20ef04a
        if: steps.check_diff.outputs.result == 'bump'
        with:
          skip_fetch: true # already did fetch in check diff
          file_pattern: "common/version/version.go"
          commit_message: "chore: auto version bump[bot]"
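
The check-diff step above decides between bump and no-bump purely from whether the diff already contains an added tag line. A standalone Go sketch of the same match, handy for testing the pattern locally (the diff excerpt is made up; the regexp mirrors the workflow's grep):

package main

import (
	"fmt"
	"regexp"
)

// Same pattern the workflow greps for: an added tag line in the diff output.
var tagLine = regexp.MustCompile(`^\+var tag = "v[0-9]+\.[0-9]+\.[0-9]+"$`)

func main() {
	// Made-up two-line diff excerpt; the real step pipes `git diff ... --no-prefix`.
	diffLines := []string{
		`-var tag = "v4.1.98"`,
		`+var tag = "v4.1.99"`,
	}
	bumped := false
	for _, l := range diffLines {
		if tagLine.MatchString(l) {
			bumped = true // already bumped manually -> result=no-bump
		}
	}
	fmt.Println("already bumped:", bumped)
}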

View File

@@ -88,6 +88,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -104,6 +104,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -81,6 +81,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -33,6 +33,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -1,7 +1,5 @@
.PHONY: check update dev_docker build_test_docker run_test_docker clean
L2GETH_TAG=scroll-v4.3.34
help: ## Display this help message
@grep -h \
-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
@@ -17,14 +15,14 @@ lint: ## The code's format and security checks.
update: ## update dependencies
go work sync
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .

View File

@@ -95,9 +95,6 @@ func (b *BatchInfoFetcher) fetchBatchInfo() error {
} else {
startHeight = latestBatchHeight + 1
}
if startHeight < b.batchInfoStartNumber {
startHeight = b.batchInfoStartNumber
}
for from := startHeight; number >= from; from += fetchLimit {
to := from + fetchLimit - 1
// number - confirmation can never less than 0 since the for loop condition

View File

@@ -7,7 +7,6 @@ import (
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -60,8 +59,8 @@ func action(ctx *cli.Context) error {
}
}()
registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
@@ -73,12 +72,8 @@ func action(ctx *cli.Context) error {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
go utils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {

View File

@@ -7,7 +7,6 @@ import (
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -62,8 +61,8 @@ func action(ctx *cli.Context) error {
}
}()
registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
@@ -79,14 +78,14 @@ func action(ctx *cli.Context) error {
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */, registry)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err

View File

@@ -7,7 +7,6 @@ import (
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -60,10 +59,10 @@ func action(ctx *cli.Context) error {
}
}()
registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err

View File

@@ -7,7 +7,6 @@ import (
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -63,8 +62,8 @@ func action(ctx *cli.Context) error {
}
}()
registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
@@ -74,26 +73,26 @@ func action(ctx *cli.Context) error {
}
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis, registry)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis)
if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
return err
}
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db, registry)
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db)
if err != nil {
log.Error("failed to create chunkProposer", "config file", cfgFile, "error", err)
return err
}
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db, registry)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db)
if err != nil {
log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
return err
}
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
// Watcher loop to fetch missing blocks
go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
@@ -107,11 +106,11 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 2*time.Second, chunkProposer.TryProposeChunk)
go utils.Loop(subCtx, 10*time.Second, batchProposer.TryProposeBatch)
go utils.Loop(subCtx, 2*time.Second, batchProposer.TryProposeBatch)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)
go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)
go utils.Loop(subCtx, 10*time.Second, l2relayer.ProcessCommittedBatches)
// Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully")

View File

@@ -64,9 +64,11 @@
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515"
},
"chunk_proposer_config": {
"max_tx_gas_per_chunk": 1123456,
"max_l2_tx_num_per_chunk": 1123,
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"min_l1_commit_calldata_size_per_chunk": 11234,
"chunk_timeout_sec": 300,
"max_row_consumption_per_chunk": 1048319,
"gas_cost_increase_multiplier": 1.2
@@ -75,6 +77,7 @@
"max_chunk_num_per_batch": 112,
"max_l1_commit_gas_per_batch": 11234567,
"max_l1_commit_calldata_size_per_batch": 112345,
"min_chunk_num_per_batch": 11,
"batch_timeout_sec": 300,
"gas_cost_increase_multiplier": 1.2
}

View File

@@ -5,7 +5,6 @@ go 1.19
require (
github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/prometheus/client_golang v1.14.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.3
@@ -14,7 +13,6 @@ require (
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@@ -22,7 +20,6 @@ require (
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect
@@ -38,12 +35,8 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
@@ -61,7 +54,7 @@ require (
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -2,8 +2,6 @@ github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.9.0 h1:PDiKKybR596O6FHW+RVSG0Z7uGCBNbmbUXh3uCNQ7Hc=
github.com/agiledragon/gomonkey/v2 v2.9.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -33,14 +31,8 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -70,8 +62,11 @@ github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlT
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
@@ -82,8 +77,6 @@ github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APP
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
@@ -97,14 +90,6 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
@@ -155,7 +140,6 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
@@ -173,13 +157,9 @@ golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=

View File

@@ -28,9 +28,11 @@ type L2Config struct {
// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
MaxTxGasPerChunk uint64 `json:"max_tx_gas_per_chunk"`
MaxL2TxNumPerChunk uint64 `json:"max_l2_tx_num_per_chunk"`
MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
MaxRowConsumptionPerChunk uint64 `json:"max_row_consumption_per_chunk"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
@@ -41,6 +43,7 @@ type BatchProposerConfig struct {
MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"`
MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
MaxL1CommitCalldataSizePerBatch uint32 `json:"max_l1_commit_calldata_size_per_batch"`
MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
}
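
Both proposer configs above are plain JSON-tagged structs, so the new min/max knobs can be exercised directly with encoding/json. A minimal, self-contained sketch (the struct is trimmed to three fields from this hunk; values are copied from the sample config earlier in this compare):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-in for ChunkProposerConfig, keeping a few fields from this diff.
type chunkProposerConfig struct {
	MaxTxGasPerChunk                uint64 `json:"max_tx_gas_per_chunk"`
	MaxL2TxNumPerChunk              uint64 `json:"max_l2_tx_num_per_chunk"`
	MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"`
}

func main() {
	raw := []byte(`{
		"max_tx_gas_per_chunk": 1123456,
		"max_l2_tx_num_per_chunk": 1123,
		"min_l1_commit_calldata_size_per_chunk": 11234
	}`)
	var cfg chunkProposerConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // all three knobs decoded from the JSON config
}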

View File

@@ -6,13 +6,15 @@ import (
"fmt"
"math/big"
"github.com/prometheus/client_golang/prometheus"
// not sure if this will make problems when relay with l1geth
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
@@ -21,6 +23,11 @@ import (
"scroll-tech/bridge/internal/orm"
)
var (
bridgeL1MsgsRelayedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/total", metrics.ScrollRegistry)
bridgeL1MsgsRelayedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
)
// Layer1Relayer is responsible for
// 1. fetch pending L1Message from db
// 2. relay pending message to layer 2 node
@@ -47,18 +54,17 @@ type Layer1Relayer struct {
l1MessageOrm *orm.L1Message
l1BlockOrm *orm.L1Block
metrics *l1RelayerMetrics
}
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, reg prometheus.Registerer) (*Layer1Relayer, error) {
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey, "l1_relayer", "message_sender", reg)
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new message sender failed for address %s, err: %v", addr.Hex(), err)
}
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l1_relayer", "gas_oracle_sender", reg)
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %v", addr.Hex(), err)
@@ -80,7 +86,6 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
}
l1Relayer := &Layer1Relayer{
cfg: cfg,
ctx: ctx,
l1MessageOrm: orm.NewL1Message(db),
l1BlockOrm: orm.NewL1Block(db),
@@ -95,9 +100,9 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
}
l1Relayer.metrics = initL1RelayerMetrics(reg)
cfg: cfg,
}
go l1Relayer.handleConfirmLoop(ctx)
return l1Relayer, nil
@@ -118,9 +123,7 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
for _, msg := range msgs {
tmpMsg := msg
r.metrics.bridgeL1RelayedMsgsTotal.Inc()
if err = r.processSavedEvent(&tmpMsg); err != nil {
r.metrics.bridgeL1RelayedMsgsFailureTotal.Inc()
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
}
@@ -142,6 +145,7 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
if err != nil {
return err
}
bridgeL1MsgsRelayedTotalCounter.Inc(1)
log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash)
err = r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
@@ -153,7 +157,6 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
// ProcessGasPriceOracle imports gas price to layer2
func (r *Layer1Relayer) ProcessGasPriceOracle() {
r.metrics.bridgeL1RelayerGasPriceOraclerRunTotal.Inc()
latestBlockHeight, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
@@ -198,7 +201,6 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
return
}
r.lastGasPrice = block.BaseFee
r.metrics.bridgeL1RelayerLastGasPrice.Set(float64(r.lastGasPrice))
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee)
}
}
@@ -210,7 +212,7 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
case <-ctx.Done():
return
case cfm := <-r.messageSender.ConfirmChan():
r.metrics.bridgeL1MsgsRelayedConfirmedTotal.Inc()
bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
if !cfm.IsSuccessful {
err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
if err != nil {
@@ -226,7 +228,6 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
case cfm := <-r.gasOracleSender.ConfirmChan():
r.metrics.bridgeL1GasOraclerConfirmedTotal.Inc()
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())

View File

@@ -1,54 +0,0 @@
package relayer

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type l1RelayerMetrics struct {
	bridgeL1RelayedMsgsTotal               prometheus.Counter
	bridgeL1RelayedMsgsFailureTotal        prometheus.Counter
	bridgeL1RelayerGasPriceOraclerRunTotal prometheus.Counter
	bridgeL1RelayerLastGasPrice            prometheus.Gauge
	bridgeL1MsgsRelayedConfirmedTotal      prometheus.Counter
	bridgeL1GasOraclerConfirmedTotal       prometheus.Counter
}

var (
	initL1RelayerMetricOnce sync.Once
	l1RelayerMetric         *l1RelayerMetrics
)

func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
	initL1RelayerMetricOnce.Do(func() {
		l1RelayerMetric = &l1RelayerMetrics{
			bridgeL1RelayedMsgsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer1_msg_relayed_total",
				Help: "The total number of the l1 relayed message.",
			}),
			bridgeL1RelayedMsgsFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer1_msg_relayed_failure_total",
				Help: "The total number of the l1 relayed failure message.",
			}),
			bridgeL1MsgsRelayedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer1_relayed_confirmed_total",
				Help: "The total number of layer1 relayed confirmed",
			}),
			bridgeL1RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer1_gas_price_oracler_total",
				Help: "The total number of layer1 gas price oracler run total",
			}),
			bridgeL1RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
				Name: "bridge_layer1_gas_price_latest_gas_price",
				Help: "The latest gas price of bridge relayer l1",
			}),
			bridgeL1GasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer1_gas_oracler_confirmed_total",
				Help: "The total number of layer1 relayed confirmed",
			}),
		}
	})
	return l1RelayerMetric
}

View File

@@ -62,7 +62,7 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewL1Relayer(t *testing.T) {
db := setupL1RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig, nil)
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
assert.NotNil(t, relayer)
}
@@ -72,7 +72,7 @@ func testL1RelayerProcessSaveEvents(t *testing.T) {
defer database.CloseDB(db)
l1MessageOrm := orm.NewL1Message(db)
l1Cfg := cfg.L1Config
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
assert.NotNil(t, relayer)
assert.NoError(t, l1MessageOrm.SaveL1Messages(context.Background(), templateL1Message))
@@ -99,7 +99,7 @@ func testL1RelayerMsgConfirm(t *testing.T) {
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate message confirmations.
@@ -138,7 +138,7 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate message confirmations.
@@ -168,7 +168,7 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
assert.NotNil(t, l1Relayer)

View File

@@ -8,15 +8,16 @@ import (
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
@@ -25,6 +26,13 @@ import (
"scroll-tech/bridge/internal/orm"
)
var (
bridgeL2BatchesFinalizedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
)
// Layer2Relayer is responsible for
// 1. Committing and finalizing L2 blocks on L1
// 2. Relaying messages from L2 to L1
@@ -70,30 +78,29 @@ type Layer2Relayer struct {
// A list of processing batch finalization.
// key(string): confirmation ID, value(string): batch hash.
processingFinalization sync.Map
metrics *l2RelayerMetrics
}
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool, reg prometheus.Registerer) (*Layer2Relayer, error) {
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey, "l2_relayer", "message_sender", reg)
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool) (*Layer2Relayer, error) {
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new message sender failed for address %s, err: %w", addr.Hex(), err)
}
commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey, "l2_relayer", "commit_sender", reg)
commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.CommitSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new commit sender failed for address %s, err: %w", addr.Hex(), err)
}
finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderPrivateKey, "l2_relayer", "finalize_sender", reg)
finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderPrivateKey)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.FinalizeSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new finalize sender failed for address %s, err: %w", addr.Hex(), err)
}
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l2_relayer", "gas_oracle_sender", reg)
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %w", addr.Hex(), err)
@@ -151,7 +158,6 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
}
}
layer2Relayer.metrics = initL2RelayerMetrics(reg)
go layer2Relayer.handleConfirmLoop(ctx)
return layer2Relayer, nil
@@ -269,7 +275,6 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
// ProcessGasPriceOracle imports gas price to layer1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
r.metrics.bridgeL2RelayerGasPriceOraclerRunTotal.Inc()
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
if batch == nil || err != nil {
log.Error("Failed to GetLatestBatch", "batch", batch, "err", err)
@@ -307,7 +312,6 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
return
}
r.lastGasPrice = suggestGasPriceUint64
r.metrics.bridgeL2RelayerLastGasPrice.Set(float64(r.lastGasPrice))
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
}
}
@@ -316,13 +320,12 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
func (r *Layer2Relayer) ProcessPendingBatches() {
// get pending batches from database in ascending order by their index.
pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 1)
pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 10)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
}
for _, batch := range pendingBatches {
r.metrics.bridgeL2RelayerProcessPendingBatchTotal.Inc()
// get current header and parent header.
currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader)
if err != nil {
@@ -397,7 +400,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err)
return
}
r.metrics.bridgeL2RelayerProcessPendingBatchSuccessTotal.Inc()
bridgeL2BatchesCommittedTotalCounter.Inc(1)
r.processingCommitment.Store(txID, batch.Hash)
log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
}
@@ -421,8 +424,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
return
}
r.metrics.bridgeL2RelayerProcessCommittedBatchesTotal.Inc()
batch := batches[0]
hash := batch.Hash
status := types.ProvingStatus(batch.ProvingStatus)
@@ -430,9 +431,12 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
// The proof for this block is not ready yet.
return
case types.ProvingTaskProved:
// It's an intermediate state. The prover manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified.
return
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash)
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
var parentBatchStateRoot string
if batch.Index > 0 {
@@ -491,6 +495,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
}
return
}
bridgeL2BatchesFinalizedTotalCounter.Inc(1)
log.Info("finalizeBatchWithProof in layer1", "index", batch.Index, "batch hash", batch.Hash, "tx hash", hash)
// record and sync with db, @todo handle db error
@@ -501,7 +506,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
"tx hash", finalizeTxHash.String(), "err", err)
}
r.processingFinalization.Store(txID, hash)
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
case types.ProvingTaskFailed:
// We were unable to prove this batch. There are two possibilities:
@@ -546,7 +550,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
"batch hash", batchHash.(string),
"tx hash", confirmation.TxHash.String(), "err", err)
}
r.metrics.bridgeL2BatchesCommittedConfirmedTotal.Inc()
bridgeL2BatchesCommittedConfirmedTotalCounter.Inc(1)
r.processingCommitment.Delete(confirmation.ID)
}
@@ -568,7 +572,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
"batch hash", batchHash.(string),
"tx hash", confirmation.TxHash.String(), "err", err)
}
r.metrics.bridgeL2BatchesFinalizedConfirmedTotal.Inc()
bridgeL2BatchesFinalizedConfirmedTotalCounter.Inc(1)
r.processingFinalization.Delete(confirmation.ID)
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
@@ -586,7 +590,6 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
case confirmation := <-r.finalizeSender.ConfirmChan():
r.handleConfirmation(confirmation)
case cfm := <-r.gasOracleSender.ConfirmChan():
r.metrics.bridgeL2BatchesGasOraclerConfirmedTotal.Inc()
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
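
One behavioral change in ProcessCommittedBatches above is the explicit ProvingTaskProved case: a proof that has been received but not yet verified is no longer finalized. A condensed, self-contained sketch of that gate (the status constants are local stand-ins for the ones in scroll-tech/common/types; the relayer plumbing is omitted):

package main

import "fmt"

// Local stand-ins for the proving statuses named in the switch above.
type ProvingStatus int

const (
	ProvingTaskUnassigned ProvingStatus = iota
	ProvingTaskAssigned
	ProvingTaskProved
	ProvingTaskVerified
	ProvingTaskFailed
)

// shouldFinalize mirrors the gate in ProcessCommittedBatches: only a verified
// proof is rolled up; a merely proved batch waits until verification completes.
func shouldFinalize(s ProvingStatus) bool {
	switch s {
	case ProvingTaskVerified:
		return true
	case ProvingTaskProved:
		return false // proof received but not yet verified: do not finalize
	default:
		return false // unassigned, assigned, and failed take their own paths
	}
}

func main() {
	fmt.Println(shouldFinalize(ProvingTaskProved))   // false
	fmt.Println(shouldFinalize(ProvingTaskVerified)) // true
}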

View File

@@ -1,74 +0,0 @@
package relayer

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type l2RelayerMetrics struct {
	bridgeL2RelayerProcessPendingBatchTotal                     prometheus.Counter
	bridgeL2RelayerProcessPendingBatchSuccessTotal              prometheus.Counter
	bridgeL2RelayerGasPriceOraclerRunTotal                      prometheus.Counter
	bridgeL2RelayerLastGasPrice                                 prometheus.Gauge
	bridgeL2RelayerProcessCommittedBatchesTotal                 prometheus.Counter
	bridgeL2RelayerProcessCommittedBatchesFinalizedTotal        prometheus.Counter
	bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal prometheus.Counter
	bridgeL2BatchesCommittedConfirmedTotal                      prometheus.Counter
	bridgeL2BatchesFinalizedConfirmedTotal                      prometheus.Counter
	bridgeL2BatchesGasOraclerConfirmedTotal                     prometheus.Counter
}

var (
	initL2RelayerMetricOnce sync.Once
	l2RelayerMetric         *l2RelayerMetrics
)

func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
	initL2RelayerMetricOnce.Do(func() {
		l2RelayerMetric = &l2RelayerMetrics{
			bridgeL2RelayerProcessPendingBatchTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_pending_batch_total",
				Help: "The total number of layer2 process pending batch",
			}),
			bridgeL2RelayerProcessPendingBatchSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_pending_batch_success_total",
				Help: "The total number of layer2 process pending success batch",
			}),
			bridgeL2RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_gas_price_oracler_total",
				Help: "The total number of layer2 gas price oracler run total",
			}),
			bridgeL2RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
				Name: "bridge_layer2_gas_price_latest_gas_price",
				Help: "The latest gas price of bridge relayer l2",
			}),
			bridgeL2RelayerProcessCommittedBatchesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_committed_batches_total",
				Help: "The total number of layer2 process committed batches run total",
			}),
			bridgeL2RelayerProcessCommittedBatchesFinalizedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_committed_batches_finalized_total",
				Help: "The total number of layer2 process committed batches finalized total",
			}),
			bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_committed_batches_finalized_success_total",
				Help: "The total number of layer2 process committed batches finalized success total",
			}),
			bridgeL2BatchesCommittedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_committed_batches_confirmed_total",
				Help: "The total number of layer2 process committed batches confirmed total",
			}),
			bridgeL2BatchesFinalizedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_finalized_batches_confirmed_total",
				Help: "The total number of layer2 process finalized batches confirmed total",
			}),
			bridgeL2BatchesGasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
				Name: "bridge_layer2_process_gras_oracler_confirmed_total",
				Help: "The total number of layer2 process finalized batches confirmed total",
			}),
		}
	})
	return l2RelayerMetric
}

View File

@@ -35,7 +35,7 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewRelayer(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
assert.NoError(t, err)
assert.NotNil(t, relayer)
}
@@ -45,7 +45,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
@@ -73,7 +73,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
@@ -114,7 +114,7 @@ func testL2RelayerCommitConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
// Simulate message confirmations.
@@ -164,7 +164,7 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
// Simulate message confirmations.
@@ -221,7 +221,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
// Simulate message confirmations.
@@ -259,7 +259,7 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
assert.NoError(t, err)
assert.NotNil(t, relayer)

View File

@@ -1,95 +0,0 @@
package sender

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type senderMetrics struct {
	senderCheckBalancerTotal                *prometheus.CounterVec
	senderCheckPendingTransactionTotal      *prometheus.CounterVec
	sendTransactionTotal                    *prometheus.CounterVec
	sendTransactionFailureFullTx            *prometheus.GaugeVec
	sendTransactionFailureRepeatTransaction *prometheus.CounterVec
	sendTransactionFailureGetFee            *prometheus.CounterVec
	sendTransactionFailureSendTx            *prometheus.CounterVec
	resubmitTransactionTotal                *prometheus.CounterVec
	currentPendingTxsNum                    *prometheus.GaugeVec
	currentGasFeeCap                        *prometheus.GaugeVec
	currentGasTipCap                        *prometheus.GaugeVec
	currentGasPrice                         *prometheus.GaugeVec
	currentGasLimit                         *prometheus.GaugeVec
	currentNonce                            *prometheus.GaugeVec
}

var (
	initSenderMetricOnce sync.Once
	sm                   *senderMetrics
)

func initSenderMetrics(reg prometheus.Registerer) *senderMetrics {
	initSenderMetricOnce.Do(func() {
		sm = &senderMetrics{
			sendTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
				Name: "bridge_sender_send_transaction_total",
				Help: "The total number of sending transaction.",
			}, []string{"service", "name"}),
			sendTransactionFailureFullTx: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
				Name: "bridge_sender_send_transaction_full_tx_failure_total",
				Help: "The total number of sending transaction failure for full size tx.",
			}, []string{"service", "name"}),
			sendTransactionFailureRepeatTransaction: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
				Name: "bridge_sender_send_transaction_repeat_transaction_failure_total",
				Help: "The total number of sending transaction failure for repeat transaction.",
			}, []string{"service", "name"}),
			sendTransactionFailureGetFee: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
				Name: "bridge_sender_send_transaction_get_fee_failure_total",
				Help: "The total number of sending transaction failure for getting fee.",
			}, []string{"service", "name"}),
			sendTransactionFailureSendTx: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
				Name: "bridge_sender_send_transaction_send_tx_failure_total",
				Help: "The total number of sending transaction failure for sending tx.",
			}, []string{"service", "name"}),
			resubmitTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
				Name: "bridge_sender_send_transaction_resubmit_send_transaction_total",
				Help: "The total number of resubmit transaction.",
			}, []string{"service", "name"}),
			currentPendingTxsNum: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
				Name: "bridge_sender_pending_tx_count",
				Help: "The pending tx count in the sender.",
			}, []string{"service", "name"}),
			currentGasFeeCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
				Name: "bridge_sender_gas_fee_cap",
				Help: "The gas fee of current transaction.",
			}, []string{"service", "name"}),
			currentGasTipCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
				Name: "bridge_sender_gas_tip_cap",
				Help: "The gas tip of current transaction.",
			}, []string{"service", "name"}),
			currentGasPrice: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
				Name: "bridge_sender_gas_price_cap",
				Help: "The gas price of current transaction.",
			}, []string{"service", "name"}),
			currentGasLimit: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
				Name: "bridge_sender_gas_limit",
				Help: "The gas limit of current transaction.",
			}, []string{"service", "name"}),
			currentNonce: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
				Name: "bridge_sender_nonce",
				Help: "The nonce of current transaction.",
			}, []string{"service", "name"}),
			senderCheckPendingTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
				Name: "bridge_sender_check_pending_transaction_total",
				Help: "The total number of check pending transaction.",
			}, []string{"service", "name"}),
			senderCheckBalancerTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
				Name: "bridge_sender_check_balancer_total",
				Help: "The total number of check balancer.",
			}, []string{"service", "name"}),
		}
	})
	return sm
}

View File

@@ -11,7 +11,6 @@ import (
"time"
cmapV2 "github.com/orcaman/concurrent-map/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -71,8 +70,6 @@ type Sender struct {
client *ethclient.Client // The client to retrieve on chain data or send transaction.
chainID *big.Int // The chain id of the endpoint
ctx context.Context
service string
name string
auth *bind.TransactOpts
minBalance *big.Int
@@ -83,13 +80,11 @@ type Sender struct {
confirmCh chan *Confirmation
stopCh chan struct{}
metrics *senderMetrics
}
// NewSender returns a new instance of transaction sender
// txConfirmationCh is used to notify confirmed transaction
func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey, service, name string, reg prometheus.Registerer) (*Sender, error) {
func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey) (*Sender, error) {
client, err := ethclient.Dial(config.Endpoint)
if err != nil {
return nil, fmt.Errorf("failed to dial eth client, err: %w", err)
@@ -140,10 +135,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.Pri
baseFeePerGas: baseFeePerGas,
pendingTxs: cmapV2.New[*PendingTransaction](),
stopCh: make(chan struct{}),
name: name,
service: service,
}
sender.metrics = initSenderMetrics(reg)
go sender.loop(ctx)
@@ -191,15 +183,10 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val
// SendTransaction send a signed L2tL1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (common.Hash, error) {
s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()
if s.IsFull() {
s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(1)
return common.Hash{}, ErrFullPending
}
s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(0)
if ok := s.pendingTxs.SetIfAbsent(ID, nil); !ok {
s.metrics.sendTransactionFailureRepeatTransaction.WithLabelValues(s.service, s.name).Inc()
return common.Hash{}, fmt.Errorf("repeat transaction ID: %s", ID)
}
@@ -216,12 +203,9 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
}()
if feeData, err = s.getFeeData(s.auth, target, value, data, minGasLimit); err != nil {
s.metrics.sendTransactionFailureGetFee.WithLabelValues(s.service, s.name).Inc()
return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
}
if tx, err = s.createAndSendTx(s.auth, feeData, target, value, data, nil); err != nil {
s.metrics.sendTransactionFailureSendTx.WithLabelValues(s.service, s.name).Inc()
return common.Hash{}, fmt.Errorf("failed to create and send transaction, err: %w", err)
}
@@ -310,20 +294,6 @@ func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, targ
return nil, err
}
if feeData.gasTipCap != nil {
s.metrics.currentGasTipCap.WithLabelValues(s.service, s.name).Set(float64(feeData.gasTipCap.Uint64()))
}
if feeData.gasFeeCap != nil {
s.metrics.currentGasFeeCap.WithLabelValues(s.service, s.name).Set(float64(feeData.gasFeeCap.Uint64()))
}
if feeData.gasPrice != nil {
s.metrics.currentGasPrice.WithLabelValues(s.service, s.name).Set(float64(feeData.gasPrice.Uint64()))
}
s.metrics.currentGasLimit.WithLabelValues(s.service, s.name).Set(float64(feeData.gasLimit))
// update nonce when it is not from resubmit
if overrideNonce == nil {
auth.Nonce = big.NewInt(int64(nonce + 1))
@@ -346,15 +316,8 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
txInfo := map[string]interface{}{
"tx_hash": tx.Hash().String(),
"tx_type": s.config.TxType,
"from": auth.From.String(),
}
switch s.config.TxType {
case LegacyTxType, AccessListTxType: // `LegacyTxType` is for ganache mock node
originalGasPrice := feeData.gasPrice
gasPrice := escalateMultipleNum.Mul(escalateMultipleNum, big.NewInt(feeData.gasPrice.Int64()))
gasPrice = gasPrice.Div(gasPrice, escalateMultipleDen)
if gasPrice.Cmp(feeData.gasPrice) < 0 {
@@ -364,13 +327,7 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
gasPrice = maxGasPrice
}
feeData.gasPrice = gasPrice
txInfo["original_gas_price"] = originalGasPrice
txInfo["adjusted_gas_price"] = gasPrice
default:
originalGasTipCap := big.NewInt(feeData.gasTipCap.Int64())
originalGasFeeCap := big.NewInt(feeData.gasFeeCap.Int64())
gasTipCap := big.NewInt(feeData.gasTipCap.Int64())
gasTipCap = gasTipCap.Mul(gasTipCap, escalateMultipleNum)
gasTipCap = gasTipCap.Div(gasTipCap, escalateMultipleDen)
@@ -402,17 +359,9 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
}
feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
txInfo["original_gas_tip_cap"] = originalGasTipCap
txInfo["adjusted_gas_tip_cap"] = gasTipCap
txInfo["original_gas_fee_cap"] = originalGasFeeCap
txInfo["adjusted_gas_fee_cap"] = gasFeeCap
}
log.Debug("Transaction gas adjustment details", txInfo)
nonce := tx.Nonce()
s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()
return s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
}
@@ -449,12 +398,6 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
}
}
} else if s.config.EscalateBlocks+pending.submitAt < number {
log.Debug("resubmit transaction",
"tx hash", pending.tx.Hash().String(),
"submit block number", pending.submitAt,
"current block number", number,
"escalateBlocks", s.config.EscalateBlocks)
var tx *types.Transaction
tx, err := s.resubmitTransaction(pending.feeData, pending.signer, pending.tx)
if err != nil {
@@ -511,8 +454,8 @@ func (s *Sender) checkBalance(ctx context.Context) error {
}
if bls.Cmp(s.minBalance) < 0 {
return fmt.Errorf("insufficient account balance - actual balance: %s, minimum required balance: %s, address: %s",
bls.String(), s.minBalance.String(), s.auth.From.String())
return fmt.Errorf("insufficient account balance - actual balance: %s, minimum required balance: %s",
bls.String(), s.minBalance.String())
}
return nil
@@ -529,7 +472,6 @@ func (s *Sender) loop(ctx context.Context) {
for {
select {
case <-checkTick.C:
s.metrics.senderCheckPendingTransactionTotal.WithLabelValues(s.service, s.name).Inc()
header, err := s.client.HeaderByNumber(s.ctx, nil)
if err != nil {
log.Error("failed to get latest head", "err", err)
@@ -544,10 +486,9 @@ func (s *Sender) loop(ctx context.Context) {
s.checkPendingTransaction(header, confirmed)
case <-checkBalanceTicker.C:
s.metrics.senderCheckBalancerTotal.WithLabelValues(s.service, s.name).Inc()
// Check and set balance.
if err := s.checkBalance(ctx); err != nil {
log.Error("check balance error", "err", err)
log.Error("check balance, err: %w", err)
}
case <-ctx.Done():
return
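The loop above is the standard select-over-tickers shape: one ticker drives pending-transaction checks, another drives balance checks, and ctx.Done() terminates the goroutine. A minimal sketch of that shape (the intervals are illustrative):

package main

import (
    "context"
    "fmt"
    "time"
)

func loop(ctx context.Context) {
    checkTick := time.NewTicker(2 * time.Second)
    balanceTick := time.NewTicker(10 * time.Second)
    defer checkTick.Stop()
    defer balanceTick.Stop()
    for {
        select {
        case <-checkTick.C:
            fmt.Println("check pending transactions")
        case <-balanceTick.C:
            fmt.Println("check balance")
        case <-ctx.Done(): // context cancelled: stop the goroutine
            return
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    loop(ctx)
}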


@@ -70,7 +70,7 @@ func testNewSender(t *testing.T) {
// exit by Stop()
cfgCopy1 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy1.TxType = txType
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey, "test", "test", nil)
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey)
assert.NoError(t, err)
newSender1.Stop()
@@ -78,7 +78,7 @@ func testNewSender(t *testing.T) {
cfgCopy2 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy2.TxType = txType
subCtx, cancel := context.WithCancel(context.Background())
_, err = NewSender(subCtx, &cfgCopy2, privateKey, "test", "test", nil)
_, err = NewSender(subCtx, &cfgCopy2, privateKey)
assert.NoError(t, err)
cancel()
}
@@ -90,7 +90,7 @@ func testPendLimit(t *testing.T) {
cfgCopy.TxType = txType
cfgCopy.Confirmations = rpc.LatestBlockNumber
cfgCopy.PendingLimit = 2
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey)
assert.NoError(t, err)
for i := 0; i < 2*newSender.PendingLimit(); i++ {
@@ -107,7 +107,7 @@ func testMinGasLimit(t *testing.T) {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.Confirmations = rpc.LatestBlockNumber
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey)
assert.NoError(t, err)
client, err := ethclient.Dial(cfgCopy.Endpoint)
@@ -135,7 +135,7 @@ func testResubmitTransaction(t *testing.T) {
for _, txType := range txTypes {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
s, err := NewSender(context.Background(), &cfgCopy, privateKey)
assert.NoError(t, err)
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
feeData, err := s.getFeeData(s.auth, &common.Address{}, big.NewInt(0), nil, 0)
@@ -151,7 +151,7 @@ func testResubmitTransactionWithRisingBaseFee(t *testing.T) {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
s, err := NewSender(context.Background(), &cfgCopy, privateKey)
assert.NoError(t, err)
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
s.baseFeePerGas = 1000
@@ -186,7 +186,7 @@ func testCheckPendingTransaction(t *testing.T) {
for _, txType := range txTypes {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
s, err := NewSender(context.Background(), &cfgCopy, privateKey)
assert.NoError(t, err)
header := &types.Header{Number: big.NewInt(100), BaseFee: big.NewInt(100)}


@@ -5,8 +5,6 @@ import (
"fmt"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
@@ -28,28 +26,13 @@ type BatchProposer struct {
maxChunkNumPerBatch uint64
maxL1CommitGasPerBatch uint64
maxL1CommitCalldataSizePerBatch uint32
minChunkNumPerBatch uint64
batchTimeoutSec uint64
gasCostIncreaseMultiplier float64
batchProposerCircleTotal prometheus.Counter
proposeBatchFailureTotal prometheus.Counter
proposeBatchUpdateInfoTotal prometheus.Counter
proposeBatchUpdateInfoFailureTotal prometheus.Counter
totalL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
batchChunksNum prometheus.Gauge
batchFirstBlockTimeoutReached prometheus.Counter
batchChunksProposeNotEnoughTotal prometheus.Counter
}
// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
log.Debug("new batch proposer",
"maxChunkNumPerBatch", cfg.MaxChunkNumPerBatch,
"maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch,
"maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
"batchTimeoutSec", cfg.BatchTimeoutSec)
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB) *BatchProposer {
return &BatchProposer{
ctx: ctx,
db: db,
@@ -59,65 +42,25 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
maxChunkNumPerBatch: cfg.MaxChunkNumPerBatch,
maxL1CommitGasPerBatch: cfg.MaxL1CommitGasPerBatch,
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
minChunkNumPerBatch: cfg.MinChunkNumPerBatch,
batchTimeoutSec: cfg.BatchTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_circle_total",
Help: "Total number of propose batch total.",
}),
proposeBatchFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_failure_circle_total",
Help: "Total number of propose batch total.",
}),
proposeBatchUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_update_info_total",
Help: "Total number of propose batch update info total.",
}),
proposeBatchUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_update_info_failure_total",
Help: "Total number of propose batch update info failure total.",
}),
totalL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_batch_total_l1_commit_gas",
Help: "The total l1 commit gas",
}),
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_batch_total_l1_call_data_size",
Help: "The total l1 call data size",
}),
batchChunksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_batch_chunks_number",
Help: "The number of chunks in the batch",
}),
batchFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_first_block_timeout_reached_total",
Help: "Total times of batch's first block timeout reached",
}),
batchChunksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_chunks_propose_not_enough_total",
Help: "Total number of batch chunk propose not enough",
}),
}
}
// TryProposeBatch tries to propose a new batch.
func (p *BatchProposer) TryProposeBatch() {
p.batchProposerCircleTotal.Inc()
dbChunks, err := p.proposeBatchChunks()
if err != nil {
p.proposeBatchFailureTotal.Inc()
log.Error("proposeBatchChunks failed", "err", err)
return
}
if err := p.updateBatchInfoInDB(dbChunks); err != nil {
p.proposeBatchUpdateInfoFailureTotal.Inc()
log.Error("update batch info in db failed", "err", err)
}
}
func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
p.proposeBatchUpdateInfoTotal.Inc()
numChunks := len(dbChunks)
if numChunks <= 0 {
return nil
@@ -134,12 +77,10 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
err = p.db.Transaction(func(dbTX *gorm.DB) error {
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure", "error", "start chunk index", startChunkIndex, "end chunk index", endChunkIndex, dbErr)
return dbErr
}
dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batch.Hash, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.UpdateBatchHashInRange update the chunk's batch hash failure", "hash", batch.Hash, "error", dbErr)
return dbErr
}
return nil
@@ -157,90 +98,83 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
return nil, nil
}
var totalL1CommitCalldataSize uint32
var totalL1CommitGas uint64
var totalChunks uint64
var totalL1MessagePopped uint64
firstChunk := dbChunks[0]
totalL1CommitCalldataSize := firstChunk.TotalL1CommitCalldataSize
totalL1CommitGas := firstChunk.TotalL1CommitGas
totalChunks := uint64(1)
totalL1MessagePopped := firstChunk.TotalL1MessagesPoppedBefore + uint64(firstChunk.TotalL1MessagesPoppedInChunk)
parentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
if err != nil {
return nil, err
}
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
// Add extra gas costs
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += types.CalldataNonZeroByteGas // version in calldata
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += 16 // version in calldata
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256) // _skippedL1MessageBitmap in calldata
// adjusting gas:
// add 1 time cold sload (2100 gas) for L1MessageQueue
// add 1 time cold address access (2600 gas) for L1MessageQueue
// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
totalL1CommitGas += (2100 + 2600 - 100 - 100)
totalL1CommitGas += getKeccakGas(32 * totalChunks) // batch data hash
if parentBatch != nil {
totalL1CommitGas += types.GetKeccak256Gas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
totalL1CommitGas += types.CalldataNonZeroByteGas * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
totalL1CommitGas += getKeccakGas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
totalL1CommitGas += 16 * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
}
// batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
firstChunk.StartBlockNumber,
firstChunk.EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
}
for i, chunk := range dbChunks {
// metric values
lastTotalL1CommitCalldataSize := totalL1CommitCalldataSize
lastTotalL1CommitGas := totalL1CommitGas
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
firstChunk.StartBlockNumber,
firstChunk.EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}
for i, chunk := range dbChunks[1:] {
totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
totalL1CommitGas += chunk.TotalL1CommitGas
// adjust batch data hash gas cost
totalL1CommitGas -= types.GetKeccak256Gas(32 * totalChunks)
totalL1CommitGas -= getKeccakGas(32 * totalChunks)
totalChunks++
totalL1CommitGas += types.GetKeccak256Gas(32 * totalChunks)
// adjust batch header hash gas cost, batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
totalL1CommitGas -= types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas -= types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += getKeccakGas(32 * totalChunks)
// adjust batch header hash gas cost
totalL1CommitGas -= getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas -= 16 * (32 * (totalL1MessagePopped + 255) / 256)
totalL1MessagePopped += uint64(chunk.TotalL1MessagesPoppedInChunk)
totalL1CommitGas += types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
if totalChunks > p.maxChunkNumPerBatch ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
if i == 0 {
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}
}
log.Debug("breaking limit condition in batching",
"currentTotalChunks", totalChunks,
"maxChunkNumPerBatch", p.maxChunkNumPerBatch,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitCalldataSizePerBatch", p.maxL1CommitCalldataSizePerBatch,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGasPerBatch", p.maxL1CommitGasPerBatch)
p.totalL1CommitGas.Set(float64(lastTotalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(lastTotalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(i))
return dbChunks[:i], nil
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
return dbChunks[:i+1], nil
}
}
var hasChunkTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
@@ -248,16 +182,16 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
"first block timestamp", dbChunks[0].StartBlockTime,
"chunk outdated time threshold", currentTimeSec,
)
p.batchFirstBlockTimeoutReached.Inc()
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(len(dbChunks)))
return dbChunks, nil
hasChunkTimeout = true
}
log.Debug("pending chunks do not reach one of the constraints or contain a timeout block")
p.batchChunksProposeNotEnoughTotal.Inc()
return nil, nil
if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
log.Warn("The chunk number of the batch is less than the minimum limit",
"chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
)
return nil, nil
}
return dbChunks, nil
}
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
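The gas accounting above rests on two formulas from the inline comments: hashing size bytes with keccak costs 30 + 6*ceil(size/32) gas, and the batch header occupies 89 + 32*ceil(l1MessagePopped/256) bytes, with each non-zero calldata byte costing 16 gas. A small worked sketch with an illustrative popped-message count:

package main

import "fmt"

// getKeccakGas mirrors the closure above: 30 + 6 * ceil(size / 32).
func getKeccakGas(size uint64) uint64 {
    return 30 + 6*((size+31)/32)
}

func main() {
    popped := uint64(300) // illustrative number of popped L1 messages
    bitmapBytes := 32 * ((popped + 255) / 256)
    headerSize := uint64(89) + bitmapBytes

    fmt.Println(getKeccakGas(headerSize)) // 60: hashing the 153-byte batch header
    fmt.Println(16 * bitmapBytes)         // 1024: the skipped-message bitmap in calldata
}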


@@ -23,20 +23,23 @@ func testBatchProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
}, db)
cp.TryProposeChunk()
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
MinChunkNumPerBatch: 1,
BatchTimeoutSec: 300,
}, db, nil)
}, db)
bp.TryProposeBatch()
chunkOrm := orm.NewChunk(db)


@@ -6,8 +6,6 @@ import (
"fmt"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
@@ -51,111 +49,43 @@ type ChunkProposer struct {
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
maxTxGasPerChunk uint64
maxL2TxNumPerChunk uint64
maxL1CommitGasPerChunk uint64
maxL1CommitCalldataSizePerChunk uint64
minL1CommitCalldataSizePerChunk uint64
maxRowConsumptionPerChunk uint64
chunkTimeoutSec uint64
gasCostIncreaseMultiplier float64
chunkProposerCircleTotal prometheus.Counter
proposeChunkFailureTotal prometheus.Counter
proposeChunkUpdateInfoTotal prometheus.Counter
proposeChunkUpdateInfoFailureTotal prometheus.Counter
chunkL2TxNum prometheus.Gauge
chunkEstimateL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
totalTxGasUsed prometheus.Gauge
maxTxConsumption prometheus.Gauge
chunkBlocksNum prometheus.Gauge
chunkFirstBlockTimeoutReached prometheus.Counter
chunkBlocksProposeNotEnoughTotal prometheus.Counter
}
// NewChunkProposer creates a new ChunkProposer instance.
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
log.Debug("new chunk proposer",
"maxL2TxNumPerChunk", cfg.MaxL2TxNumPerChunk,
"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
"maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
"chunkTimeoutSec", cfg.ChunkTimeoutSec)
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB) *ChunkProposer {
return &ChunkProposer{
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
maxTxGasPerChunk: cfg.MaxTxGasPerChunk,
maxL2TxNumPerChunk: cfg.MaxL2TxNumPerChunk,
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
minL1CommitCalldataSizePerChunk: cfg.MinL1CommitCalldataSizePerChunk,
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_circle_total",
Help: "Total number of propose chunk total.",
}),
proposeChunkFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_failure_circle_total",
Help: "Total number of propose chunk failure total.",
}),
proposeChunkUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_update_info_total",
Help: "Total number of propose chunk update info total.",
}),
proposeChunkUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_update_info_failure_total",
Help: "Total number of propose chunk update info failure total.",
}),
chunkL2TxNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_l2_tx_num",
Help: "The chunk l2 tx num",
}),
chunkEstimateL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_estimate_l1_commit_gas",
Help: "The chunk estimate l1 commit gas",
}),
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_total_l1_commit_call_data_size",
Help: "The total l1 commit call data size",
}),
totalTxGasUsed: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_total_tx_gas_used",
Help: "The total tx gas used",
}),
maxTxConsumption: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_max_tx_consumption",
Help: "The max tx consumption",
}),
chunkBlocksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_chunk_block_number",
Help: "The number of blocks in the chunk",
}),
chunkFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_first_block_timeout_reached_total",
Help: "Total times of chunk's first block timeout reached",
}),
chunkBlocksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_blocks_propose_not_enough_total",
Help: "Total number of chunk block propose not enough",
}),
}
}
// TryProposeChunk tries to propose a new chunk.
func (p *ChunkProposer) TryProposeChunk() {
p.chunkProposerCircleTotal.Inc()
proposedChunk, err := p.proposeChunk()
if err != nil {
p.proposeChunkFailureTotal.Inc()
log.Error("propose new chunk failed", "err", err)
return
}
if err := p.updateChunkInfoInDB(proposedChunk); err != nil {
p.proposeChunkUpdateInfoFailureTotal.Inc()
log.Error("update chunk info in orm failed", "err", err)
}
}
@@ -165,11 +95,9 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
return nil
}
p.proposeChunkUpdateInfoTotal.Inc()
err := p.db.Transaction(func(dbTX *gorm.DB) error {
dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, dbTX)
if err != nil {
log.Warn("ChunkProposer.InsertChunk failed", "chunk hash", chunk.Hash)
return err
}
if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
@@ -191,98 +119,88 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
return nil, nil
}
var chunk types.Chunk
var totalTxGasUsed uint64
var totalL2TxNum uint64
var totalL1CommitCalldataSize uint64
var totalL1CommitGas uint64
chunk := &types.Chunk{Blocks: blocks[:1]}
firstBlock := chunk.Blocks[0]
totalTxGasUsed := firstBlock.Header.GasUsed
totalL2TxNum := firstBlock.L2TxsNum()
totalL1CommitCalldataSize := firstBlock.EstimateL1CommitCalldataSize()
crc := chunkRowConsumption{}
totalL1CommitGas := chunk.EstimateL1CommitGas()
for i, block := range blocks {
// metric values
lastTotalL2TxNum := totalL2TxNum
lastTotalL1CommitGas := totalL1CommitGas
lastCrcMax := crc.max()
lastTotalL1CommitCalldataSize := totalL1CommitCalldataSize
lastTotalTxGasUsed := totalTxGasUsed
if err := crc.add(firstBlock.RowConsumption); err != nil {
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
}
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if totalL2TxNum > p.maxL2TxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
firstBlock.Header.Number,
totalL2TxNum,
p.maxL2TxNumPerChunk,
)
}
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
firstBlock.Header.Number,
totalL1CommitGas,
p.maxL1CommitGasPerChunk,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
firstBlock.Header.Number,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerChunk,
)
}
// Check if the first block breaks any soft limits.
if totalTxGasUsed > p.maxTxGasPerChunk {
log.Warn(
"The first block in chunk exceeds l2 tx gas limit",
"block number", firstBlock.Header.Number,
"gas used", totalTxGasUsed,
"max gas limit", p.maxTxGasPerChunk,
)
}
if max := crc.max(); max > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
firstBlock.Header.Number,
crc,
max,
p.maxRowConsumptionPerChunk,
)
}
for _, block := range blocks[1:] {
chunk.Blocks = append(chunk.Blocks, block)
totalTxGasUsed += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas = chunk.EstimateL1CommitGas()
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
if err := crc.add(block.RowConsumption); err != nil {
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
}
crcMax := crc.max()
if totalL2TxNum > p.maxL2TxNumPerChunk ||
if totalTxGasUsed > p.maxTxGasPerChunk ||
totalL2TxNum > p.maxL2TxNumPerChunk ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk ||
crcMax > p.maxRowConsumptionPerChunk {
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if i == 0 {
if totalL2TxNum > p.maxL2TxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
block.Header.Number,
totalL2TxNum,
p.maxL2TxNumPerChunk,
)
}
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
block.Header.Number,
totalL1CommitGas,
p.maxL1CommitGasPerChunk,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
block.Header.Number,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerChunk,
)
}
if crcMax > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
block.Header.Number,
crc,
crcMax,
p.maxRowConsumptionPerChunk,
)
}
}
log.Debug("breaking limit condition in chunking",
"totalL2TxNum", totalL2TxNum,
"maxL2TxNumPerChunk", p.maxL2TxNumPerChunk,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitCalldataSizePerChunk", p.maxL1CommitCalldataSizePerChunk,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGasPerChunk", p.maxL1CommitGasPerChunk,
"chunkRowConsumptionMax", crcMax,
"chunkRowConsumption", crc,
"p.maxRowConsumptionPerChunk", p.maxRowConsumptionPerChunk)
p.chunkL2TxNum.Set(float64(lastTotalL2TxNum))
p.chunkEstimateL1CommitGas.Set(float64(lastTotalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(lastTotalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(lastCrcMax))
p.totalTxGasUsed.Set(float64(lastTotalTxGasUsed))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) ||
crc.max() > p.maxRowConsumptionPerChunk {
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] // remove the last block from chunk
break
}
chunk.Blocks = append(chunk.Blocks, block)
}
var hasBlockTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
@@ -290,17 +208,15 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
"block timestamp", blocks[0].Header.Time,
"block outdated time threshold", currentTimeSec,
)
p.chunkFirstBlockTimeoutReached.Inc()
p.chunkL2TxNum.Set(float64(totalL2TxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(crc.max()))
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
hasBlockTimeout = true
}
log.Debug("pending blocks do not reach one of the constraints or contain a timeout block")
p.chunkBlocksProposeNotEnoughTotal.Inc()
return nil, nil
if !hasBlockTimeout && totalL1CommitCalldataSize < p.minL1CommitCalldataSizePerChunk {
log.Warn("The calldata size of the chunk is less than the minimum limit",
"totalL1CommitCalldataSize", totalL1CommitCalldataSize,
"minL1CommitCalldataSizePerChunk", p.minL1CommitCalldataSizePerChunk,
)
return nil, nil
}
return chunk, nil
}
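proposeChunk above follows a greedy accumulate-and-stop shape: seed the chunk with the first block (checked against the hard limits up front), keep appending while every limit holds, and stop at the block that breaks one. A minimal sketch of that shape with a single illustrative size limit:

package main

import "fmt"

// propose greedily packs sizes until the running total would break maxTotal.
func propose(sizes []uint64, maxTotal uint64) []uint64 {
    if len(sizes) == 0 {
        return nil
    }
    out := []uint64{sizes[0]} // the first element is assumed to pass the hard limits
    total := sizes[0]
    for _, s := range sizes[1:] {
        total += s
        if total > maxTotal {
            break // this element breaks a limit, so it is left out
        }
        out = append(out, s)
    }
    return out
}

func main() {
    fmt.Println(propose([]uint64{10, 20, 30, 40}, 65)) // [10 20 30]
}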


@@ -23,12 +23,14 @@ func testChunkProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
}, db)
cp.TryProposeChunk()
expectedChunk := &types.Chunk{
@@ -53,12 +55,14 @@ func testChunkProposerRowConsumption(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 0, // !
ChunkTimeoutSec: 300,
}, db, nil)
}, db)
cp.TryProposeChunk()
chunkOrm := orm.NewChunk(db)


@@ -4,7 +4,6 @@ import (
"context"
"math/big"
"github.com/prometheus/client_golang/prometheus"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
@@ -12,9 +11,11 @@ import (
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
@@ -22,6 +23,12 @@ import (
"scroll-tech/bridge/internal/utils"
)
var (
bridgeL1MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
bridgeL1MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL1MsgsRollupEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
)
type rollupEvent struct {
batchHash common.Hash
txHash common.Hash
@@ -52,12 +59,10 @@ type L1WatcherClient struct {
processedMsgHeight uint64
// The height of the block that the watcher has retrieved header rlp
processedBlockHeight uint64
metrics *l1WatcherMetrics
}
// NewL1WatcherClient returns a new instance of L1WatcherClient.
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB) *L1WatcherClient {
l1MessageOrm := orm.NewL1Message(db)
savedHeight, err := l1MessageOrm.GetLayer1LatestWatchedHeight()
if err != nil {
@@ -97,7 +102,6 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
processedMsgHeight: uint64(savedHeight),
processedBlockHeight: savedL1BlockHeight,
metrics: initL1WatcherMetrics(reg),
}
}
@@ -121,7 +125,6 @@ func (w *L1WatcherClient) SetConfirmations(confirmations rpc.BlockNumber) {
// FetchBlockHeader pulls the latest L1 blocks and saves them in the DB
func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
w.metrics.l1WatcherFetchBlockHeaderTotal.Inc()
fromBlock := int64(w.processedBlockHeight) + 1
toBlock := int64(blockHeight)
if toBlock < fromBlock {
@@ -168,7 +171,6 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
// update processed height
w.processedBlockHeight = uint64(toBlock)
w.metrics.l1WatcherFetchBlockHeaderProcessedBlockHeight.Set(float64(w.processedBlockHeight))
return nil
}
@@ -187,7 +189,6 @@ func (w *L1WatcherClient) FetchContractEvent() error {
toBlock := int64(blockHeight)
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
w.metrics.l1WatcherFetchContractEventTotal.Inc()
to := from + contractEventsBlocksFetchLimit - 1
if to > toBlock {
@@ -219,10 +220,9 @@ func (w *L1WatcherClient) FetchContractEvent() error {
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
w.metrics.l1WatcherFetchContractEventProcessedBlockHeight.Set(float64(to))
bridgeL1MsgsSyncHeightGauge.Update(to)
continue
}
log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))
sentMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
@@ -232,8 +232,8 @@ func (w *L1WatcherClient) FetchContractEvent() error {
}
sentMessageCount := int64(len(sentMessageEvents))
rollupEventCount := int64(len(rollupEvents))
w.metrics.l1WatcherFetchContractEventSentEventsTotal.Add(float64(sentMessageCount))
w.metrics.l1WatcherFetchContractEventRollupEventsTotal.Add(float64(rollupEventCount))
bridgeL1MsgsSentEventsTotalCounter.Inc(sentMessageCount)
bridgeL1MsgsRollupEventsTotalCounter.Inc(rollupEventCount)
log.Info("L1 events types", "SentMessageCount", sentMessageCount, "RollupEventCount", rollupEventCount)
// use rollup event to update rollup results db status
@@ -273,8 +273,7 @@ func (w *L1WatcherClient) FetchContractEvent() error {
}
w.processedMsgHeight = uint64(to)
w.metrics.l1WatcherFetchContractEventSuccessTotal.Inc()
w.metrics.l1WatcherFetchContractEventProcessedBlockHeight.Set(float64(w.processedMsgHeight))
bridgeL1MsgsSyncHeightGauge.Update(to)
}
return nil
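The watcher above now reports through go-ethereum's metrics package instead of Prometheus vectors. A minimal sketch of that style, assuming the scroll-tech fork keeps upstream's NewRegisteredGauge/NewRegisteredCounter API (the names are illustrative; a nil registry falls back to the default one):

package main

import (
    gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
)

var (
    syncHeightGauge = gethMetrics.NewRegisteredGauge("example/l1/sync/height", nil)
    eventsCounter   = gethMetrics.NewRegisteredCounter("example/l1/events/total", nil)
)

func main() {
    syncHeightGauge.Update(12345) // gauges take an absolute int64 value
    eventsCounter.Inc(7)          // counters take an int64 delta
}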


@@ -1,59 +0,0 @@
package watcher
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l1WatcherMetrics struct {
l1WatcherFetchBlockHeaderTotal prometheus.Counter
l1WatcherFetchBlockHeaderProcessedBlockHeight prometheus.Gauge
l1WatcherFetchContractEventTotal prometheus.Counter
l1WatcherFetchContractEventSuccessTotal prometheus.Counter
l1WatcherFetchContractEventProcessedBlockHeight prometheus.Gauge
l1WatcherFetchContractEventSentEventsTotal prometheus.Counter
l1WatcherFetchContractEventRollupEventsTotal prometheus.Counter
}
var (
initL1WatcherMetricOnce sync.Once
l1WatcherMetric *l1WatcherMetrics
)
func initL1WatcherMetrics(reg prometheus.Registerer) *l1WatcherMetrics {
initL1WatcherMetricOnce.Do(func() {
l1WatcherMetric = &l1WatcherMetrics{
l1WatcherFetchBlockHeaderTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_header_total",
Help: "The total number of l1 watcher fetch block header total",
}),
l1WatcherFetchBlockHeaderProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l1_watcher_fetch_block_header_processed_block_height",
Help: "The current processed block height of l1 watcher fetch block header",
}),
l1WatcherFetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_total",
Help: "The total number of l1 watcher fetch contract event total",
}),
l1WatcherFetchContractEventSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_success_total",
Help: "The total number of l1 watcher fetch contract event success total",
}),
l1WatcherFetchContractEventProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_processed_block_height",
Help: "The current processed block height of l1 watcher fetch contract event",
}),
l1WatcherFetchContractEventSentEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_sent_event_total",
Help: "The current processed block height of l1 watcher fetch contract sent event",
}),
l1WatcherFetchContractEventRollupEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_rollup_event_total",
Help: "The current processed block height of l1 watcher fetch contract rollup event",
}),
}
})
return l1WatcherMetric
}


@@ -30,8 +30,7 @@ func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
client, err := ethclient.Dial(base.L1gethImg.Endpoint())
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress,
l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil)
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
assert.NoError(t, watcher.FetchContractEvent())
return watcher, db
}


@@ -5,7 +5,6 @@ import (
"fmt"
"math/big"
"github.com/prometheus/client_golang/prometheus"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
@@ -14,9 +13,11 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/event"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
@@ -24,6 +25,14 @@ import (
"scroll-tech/bridge/internal/utils"
)
// Metrics
var (
bridgeL2MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedGapGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
bridgeL2MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
)
// L2WatcherClient provides APIs that let others subscribe to various events from l2geth
type L2WatcherClient struct {
ctx context.Context
@@ -47,12 +56,10 @@ type L2WatcherClient struct {
processedMsgHeight uint64
stopped uint64
metrics *l2WatcherMetrics
}
// NewL2WatcherClient takes an l2geth instance and generates an L2WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB, reg prometheus.Registerer) *L2WatcherClient {
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB) *L2WatcherClient {
l1MessageOrm := orm.NewL1Message(db)
var savedHeight uint64
l1msg, err := l1MessageOrm.GetLayer1LatestMessageWithLayer2Hash()
@@ -86,7 +93,6 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
withdrawTrieRootSlot: withdrawTrieRootSlot,
stopped: 0,
metrics: initL2WatcherMetrics(reg),
}
return &w
@@ -96,7 +102,6 @@ const blockTracesFetchLimit = uint64(10)
// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
w.metrics.fetchRunningMissingBlocksTotal.Inc()
heightInDB, err := w.l2BlockOrm.GetL2BlocksLatestHeight(w.ctx)
if err != nil {
log.Error("failed to GetL2BlocksLatestHeight", "err", err)
@@ -115,8 +120,8 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
}
w.metrics.fetchRunningMissingBlocksHeight.Set(float64(to))
w.metrics.bridgeL2BlocksFetchedGap.Set(float64(blockHeight - to))
bridgeL2BlocksFetchedHeightGauge.Update(int64(to))
bridgeL2BlocksFetchedGapGauge.Update(int64(blockHeight - to))
}
}
@@ -194,7 +199,6 @@ func (w *L2WatcherClient) FetchContractEvent() {
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
w.metrics.fetchContractEventTotal.Inc()
blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
@@ -234,7 +238,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
w.metrics.fetchContractEventHeight.Set(float64(to))
bridgeL2MsgsSyncHeightGauge.Update(to)
continue
}
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
@@ -246,7 +250,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
relayedMessageCount := int64(len(relayedMessageEvents))
w.metrics.bridgeL2MsgsRelayedEventsTotal.Add(float64(relayedMessageCount))
bridgeL2MsgsRelayedEventsTotalCounter.Inc(relayedMessageCount)
log.Info("L2 events types", "RelayedMessageCount", relayedMessageCount)
// Update relayed message first to make sure we don't forget to update submitted message.
@@ -265,7 +269,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
w.processedMsgHeight = uint64(to)
w.metrics.fetchContractEventHeight.Set(float64(to))
bridgeL2MsgsSyncHeightGauge.Update(to)
}
}


@@ -1,54 +0,0 @@
package watcher
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l2WatcherMetrics struct {
fetchRunningMissingBlocksTotal prometheus.Counter
fetchRunningMissingBlocksHeight prometheus.Gauge
fetchContractEventTotal prometheus.Counter
fetchContractEventHeight prometheus.Gauge
bridgeL2MsgsRelayedEventsTotal prometheus.Counter
bridgeL2BlocksFetchedGap prometheus.Gauge
}
var (
initL2WatcherMetricOnce sync.Once
l2WatcherMetric *l2WatcherMetrics
)
func initL2WatcherMetrics(reg prometheus.Registerer) *l2WatcherMetrics {
initL2WatcherMetricOnce.Do(func() {
l2WatcherMetric = &l2WatcherMetrics{
fetchRunningMissingBlocksTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l2_watcher_fetch_running_missing_blocks_total",
Help: "The total number of l2 watcher fetch running missing blocks",
}),
fetchRunningMissingBlocksHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l2_watcher_fetch_running_missing_blocks_height",
Help: "The total number of l2 watcher fetch running missing blocks height",
}),
fetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l2_watcher_fetch_contract_events_total",
Help: "The total number of l2 watcher fetch contract events",
}),
fetchContractEventHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l2_watcher_fetch_contract_height",
Help: "The total number of l2 watcher fetch contract height",
}),
bridgeL2MsgsRelayedEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l2_watcher_msg_relayed_events_total",
Help: "The total number of l2 watcher msg relayed event",
}),
bridgeL2BlocksFetchedGap: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l2_watcher_blocks_fetched_gap",
Help: "The gap of l2 fetch",
}),
}
})
return l2WatcherMetric
}


@@ -34,8 +34,7 @@ import (
func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {
db := setupDB(t)
l2cfg := cfg.L2Config
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress,
l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db, nil)
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
return watcher, db
}
@@ -51,7 +50,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
l1cfg := cfg.L1Config
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKey, "test", "test", nil)
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKey)
assert.NoError(t, err)
// Create several transactions and commit to block
@@ -96,7 +95,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
func prepareWatcherClient(l2Cli *ethclient.Client, db *gorm.DB, contractAddr common.Address) *L2WatcherClient {
confirmations := rpc.LatestBlockNumber
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db, nil)
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
}
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {


@@ -311,7 +311,7 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}


@@ -201,7 +201,7 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
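Both ORM changes above hinge on a multi-value case: ProvingTaskProved now stamps proved_at exactly as ProvingTaskVerified does. A minimal sketch of that switch shape with hypothetical status constants:

package main

import (
    "fmt"
    "time"
)

type ProvingStatus int

const (
    ProvingTaskAssigned ProvingStatus = iota
    ProvingTaskUnassigned
    ProvingTaskProved
    ProvingTaskVerified
)

// updateFieldsFor builds the column map the way UpdateProvingStatus does.
func updateFieldsFor(status ProvingStatus) map[string]interface{} {
    f := map[string]interface{}{"proving_status": int(status)}
    switch status {
    case ProvingTaskAssigned:
        f["prover_assigned_at"] = time.Now()
    case ProvingTaskUnassigned:
        f["prover_assigned_at"] = nil
    case ProvingTaskProved, ProvingTaskVerified: // both now record proved_at
        f["proved_at"] = time.Now()
    }
    return f
}

func main() {
    fmt.Println(updateFieldsFor(ProvingTaskProved))
}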


@@ -26,14 +26,13 @@ func testImportL1GasPrice(t *testing.T) {
l1Cfg := bridgeApp.Config.L1Config
// Create L1Relayer
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
// Create L1Watcher
startHeight, err := l1Client.BlockNumber(context.Background())
assert.NoError(t, err)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0,
l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// fetch new blocks
number, err := l1Client.BlockNumber(context.Background())
@@ -68,7 +67,7 @@ func testImportL2GasPrice(t *testing.T) {
prepareContracts(t)
l2Cfg := bridgeApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, nil)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
// add fake chunk


@@ -29,16 +29,14 @@ func testRelayL1MessageSucceed(t *testing.T) {
l2Cfg := bridgeApp.Config.L2Config
// Create L1Relayer
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
// Create L1Watcher
confirmations := rpc.LatestBlockNumber
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress,
l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// Create L2Watcher
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress,
l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db, nil)
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
// send message through l1 messenger contract
nonce, err := l1MessengerInstance.MessageNonce(&bind.CallOpts{})


@@ -5,6 +5,7 @@ import (
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
@@ -12,7 +13,6 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
@@ -28,13 +28,12 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create L2Relayer
l2Cfg := bridgeApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, nil)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
// Create L1Watcher
l1Cfg := bridgeApp.Config.L1Config
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress,
l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// add some blocks to db
var wrappedBlocks []*types.WrappedBlock
@@ -58,12 +57,14 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.NoError(t, err)
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
}, db)
cp.TryProposeChunk()
chunkOrm := orm.NewChunk(db)
@@ -75,8 +76,9 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
MinChunkNumPerBatch: 1,
BatchTimeoutSec: 300,
}, db, nil)
}, db)
bp.TryProposeBatch()
l2Relayer.ProcessPendingBatches()
@@ -89,22 +91,20 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.NotEmpty(t, batch.CommitTxHash)
assert.Equal(t, types.RollupCommitting, types.RollupStatus(batch.RollupStatus))
success := utils.TryTimes(30, func() bool {
var receipt *gethTypes.Receipt
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
return err == nil && receipt.Status == 1
})
assert.True(t, success)
assert.NoError(t, err)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(batch.CommitTxHash))
assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err)
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch rollup events
success = utils.TryTimes(30, func() bool {
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
var statuses []types.RollupStatus
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
})
assert.True(t, success)
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupCommitted, statuses[0])
// add dummy proof
proof := &message.BatchProof{
@@ -118,7 +118,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// process committed batch and check status
l2Relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
@@ -128,20 +128,17 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.NotNil(t, batch)
assert.NotEmpty(t, batch.FinalizeTxHash)
success = utils.TryTimes(30, func() bool {
var receipt *gethTypes.Receipt
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
return err == nil && receipt.Status == 1
})
assert.True(t, success)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(batch.FinalizeTxHash))
assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err)
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch rollup events
success = utils.TryTimes(30, func() bool {
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
var statuses []types.RollupStatus
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
})
assert.True(t, success)
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalized, statuses[0])
}
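The polling pattern above (`utils.TryTimes` wrapping a receipt or status check) retries a predicate a fixed number of times before giving up. A minimal sketch of that helper, assuming a fixed pause between attempts (the real `utils.TryTimes` may use a different interval):

```go
package utils

import "time"

// TryTimes retries fn up to n times, pausing between attempts, and reports
// whether fn ever returned true. A sketch only; the actual helper in
// scroll-tech/common/utils may differ in its retry interval.
func TryTimes(n int, fn func() bool) bool {
	for i := 0; i < n; i++ {
		if fn() {
			return true
		}
		time.Sleep(500 * time.Millisecond)
	}
	return false
}
```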

View File

@@ -32,7 +32,7 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
dependencies = [
"ark-std",
"env_logger 0.10.0",
@@ -380,7 +380,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b"
dependencies = [
"borsh-derive",
"hashbrown 0.12.3",
"hashbrown 0.13.2",
]
[[package]]
@@ -433,7 +433,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
dependencies = [
"eth-types",
"ethers-core",
@@ -1049,7 +1049,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
dependencies = [
"ethers-core",
"ethers-signers",
@@ -1226,7 +1226,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
dependencies = [
"eth-types",
"geth-utils",
@@ -1439,7 +1439,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
dependencies = [
"digest 0.7.6",
"eth-types",
@@ -1479,7 +1479,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
dependencies = [
"env_logger 0.9.3",
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
@@ -1583,7 +1583,7 @@ dependencies = [
[[package]]
name = "halo2-base"
version = "0.2.2"
source = "git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
dependencies = [
"ff",
"halo2_proofs",
@@ -1598,7 +1598,7 @@ dependencies = [
[[package]]
name = "halo2-ecc"
version = "0.2.2"
source = "git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
dependencies = [
"ff",
"group",
@@ -1633,7 +1633,7 @@ dependencies = [
[[package]]
name = "halo2-mpt-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?tag=v0.5.1#2163a9c436ed85363c954ecf7e6e1044a1b991dc"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=v0.5#2163a9c436ed85363c954ecf7e6e1044a1b991dc"
dependencies = [
"ethers-core",
"halo2_proofs",
@@ -1655,7 +1655,7 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#19de67c07a9b9b567580466763f93ebfbc3bb799"
source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#01f0b5260445a9190299af7b06b766c1e925fdaf"
dependencies = [
"ark-std",
"blake2b_simd",
@@ -2077,7 +2077,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
dependencies = [
"env_logger 0.9.3",
"eth-types",
@@ -2264,7 +2264,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
dependencies = [
"eth-types",
"ethers-core",
@@ -2279,7 +2279,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
dependencies = [
"bus-mapping",
"eth-types",
@@ -2754,8 +2754,8 @@ dependencies = [
[[package]]
name = "prover"
version = "0.7.5"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.5#9699d40940aed2f14d8e1958167d714bca2c9984"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.16#bd901762c4744936586f4a30e27a4b65cea3bb71"
dependencies = [
"aggregator",
"anyhow",
@@ -3624,7 +3624,7 @@ checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"
[[package]]
name = "snark-verifier"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/snark-verifier?tag=v0.1.2#4466059ce9a6dfaf26455e4ffb61d72af775cf52"
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#100127726ac210226ac1096767e0efc5230775e3"
dependencies = [
"bytes",
"ethereum-types 0.14.1",
@@ -3648,7 +3648,7 @@ dependencies = [
[[package]]
name = "snark-verifier-sdk"
version = "0.0.1"
source = "git+https://github.com/scroll-tech/snark-verifier?tag=v0.1.2#4466059ce9a6dfaf26455e4ffb61d72af775cf52"
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#100127726ac210226ac1096767e0efc5230775e3"
dependencies = [
"bincode",
"env_logger 0.10.0",
@@ -4039,8 +4039,8 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "types"
version = "0.7.5"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.5#9699d40940aed2f14d8e1958167d714bca2c9984"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.16#bd901762c4744936586f4a30e27a4b65cea3bb71"
dependencies = [
"base64 0.13.1",
"blake2",
@@ -4491,11 +4491,10 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.16#f1341e5bf2dc59ea10c19012257c7e386cfc195f"
dependencies = [
"array-init",
"bus-mapping",
"either",
"env_logger 0.9.3",
"eth-types",
"ethers-core",
@@ -4536,7 +4535,6 @@ dependencies = [
name = "zkp"
version = "0.1.0"
dependencies = [
"base64 0.13.1",
"env_logger 0.9.3",
"halo2_proofs",
"libc",

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -20,18 +20,18 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }
[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.5" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.5" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.16" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.16" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
base64 = "0.13.0"
env_logger = "0.9.0"
libc = "0.2"
log = "0.4"
once_cell = "1.8.0"
env_logger = "0.9.0"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0.66"
libc = "0.2"
once_cell = "1.8.0"
[profile.test]
opt-level = 3

View File

@@ -1,11 +1,11 @@
use crate::utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR};
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char, OUTPUT_DIR};
use libc::c_char;
use prover::{
aggregator::{Prover, Verifier},
utils::{chunk_trace_to_witness_block, init_env_and_log},
BatchProof, ChunkHash, ChunkProof,
};
use std::{cell::OnceCell, env, panic, ptr::null};
use std::{cell::OnceCell, panic, ptr::null};
use types::eth::BlockTrace;
static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -13,15 +13,11 @@ static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char, assets_dir: *const c_char) {
pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char) {
init_env_and_log("ffi_batch_prove");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let prover = Prover::from_dirs(params_dir, assets_dir);
let prover = Prover::from_params_dir(params_dir);
PROVER.set(prover).unwrap();
}
@@ -34,35 +30,11 @@ pub unsafe extern "C" fn init_batch_verifier(params_dir: *const c_char, assets_d
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_dirs(params_dir, assets_dir);
VERIFIER.set(verifier).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
.flatten()
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> c_char {
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs).unwrap();
assert!(!chunk_proofs.is_empty());
let valid = panic::catch_unwind(|| PROVER.get().unwrap().check_chunk_proofs(&chunk_proofs));
valid.unwrap_or(false) as c_char
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_batch_proof(

View File

@@ -1,11 +1,11 @@
use crate::utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR};
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char, OUTPUT_DIR};
use libc::c_char;
use prover::{
utils::init_env_and_log,
zkevm::{Prover, Verifier},
ChunkProof,
};
use std::{cell::OnceCell, env, panic, ptr::null};
use std::{cell::OnceCell, panic, ptr::null};
use types::eth::BlockTrace;
static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -13,14 +13,10 @@ static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char, assets_dir: *const c_char) {
pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char) {
init_env_and_log("ffi_chunk_prove");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let prover = Prover::from_params_dir(params_dir);
PROVER.set(prover).unwrap();
@@ -34,24 +30,11 @@ pub unsafe extern "C" fn init_chunk_verifier(params_dir: *const c_char, assets_d
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_dirs(params_dir, assets_dir);
VERIFIER.set(verifier).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_chunk_vk() -> *const c_char {
let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
.flatten()
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char {

View File

@@ -19,10 +19,6 @@ pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
cstr.to_bytes().to_vec()
}
pub(crate) fn string_to_c_char(string: String) -> *const c_char {
CString::new(string).unwrap().into_raw()
}
pub(crate) fn vec_to_c_char(bytes: Vec<u8>) -> *const c_char {
CString::new(bytes).unwrap().into_raw()
}

View File

@@ -1,13 +1,10 @@
void init_batch_prover(char* params_dir, char* assets_dir);
void init_batch_prover(char* params_dir);
void init_batch_verifier(char* params_dir, char* assets_dir);
char* get_batch_vk();
char check_chunk_proofs(char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
char verify_batch_proof(char* proof);
void init_chunk_prover(char* params_dir, char* assets_dir);
void init_chunk_prover(char* params_dir);
void init_chunk_verifier(char* params_dir, char* assets_dir);
char* get_chunk_vk();
char* gen_chunk_proof(char* block_traces);
char verify_chunk_proof(char* proof);
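For reference, a hedged cgo sketch of how a Go host could drive the updated interface: `init_chunk_prover` now takes only `params_dir`, while `init_chunk_verifier` still receives both directories. The header name `libzkp.h`, the link flags, and the paths are assumptions, not taken from the repo.

```go
package main

/*
#cgo LDFLAGS: -lzkp
#include <stdlib.h>
#include "libzkp.h" // assumed header name for the declarations above
*/
import "C"
import "unsafe"

func main() {
	params := C.CString("./params") // illustrative paths
	assets := C.CString("./assets")
	defer C.free(unsafe.Pointer(params))
	defer C.free(unsafe.Pointer(assets))

	C.init_chunk_prover(params)           // new one-argument form
	C.init_chunk_verifier(params, assets) // verifier still takes assets_dir
}
```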

common/metrics/metrics.go (new file, 57 lines)
View File

@@ -0,0 +1,57 @@
package metrics
import (
"context"
"net"
"net/http"
"strconv"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/metrics/prometheus"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
)
var (
// ScrollRegistry is used for scroll metrics.
ScrollRegistry = metrics.NewRegistry()
)
// Serve starts the metrics server on the given address; the server is closed when the
// given context is canceled.
func Serve(ctx context.Context, c *cli.Context) {
if !c.Bool(utils.MetricsEnabled.Name) {
return
}
address := net.JoinHostPort(
c.String(utils.MetricsAddr.Name),
strconv.Itoa(c.Int(utils.MetricsPort.Name)),
)
server := &http.Server{
Addr: address,
Handler: prometheus.Handler(ScrollRegistry),
ReadTimeout: rpc.DefaultHTTPTimeouts.ReadTimeout,
WriteTimeout: rpc.DefaultHTTPTimeouts.WriteTimeout,
IdleTimeout: rpc.DefaultHTTPTimeouts.IdleTimeout,
}
go func() {
<-ctx.Done()
if err := server.Close(); err != nil {
log.Error("Failed to close metrics server", "error", err)
}
}()
log.Info("Starting metrics server", "address", address)
go func() {
if err := server.ListenAndServe(); err != nil {
log.Error("start metrics server error", "error", err)
}
}()
}
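A hedged usage sketch: a service registers its own collectors on `ScrollRegistry` and calls `Serve` from its cli action, so the server only starts when the metrics flags are set. The counter name is illustrative, and the metrics flags are assumed to be registered on the app elsewhere, as in the rest of the repo.

```go
package main

import (
	"context"

	gethmetrics "github.com/scroll-tech/go-ethereum/metrics"
	"github.com/urfave/cli/v2"

	"scroll-tech/common/metrics"
)

// blocksFetched is an illustrative counter registered on ScrollRegistry.
var blocksFetched = gethmetrics.NewRegisteredCounter("watcher/blocks/fetched", metrics.ScrollRegistry)

func action(c *cli.Context) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	metrics.Serve(ctx, c) // no-op unless the metrics flag is enabled
	blocksFetched.Inc(1)
	// ... run the service until done ...
	return nil
}

func main() {
	app := &cli.App{Action: action} // metrics flags assumed registered on the app
	_ = app.Run([]string{"svc"})
}
```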

View File

@@ -3,31 +3,20 @@ package types
import (
"encoding/binary"
"errors"
"fmt"
"math"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
)
// CalldataNonZeroByteGas is the gas consumption per non-zero byte in calldata.
const CalldataNonZeroByteGas = 16
// GetKeccak256Gas calculates keccak256 hash gas.
func GetKeccak256Gas(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
// Transactions is only used to recover types.Transactions; the `from` field of types.TransactionData is missing.
Transactions []*types.TransactionData `json:"transactions"`
WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
RowConsumption *types.RowConsumption `json:"row_consumption"`
txPayloadLengthCache map[string]uint64
Transactions []*types.TransactionData `json:"transactions"`
WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
RowConsumption *types.RowConsumption `json:"row_consumption"`
}
// NumL1Messages returns the number of L1 messages in this block.
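The GetKeccak256Gas helper introduced above follows EVM pricing for the KECCAK256 opcode: a 30-gas base plus 6 gas per 32-byte word of input. A quick worked check:

```go
package main

import "fmt"

// getKeccak256Gas mirrors the helper above: 30 base gas plus 6 gas per
// 32-byte word of input.
func getKeccak256Gas(size uint64) uint64 {
	return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size/32)
}

func main() {
	fmt.Println(getKeccak256Gas(32))  // 1 word:  30 + 6*1 = 36
	fmt.Println(getKeccak256Gas(100)) // 4 words: 30 + 6*4 = 54
}
```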
@@ -98,14 +87,17 @@ func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
if txData.Type == types.L1MessageTxType {
continue
}
size += 64 // 60 bytes BlockContext + 4 bytes payload length
size += w.getTxPayloadLength(txData)
size += uint64(len(txData.Data))
}
return size
}
// EstimateL1CommitGas calculates the total L1 commit gas for this block approximately.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
var total uint64
var numL1Messages uint64
for _, txData := range w.Transactions {
@@ -114,10 +106,23 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
continue
}
txPayloadLength := w.getTxPayloadLength(txData)
total += CalldataNonZeroByteGas * txPayloadLength // an over-estimate: treat each byte as non-zero
total += CalldataNonZeroByteGas * 64 // 60 bytes BlockContext + 4 bytes payload length
total += GetKeccak256Gas(txPayloadLength) // l2 tx hash
data, _ := hexutil.Decode(txData.Data)
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, _ := tx.MarshalBinary()
txPayloadLength := uint64(len(rlpTxData))
total += 16 * txPayloadLength // an over-estimate: treat each byte as non-zero
total += 16 * 4 // size of a uint32 field
total += getKeccakGas(txPayloadLength) // l2 tx hash
}
// sload
@@ -140,48 +145,3 @@ func (w *WrappedBlock) L2TxsNum() uint64 {
}
return count
}
func (w *WrappedBlock) getTxPayloadLength(txData *types.TransactionData) uint64 {
if w.txPayloadLengthCache == nil {
w.txPayloadLengthCache = make(map[string]uint64)
}
if length, exists := w.txPayloadLengthCache[txData.TxHash]; exists {
return length
}
rlpTxData, err := convertTxDataToRLPEncoding(txData)
if err != nil {
log.Crit("convertTxDataToRLPEncoding failed, which should not happen", "hash", txData.TxHash, "err", err)
return 0
}
txPayloadLength := uint64(len(rlpTxData))
w.txPayloadLengthCache[txData.TxHash] = txPayloadLength
return txPayloadLength
}
func convertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
data, err := hexutil.Decode(txData.Data)
if err != nil {
return nil, fmt.Errorf("failed to decode txData.Data: %s, err: %w", txData.Data, err)
}
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, err := tx.MarshalBinary()
if err != nil {
return nil, fmt.Errorf("failed to marshal binary of the tx: %+v, err: %w", tx, err)
}
return rlpTxData, nil
}

View File

@@ -8,6 +8,7 @@ import (
"strings"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
)
@@ -64,7 +65,23 @@ func (c *Chunk) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
if txData.Type == types.L1MessageTxType {
continue
}
rlpTxData, err := convertTxDataToRLPEncoding(txData)
data, err := hexutil.Decode(txData.Data)
if err != nil {
return nil, err
}
// right now we only support legacy tx
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, err := tx.MarshalBinary()
if err != nil {
return nil, err
}
@@ -129,10 +146,14 @@ func (c *Chunk) EstimateL1CommitGas() uint64 {
}
numBlocks := uint64(len(c.Blocks))
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += CalldataNonZeroByteGas // numBlocks field of chunk encoding in calldata
totalL1CommitGas += CalldataNonZeroByteGas * numBlocks * 60 // numBlocks of BlockContext in chunk
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += 16 // numBlocks field of chunk encoding in calldata
totalL1CommitGas += 16 * 60 * numBlocks // BlockContext in chunk
totalL1CommitGas += GetKeccak256Gas(58*numBlocks + 32*totalTxNum) // chunk hash
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
totalL1CommitGas += getKeccakGas(58*numBlocks + 32*totalTxNum) // chunk hash
return totalL1CommitGas
}
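For intuition, the chunk-level constants in this estimate (ignoring the per-transaction calldata terms, which both sides of the diff compute the same way) work out as follows for a one-block chunk containing two transactions — a sketch of the arithmetic only:

```go
package main

import "fmt"

func main() {
	const (
		numBlocks  uint64 = 1
		totalTxNum uint64 = 2
	)
	getKeccakGas := func(size uint64) uint64 { return 30 + 6*((size+31)/32) }

	var gas uint64
	gas += 100 * numBlocks                            // warm sload per block
	gas += 16                                         // numBlocks field in calldata
	gas += 16 * 60 * numBlocks                        // 60-byte BlockContext per block
	gas += getKeccakGas(58*numBlocks + 32*totalTxNum) // chunk hash over 122 bytes: 54 gas
	fmt.Println(gas)                                  // 100 + 16 + 960 + 54 = 1130
}
```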

View File

@@ -38,15 +38,11 @@ func TestChunkEncode(t *testing.T) {
wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
assert.Equal(t, uint64(0), wrappedBlock.NumL1Messages(0))
assert.Equal(t, uint64(358), wrappedBlock.EstimateL1CommitCalldataSize())
assert.Equal(t, uint64(2), wrappedBlock.L2TxsNum())
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock,
},
}
assert.Equal(t, uint64(0), chunk.NumL1Messages(0))
assert.Equal(t, uint64(6966), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString := hex.EncodeToString(bytes)
assert.NoError(t, err)
@@ -60,15 +56,11 @@ func TestChunkEncode(t *testing.T) {
wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
assert.Equal(t, uint64(11), wrappedBlock2.NumL1Messages(0)) // 0..=9 skipped, 10 included
assert.Equal(t, uint64(96), wrappedBlock2.EstimateL1CommitCalldataSize())
assert.Equal(t, uint64(1), wrappedBlock2.L2TxsNum())
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,
},
}
assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
assert.Equal(t, uint64(5002), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
@@ -83,8 +75,6 @@ func TestChunkEncode(t *testing.T) {
wrappedBlock2,
},
}
assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
assert.Equal(t, uint64(9958), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
@@ -146,81 +136,3 @@ func TestChunkHash(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, "0x2eb7dd63bf8fc29a0f8c10d16c2ae6f9da446907c79d50f5c164d30dc8526b60", hash.Hex())
}
func TestErrorPaths(t *testing.T) {
// test 1: Header.Number is not a uint64
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
wrappedBlock.Header.Number = wrappedBlock.Header.Number.Lsh(wrappedBlock.Header.Number, 64)
bytes, err := wrappedBlock.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "block number is not uint64")
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
for i := 0; i < 65537; i++ {
wrappedBlock.Transactions = append(wrappedBlock.Transactions, wrappedBlock.Transactions[0])
}
bytes, err = wrappedBlock.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")
chunk := &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock,
},
}
bytes, err = chunk.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")
wrappedBlock.Transactions = wrappedBlock.Transactions[:1]
wrappedBlock.Transactions[0].Data = "not-a-hex"
bytes, err = chunk.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "hex string without 0x prefix")
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
wrappedBlock.Transactions[0].TxHash = "not-a-hex"
_, err = chunk.Hash(0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "invalid byte")
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
for i := 0; i < 65535; i++ {
tx := &wrappedBlock2.Transactions[i]
txCopy := *tx
txCopy.Nonce = uint64(i + 1)
wrappedBlock2.Transactions = append(wrappedBlock2.Transactions, txCopy)
}
bytes, err = wrappedBlock2.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,
},
}
bytes, err = chunk.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")
}

View File

@@ -126,8 +126,8 @@ const (
ProvingTaskUnassigned
// ProvingTaskAssigned : proving_task is assigned to be proved
ProvingTaskAssigned
// ProvingTaskProvedDEPRECATED DEPRECATED: proof has been returned by prover
ProvingTaskProvedDEPRECATED
// ProvingTaskProved DEPRECATED: proof has been returned by prover
ProvingTaskProved
// ProvingTaskVerified : proof is valid
ProvingTaskVerified
// ProvingTaskFailed : fail to generate proof
@@ -140,7 +140,7 @@ func (ps ProvingStatus) String() string {
return "unassigned"
case ProvingTaskAssigned:
return "assigned"
case ProvingTaskProvedDEPRECATED:
case ProvingTaskProved:
return "proved"
case ProvingTaskVerified:
return "verified"

View File

@@ -58,8 +58,8 @@ func TestProvingStatus(t *testing.T) {
"assigned",
},
{
"ProvingTaskProvedDEPRECATED",
ProvingTaskProvedDEPRECATED,
"ProvingTaskProved",
ProvingTaskProved,
"proved",
},
{

View File

@@ -13,18 +13,6 @@ import (
"github.com/scroll-tech/go-ethereum/rlp"
)
// ProofFailureType the proof failure type
type ProofFailureType int
const (
// ProofFailureUndefined the undefined type proof failure type
ProofFailureUndefined ProofFailureType = iota
// ProofFailurePanic proof failure for prover panic
ProofFailurePanic
// ProofFailureNoPanic proof failure for no prover panic
ProofFailureNoPanic
)
// RespStatus represents status code from prover to scroll
type RespStatus uint32
@@ -262,14 +250,12 @@ type ChunkInfo struct {
// ChunkProof includes the proof info that are required for chunk verification and rollup.
type ChunkProof struct {
StorageTrace []byte `json:"storage_trace"`
Protocol []byte `json:"protocol"`
Proof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between coordinator computation and prover computation
ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"`
GitVersion string `json:"git_version,omitempty"`
StorageTrace []byte `json:"storage_trace"`
Protocol []byte `json:"protocol"`
Proof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"` // cross-reference between coordinator computation and prover computation
}
// BatchProof includes the proof info that are required for batch verification and rollup.
@@ -277,8 +263,6 @@ type BatchProof struct {
Proof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between coordinator computation and prover computation
GitVersion string `json:"git_version,omitempty"`
}
// SanityCheck checks whether a BatchProof is in a legal format

View File

@@ -108,7 +108,7 @@ func TestProofDetailHash(t *testing.T) {
}
hash, err := proofDetail.Hash()
assert.NoError(t, err)
expectedHash := "d3b57cb84b0da8043373eeb3612806fb7248d6d1b6e089846ccf3ccce2d9f31c"
expectedHash := "72a00232c1fcb100b1b67e6d12cd449e5d2d890e3a66e50f4c23499d4990766f"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}

View File

@@ -3,11 +3,10 @@ package version
import (
"fmt"
"runtime/debug"
"strconv"
"strings"
)
var tag = "v4.1.106"
var tag = "v4.1.46"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -29,8 +28,15 @@ var commit = func() string {
// The default `000000-000000` is set for integration tests, and will be overwritten by the coordinator's and prover's actual compilations (see their Makefiles).
var ZkVersion = "000000-000000"
// Version denotes the version of the scroll protocol, including l2geth, the relayer, coordinator, prover, contracts, etc.
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZkVersion)
// OldZkVersion is the previous value of ZkVersion.
var OldZkVersion = "000000-000000"
var (
// Version denotes the version of the scroll protocol, including l2geth, the relayer, coordinator, prover, contracts, etc.
Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZkVersion)
// OldVersion is the previous value of Version.
OldVersion = fmt.Sprintf("%s-%s-%s", tag, commit, OldZkVersion)
)
// CheckScrollProverVersion checks the "scroll-prover" version; if it differs from the local one, it returns false
func CheckScrollProverVersion(proverVersion string) bool {
@@ -47,39 +53,3 @@ func CheckScrollProverVersion(proverVersion string) bool {
// compare the `scroll_prover` version
return remote[2] == local[2]
}
// CheckScrollProverVersionTag checks the "scroll-prover" version's tag; if it is too old, it returns false
func CheckScrollProverVersionTag(proverVersion string) bool {
// note that the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {
return false
}
remoteTagNums := strings.Split(strings.TrimPrefix(remote[0], "v"), ".")
if len(remoteTagNums) != 3 {
return false
}
remoteTagMajor, err := strconv.Atoi(remoteTagNums[0])
if err != nil {
return false
}
remoteTagMinor, err := strconv.Atoi(remoteTagNums[1])
if err != nil {
return false
}
remoteTagPatch, err := strconv.Atoi(remoteTagNums[2])
if err != nil {
return false
}
if remoteTagMajor != 4 {
return false
}
if remoteTagMinor != 1 {
return false
}
if remoteTagPatch < 98 {
return false
}
return true
}
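The version strings compared above have the shape tag-commit-scroll_prover-halo2, so splitting on '-' yields four parts and only index 2 (the scroll_prover build) has to match. A sketch of that comparison with illustrative values:

```go
package main

import (
	"fmt"
	"strings"
)

// sameScrollProver mirrors the comparison in CheckScrollProverVersion:
// only the scroll_prover component (index 2) must match.
func sameScrollProver(remote, local string) bool {
	r, l := strings.Split(remote, "-"), strings.Split(local, "-")
	if len(r) != 4 || len(l) != 4 {
		return false
	}
	return r[2] == l[2]
}

func main() {
	local := "v4.1.46-abc123-111111-222222" // illustrative version strings
	fmt.Println(sameScrollProver("v4.1.45-def456-111111-333333", local)) // true: same scroll_prover
	fmt.Println(sameScrollProver("v4.1.46-abc123-999999-222222", local)) // false
}
```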

View File

@@ -260,23 +260,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -371,22 +354,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -563,23 +530,6 @@ Emitted when some ERC1155 token is refunded.
| tokenId | uint256 | undefined |
| amount | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -227,23 +227,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -316,22 +299,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -502,23 +469,6 @@ Emitted when some ERC721 token is refunded.
| recipient `indexed` | address | undefined |
| tokenId | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -239,23 +239,6 @@ Mapping from queue index to previous replay queue index.
|---|---|---|
| _0 | uint256 | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of ETH rate limiter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### relayMessageWithProof
```solidity
@@ -453,22 +436,6 @@ Update max replay times.
|---|---|---|
| _newMaxReplayTimes | uint256 | The new max replay times. |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### xDomainMessageSender
```solidity
@@ -642,22 +609,5 @@ Emitted when the maximum number of times each message can be replayed is updated
| oldMaxReplayTimes | uint256 | undefined |
| newMaxReplayTimes | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -225,23 +225,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -292,22 +275,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
## Events
@@ -405,22 +372,5 @@ Emitted when some ERC20 token is refunded.
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -223,23 +223,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -290,22 +273,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
## Events
@@ -403,22 +370,5 @@ Emitted when some ERC20 token is refunded.
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -205,23 +205,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -316,22 +299,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -488,23 +455,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -174,23 +174,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -263,22 +246,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -430,23 +397,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -194,23 +194,6 @@ function paused() external view returns (bool)
|---|---|---|
| _0 | bool | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of ETH rate limiter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### relayMessage
```solidity
@@ -345,22 +328,6 @@ Update max failed execution times.
|---|---|---|
| _newMaxFailedExecutionTimes | uint256 | The new max failed execution times. |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### xDomainMessageSender
```solidity
@@ -534,22 +501,5 @@ Emitted when the maximum number of times each message can fail in L2 is updated.
| oldMaxFailedExecutionTimes | uint256 | undefined |
| newMaxFailedExecutionTimes | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -139,23 +139,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -223,22 +206,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### withdrawERC20
```solidity
@@ -354,23 +321,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### WithdrawERC20
```solidity

View File

@@ -172,23 +172,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -239,22 +222,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### withdrawERC20
```solidity
@@ -370,23 +337,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### WithdrawERC20
```solidity

View File

@@ -149,7 +149,7 @@ contract L1ScrollMessenger is ScrollMessengerBase, IL1ScrollMessenger {
// @note check more `_to` addresses to avoid attacks in the future when we add more gateways.
require(_to != messageQueue, "Forbid to call message queue");
_validateTargetAddress(_to);
require(_to != address(this), "Forbid to call self");
// @note This usually will never happen, just in case.
require(_from != xDomainMessageSender, "Invalid message sender");
@@ -312,8 +312,6 @@ contract L1ScrollMessenger is ScrollMessengerBase, IL1ScrollMessenger {
uint256 _gasLimit,
address _refundAddress
) internal nonReentrant {
_addUsedAmount(_value);
address _messageQueue = messageQueue; // gas saving
address _counterpart = counterpart; // gas saving

View File

@@ -161,9 +161,6 @@ abstract contract L1ERC20Gateway is IL1ERC20Gateway, IMessageDropCallback, Scrol
// ignore weird fee on transfer token
require(_amount > 0, "deposit zero amount");
// rate limit
_addUsedAmount(_token, _amount);
return (_from, _amount, _data);
}

View File

@@ -124,8 +124,6 @@ contract L1ETHGateway is ScrollGatewayBase, IL1ETHGateway, IMessageDropCallback
(_from, _data) = abi.decode(_data, (address, bytes));
}
// @note no rate limit here, since ETH is limited in messenger
// 2. Generate message passed to L1ScrollMessenger.
bytes memory _message = abi.encodeCall(IL2ETHGateway.finalizeDepositETH, (_from, _to, _amount, _data));

View File

@@ -137,7 +137,6 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
uint256 _gasLimit
) internal nonReentrant {
require(msg.value == _value, "msg.value mismatch");
_addUsedAmount(_value);
uint256 _nonce = L2MessageQueue(messageQueue).nextMessageIndex();
bytes32 _xDomainCalldataHash = keccak256(_encodeXDomainCalldata(msg.sender, _to, _value, _nonce, _message));
@@ -166,7 +165,7 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
) internal {
// @note check more `_to` addresses to avoid attacks in the future when we add more gateways.
require(_to != messageQueue, "Forbid to call message queue");
_validateTargetAddress(_to);
require(_to != address(this), "Forbid to call self");
// @note This usually will never happen, just in case.
require(_from != xDomainMessageSender, "Invalid message sender");

View File

@@ -126,9 +126,6 @@ contract L2CustomERC20Gateway is L2ERC20Gateway {
(_from, _data) = abi.decode(_data, (address, bytes));
}
// rate limit
_addUsedAmount(_token, _amount);
// 2. Burn token.
IScrollERC20Upgradeable(_token).burn(_from, _amount);

View File

@@ -98,8 +98,6 @@ contract L2ETHGateway is ScrollGatewayBase, IL2ETHGateway {
(_from, _data) = abi.decode(_data, (address, bytes));
}
// @note no rate limit here, since ETH is limited in messenger
bytes memory _message = abi.encodeCall(IL1ETHGateway.finalizeWithdrawETH, (_from, _to, _amount, _data));
IL2ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, _amount, _message, _gasLimit);

View File

@@ -140,9 +140,6 @@ contract L2StandardERC20Gateway is L2ERC20Gateway {
address _l1Token = tokenMapping[_token];
require(_l1Token != address(0), "no corresponding l1 token");
// rate limit
_addUsedAmount(_token, _amount);
// 2. Burn token.
IScrollERC20Upgradeable(_token).burn(_from, _amount);

View File

@@ -116,9 +116,6 @@ contract L2WETHGateway is L2ERC20Gateway {
(_from, _data) = abi.decode(_data, (address, bytes));
}
// rate limit
_addUsedAmount(_token, _amount);
// 2. Transfer token into this contract.
IERC20Upgradeable(_token).safeTransferFrom(_from, address(this), _amount);
IWETH(_token).withdraw(_amount);

View File

@@ -128,7 +128,7 @@ contract L2USDCGateway is L2ERC20Gateway {
require(_token == l2USDC, "only USDC is allowed");
require(!withdrawPaused, "withdraw paused");
// 1. Extract real sender if this call is from L2GatewayRouter.
// 1. Extract real sender if this call is from L1GatewayRouter.
address _from = msg.sender;
if (router == msg.sender) {
(_from, _data) = abi.decode(_data, (address, bytes));

View File

@@ -7,7 +7,6 @@ import {PausableUpgradeable} from "@openzeppelin/contracts-upgradeable/security/
import {ReentrancyGuardUpgradeable} from "@openzeppelin/contracts-upgradeable/security/ReentrancyGuardUpgradeable.sol";
import {ScrollConstants} from "./constants/ScrollConstants.sol";
import {IETHRateLimiter} from "../rate-limiter/IETHRateLimiter.sol";
import {IScrollMessenger} from "./IScrollMessenger.sol";
// solhint-disable var-name-mixedcase
@@ -27,11 +26,6 @@ abstract contract ScrollMessengerBase is
/// @param _newFeeVault The address of new fee vault contract.
event UpdateFeeVault(address _oldFeeVault, address _newFeeVault);
/// @notice Emitted when owner updates rate limiter contract.
/// @param _oldRateLimiter The address of old rate limiter contract.
/// @param _newRateLimiter The address of new rate limiter contract.
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter);
/*************
* Variables *
*************/
@@ -45,11 +39,8 @@ abstract contract ScrollMessengerBase is
/// @notice The address of fee vault, collecting cross domain messaging fee.
address public feeVault;
/// @notice The address of ETH rate limiter contract.
address public rateLimiter;
/// @dev The storage slots for future usage.
uint256[46] private __gap;
uint256[47] private __gap;
/**********************
* Function Modifiers *
@@ -98,16 +89,6 @@ abstract contract ScrollMessengerBase is
emit UpdateFeeVault(_oldFeeVault, _newFeeVault);
}
/// @notice Update rate limiter contract.
/// @dev This function can only be called by the contract owner.
/// @param _newRateLimiter The address of new rate limiter contract.
function updateRateLimiter(address _newRateLimiter) external onlyOwner {
address _oldRateLimiter = rateLimiter;
rateLimiter = _newRateLimiter;
emit UpdateRateLimiter(_oldRateLimiter, _newRateLimiter);
}
/// @notice Pause the contract
/// @dev This function can only be called by the contract owner.
/// @param _status The pause status to update.
@@ -147,27 +128,4 @@ abstract contract ScrollMessengerBase is
_message
);
}
/// @dev Internal function to increase ETH usage for the given `_sender`.
/// @param _amount The amount of ETH used.
function _addUsedAmount(uint256 _amount) internal {
if (_amount == 0) return;
address _rateLimiter = rateLimiter;
if (_rateLimiter != address(0)) {
IETHRateLimiter(_rateLimiter).addUsedAmount(_amount);
}
}
/// @dev Internal function to check whether the `_target` address is allowed to avoid attack.
/// @param _target The address of target address to check.
function _validateTargetAddress(address _target) internal view {
// @note check more `_target` addresses to avoid attacks in the future when we add more external contracts.
address _rateLimiter = rateLimiter;
if (_rateLimiter != address(0)) {
require(_target != _rateLimiter, "Forbid to call rate limiter");
}
require(_target != address(this), "Forbid to call self");
}
}

View File

@@ -9,18 +9,8 @@ import {IScrollGateway} from "./IScrollGateway.sol";
import {IScrollMessenger} from "../IScrollMessenger.sol";
import {IScrollGatewayCallback} from "../callbacks/IScrollGatewayCallback.sol";
import {ScrollConstants} from "../constants/ScrollConstants.sol";
import {ITokenRateLimiter} from "../../rate-limiter/ITokenRateLimiter.sol";
abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgradeable, IScrollGateway {
/**********
* Events *
**********/
/// @notice Emitted when owner updates rate limiter contract.
/// @param _oldRateLimiter The address of old rate limiter contract.
/// @param _newRateLimiter The address of new rate limiter contract.
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter);
/*************
* Variables *
*************/
@@ -34,11 +24,8 @@ abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgrad
/// @inheritdoc IScrollGateway
address public override messenger;
/// @notice The address of token rate limiter contract.
address public rateLimiter;
/// @dev The storage slots for future usage.
uint256[46] private __gap;
uint256[47] private __gap;
/**********************
* Function Modifiers *
@@ -85,20 +72,6 @@ abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgrad
}
}
/************************
* Restricted Functions *
************************/
/// @notice Update rate limiter contract.
/// @dev This function can only be called by the contract owner.
/// @param _newRateLimiter The address of new rate limiter contract.
function updateRateLimiter(address _newRateLimiter) external onlyOwner {
address _oldRateLimiter = rateLimiter;
rateLimiter = _newRateLimiter;
emit UpdateRateLimiter(_oldRateLimiter, _newRateLimiter);
}
/**********************
* Internal Functions *
**********************/
@@ -111,16 +84,4 @@ abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgrad
IScrollGatewayCallback(_to).onScrollGatewayCallback(_data);
}
}
/// @dev Internal function to increase token usage for the given `_sender`.
/// @param _token The address of token.
/// @param _amount The amount of token used.
function _addUsedAmount(address _token, uint256 _amount) internal {
if (_amount == 0) return;
address _rateLimiter = rateLimiter;
if (_rateLimiter != address(0)) {
ITokenRateLimiter(_rateLimiter).addUsedAmount(_token, _amount);
}
}
}

View File

@@ -1,130 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import {AccessControlEnumerable} from "@openzeppelin/contracts/access/AccessControlEnumerable.sol";
import {EnumerableSet} from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol";
// solhint-disable no-empty-blocks
contract ScrollOwner is AccessControlEnumerable {
using EnumerableSet for EnumerableSet.Bytes32Set;
/*************
* Variables *
*************/
/// @notice Mapping from target address to selector to the list of accessible roles.
mapping(address => mapping(bytes4 => EnumerableSet.Bytes32Set)) private targetAccess;
/**********************
* Function Modifiers *
**********************/
modifier hasAccess(
address _target,
bytes4 _selector,
bytes32 _role
) {
// admin has access to all methods
require(_role == DEFAULT_ADMIN_ROLE || targetAccess[_target][_selector].contains(_role), "no access");
_;
}
/***************
* Constructor *
***************/
constructor() {
_grantRole(DEFAULT_ADMIN_ROLE, msg.sender);
}
/*************************
* Public View Functions *
*************************/
/// @notice Return a list of roles which has access to the function.
/// @param _target The address of target contract.
/// @param _selector The function selector to query.
/// @return _roles The list of roles.
function callableRoles(address _target, bytes4 _selector) external view returns (bytes32[] memory _roles) {
EnumerableSet.Bytes32Set storage _lists = targetAccess[_target][_selector];
_roles = new bytes32[](_lists.length());
for (uint256 i = 0; i < _roles.length; i++) {
_roles[i] = _lists.at(i);
}
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Perform a function call from arbitrary role.
/// @param _target The address of target contract.
/// @param _value The value passing to target contract.
/// @param _data The calldata passing to target contract.
/// @param _role The expected role of the caller.
function execute(
address _target,
uint256 _value,
bytes calldata _data,
bytes32 _role
) public payable onlyRole(_role) hasAccess(_target, bytes4(_data[0:4]), _role) {
_execute(_target, _value, _data);
}
// allow others to send ether to this contract.
receive() external payable {}
/************************
* Restricted Functions *
************************/
/// @notice Update the access to target contract.
/// @param _target The address of target contract.
/// @param _selectors The list of function selectors to update.
/// @param _role The role to change.
/// @param _status True if we are going to add the role, otherwise remove the role.
function updateAccess(
address _target,
bytes4[] memory _selectors,
bytes32 _role,
bool _status
) external onlyRole(DEFAULT_ADMIN_ROLE) {
if (_status) {
for (uint256 i = 0; i < _selectors.length; i++) {
targetAccess[_target][_selectors[i]].add(_role);
}
} else {
for (uint256 i = 0; i < _selectors.length; i++) {
targetAccess[_target][_selectors[i]].remove(_role);
}
}
}
/**********************
* Internal Functions *
**********************/
/// @dev Internal function to call the target contract. If the call reverts, the revert data is bubbled up.
/// @param _target The address of target contract.
/// @param _value The value passed to the target contract.
/// @param _data The calldata passed to the target contract.
function _execute(
address _target,
uint256 _value,
bytes calldata _data
) internal {
// solhint-disable-next-line avoid-low-level-calls
(bool success, ) = address(_target).call{value: _value}(_data);
if (!success) {
// solhint-disable-next-line no-inline-assembly
assembly {
let ptr := mload(0x40)
let size := returndatasize()
returndatacopy(ptr, 0, size)
revert(ptr, size)
}
}
}
}
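
ScrollOwner keys its access table on the 4-byte function selector taken from the calldata (`bytes4(_data[0:4])`). That selector is the first four bytes of the keccak256 hash of the function signature. A small Go sketch of the derivation and of the role-set lookup, assuming golang.org/x/crypto/sha3; the role bookkeeping is a simplified stand-in, not the contract's storage layout:

package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// selector returns the first 4 bytes of keccak256(signature),
// e.g. "transfer(address,uint256)" -> a9059cbb.
func selector(signature string) [4]byte {
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte(signature))
	var sel [4]byte
	copy(sel[:], h.Sum(nil)[:4])
	return sel
}

// accessTable mirrors the contract's target -> selector -> roles mapping.
type accessTable map[string]map[[4]byte]map[string]bool

// hasAccess reproduces the modifier: admins always pass, otherwise the
// role must be registered for this target and selector.
func (t accessTable) hasAccess(target string, sel [4]byte, role string) bool {
	if role == "DEFAULT_ADMIN_ROLE" {
		return true
	}
	return t[target][sel][role]
}

func main() {
	sel := selector("updateTotalLimit(uint104)")
	fmt.Println(hex.EncodeToString(sel[:])) // the 4-byte selector, hex-encoded

	t := accessTable{"0xLimiter": {sel: {"SECURITY_ROLE": true}}}
	fmt.Println(t.hasAccess("0xLimiter", sel, "SECURITY_ROLE"))      // true
	fmt.Println(t.hasAccess("0xLimiter", sel, "RANDOM_ROLE"))        // false
	fmt.Println(t.hasAccess("0xLimiter", sel, "DEFAULT_ADMIN_ROLE")) // true
}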

View File

@@ -1,116 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
import {SafeCast} from "@openzeppelin/contracts/utils/math/SafeCast.sol";
import {IETHRateLimiter} from "./IETHRateLimiter.sol";
// solhint-disable func-name-mixedcase
// solhint-disable not-rely-on-time
contract ETHRateLimiter is Ownable, IETHRateLimiter {
/***********
* Structs *
***********/
struct TokenAmount {
// The timestamp when the amount is updated.
uint48 lastUpdateTs;
// The ETH limit in wei.
uint104 limit;
// The amount of ETH in current period.
uint104 amount;
}
/*************
* Constants *
*************/
/// @notice The period length in seconds.
/// @dev The time frame for the `k`-th period is `[periodDuration * k, periodDuration * (k + 1))`.
uint256 public immutable periodDuration;
/// @notice The address of ETH spender.
address public immutable spender;
/*************
* Variables *
*************/
/// @notice The token amount used in current period.
TokenAmount public currentPeriod;
/***************
* Constructor *
***************/
constructor(
uint256 _periodDuration,
address _spender,
uint104 _totalLimit
) {
if (_periodDuration == 0) {
revert PeriodIsZero();
}
if (_totalLimit == 0) {
revert TotalLimitIsZero();
}
periodDuration = _periodDuration;
spender = _spender;
currentPeriod.limit = _totalLimit;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc IETHRateLimiter
function addUsedAmount(uint256 _amount) external override {
if (msg.sender != spender) {
revert CallerNotSpender();
}
if (_amount == 0) return;
uint256 _currentPeriodStart = (block.timestamp / periodDuration) * periodDuration;
// check total limit
uint256 _currentTotalAmount;
TokenAmount memory _currentPeriod = currentPeriod;
if (_currentPeriod.lastUpdateTs < _currentPeriodStart) {
_currentTotalAmount = _amount;
} else {
_currentTotalAmount = _currentPeriod.amount + _amount;
}
if (_currentTotalAmount > _currentPeriod.limit) {
revert ExceedTotalLimit();
}
_currentPeriod.lastUpdateTs = uint48(block.timestamp);
_currentPeriod.amount = SafeCast.toUint104(_currentTotalAmount);
currentPeriod = _currentPeriod;
}
/************************
* Restricted Functions *
************************/
/// @notice Update the total token amount limit.
/// @param _newTotalLimit The new total limit.
function updateTotalLimit(uint104 _newTotalLimit) external onlyOwner {
if (_newTotalLimit == 0) {
revert TotalLimitIsZero();
}
uint256 _oldTotalLimit = currentPeriod.limit;
currentPeriod.limit = _newTotalLimit;
emit UpdateTotalLimit(_oldTotalLimit, _newTotalLimit);
}
}
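
The limiter uses fixed windows: `_currentPeriodStart` is `block.timestamp` rounded down to a multiple of `periodDuration`, and usage recorded before that boundary is discarded rather than decayed. A minimal Go sketch of the same bookkeeping (not Scroll code, just the windowing arithmetic):

package main

import (
	"errors"
	"fmt"
)

// limiter tracks usage in fixed windows of periodDuration seconds,
// mirroring ETHRateLimiter.addUsedAmount.
type limiter struct {
	periodDuration uint64
	lastUpdateTs   uint64
	limit          uint64
	amount         uint64
}

var errExceedTotalLimit = errors.New("exceed total limit")

func (l *limiter) addUsedAmount(now, amount uint64) error {
	if amount == 0 {
		return nil
	}
	// Round down to the start of the current window, as the contract does.
	periodStart := (now / l.periodDuration) * l.periodDuration
	total := amount
	if l.lastUpdateTs >= periodStart {
		// Still inside the same window: accumulate.
		total = l.amount + amount
	}
	if total > l.limit {
		return errExceedTotalLimit
	}
	l.lastUpdateTs = now
	l.amount = total
	return nil
}

func main() {
	l := &limiter{periodDuration: 86400, limit: 100}
	fmt.Println(l.addUsedAmount(86400, 60))   // <nil>
	fmt.Println(l.addUsedAmount(90000, 50))   // exceed total limit (60+50 > 100)
	fmt.Println(l.addUsedAmount(86400*2, 50)) // <nil>: new window, counter reset
}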

View File

@@ -1,38 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;
interface IETHRateLimiter {
/**********
* Events *
**********/
/// @notice Emitted when the total limit is updated.
/// @param oldTotalLimit The previous value of total limit before updating.
/// @param newTotalLimit The current value of total limit after updating.
event UpdateTotalLimit(uint256 oldTotalLimit, uint256 newTotalLimit);
/**********
* Errors *
**********/
/// @dev Thrown when the `periodDuration` is initialized to zero.
error PeriodIsZero();
/// @dev Thrown when the `totalAmount` is initialized to zero.
error TotalLimitIsZero();
/// @dev Thrown when an amount breaches the total limit in the period.
error ExceedTotalLimit();
/// @dev Thrown when the caller is not the spender.
error CallerNotSpender();
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Request some ETH usage for `sender`.
/// @param _amount The amount of ETH to use.
function addUsedAmount(uint256 _amount) external;
}

View File

@@ -1,38 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;
interface ITokenRateLimiter {
/**********
* Events *
**********/
/// @notice Emitted when the total limit is updated.
/// @param oldTotalLimit The previous value of total limit before updating.
/// @param newTotalLimit The current value of total limit after updating.
event UpdateTotalLimit(address indexed token, uint256 oldTotalLimit, uint256 newTotalLimit);
/**********
* Errors *
**********/
/// @dev Thrown when the `periodDuration` is initialized to zero.
error PeriodIsZero();
/// @dev Thrown when the `totalAmount` is initialized to zero.
/// @param token The address of the token.
error TotalLimitIsZero(address token);
/// @dev Thrown when an amount breaches the total limit in the period.
/// @param token The address of the token.
error ExceedTotalLimit(address token);
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Request some token usage for `sender`.
/// @param token The address of the token.
/// @param amount The amount of token to use.
function addUsedAmount(address token, uint256 amount) external;
}

View File

@@ -1,106 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {AccessControlEnumerable} from "@openzeppelin/contracts/access/AccessControlEnumerable.sol";
import {SafeCast} from "@openzeppelin/contracts/utils/math/SafeCast.sol";
import {ITokenRateLimiter} from "./ITokenRateLimiter.sol";
// solhint-disable func-name-mixedcase
// solhint-disable not-rely-on-time
contract TokenRateLimiter is AccessControlEnumerable, ITokenRateLimiter {
/***********
* Structs *
***********/
struct TokenAmount {
// The timestamp when the amount is updated.
uint48 lastUpdateTs;
// The token limit.
uint104 limit;
// The amount of token in current period.
uint104 amount;
}
/*************
* Constants *
*************/
/// @notice The role for token spender.
bytes32 public constant TOKEN_SPENDER_ROLE = keccak256("TOKEN_SPENDER_ROLE");
/// @notice The period length in seconds.
/// @dev The time frame for the `k`-th period is `[periodDuration * k, periodDuration * (k + 1))`.
uint256 public immutable periodDuration;
/*************
* Variables *
*************/
/// @notice Mapping from token address to the amount used in the current period and the total token amount limit.
mapping(address => TokenAmount) public currentPeriod;
/// @dev The storage slots for future usage.
uint256[49] private __gap;
/***************
* Constructor *
***************/
constructor(uint256 _periodDuration) {
if (_periodDuration == 0) {
revert PeriodIsZero();
}
_setupRole(DEFAULT_ADMIN_ROLE, msg.sender);
periodDuration = _periodDuration;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc ITokenRateLimiter
function addUsedAmount(address _token, uint256 _amount) external override onlyRole(TOKEN_SPENDER_ROLE) {
if (_amount == 0) return;
uint256 _currentPeriodStart = (block.timestamp / periodDuration) * periodDuration;
// check total limit, `0` means no limit at all.
uint256 _currentTotalAmount;
TokenAmount memory _currentPeriod = currentPeriod[_token];
if (_currentPeriod.lastUpdateTs < _currentPeriodStart) {
_currentTotalAmount = _amount;
} else {
_currentTotalAmount = _currentPeriod.amount + _amount;
}
if (_currentPeriod.limit != 0 && _currentTotalAmount > _currentPeriod.limit) {
revert ExceedTotalLimit(_token);
}
_currentPeriod.lastUpdateTs = uint48(block.timestamp);
_currentPeriod.amount = SafeCast.toUint104(_currentTotalAmount);
currentPeriod[_token] = _currentPeriod;
}
/************************
* Restricted Functions *
************************/
/// @notice Update the total token amount limit.
/// @param _token The address of the token to update.
/// @param _newTotalLimit The new total limit.
function updateTotalLimit(address _token, uint104 _newTotalLimit) external onlyRole(DEFAULT_ADMIN_ROLE) {
if (_newTotalLimit == 0) {
revert TotalLimitIsZero(_token);
}
uint256 _oldTotalLimit = currentPeriod[_token].limit;
currentPeriod[_token].limit = _newTotalLimit;
emit UpdateTotalLimit(_token, _oldTotalLimit, _newTotalLimit);
}
}

View File

@@ -1,75 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";
import {ETHRateLimiter} from "../rate-limiter/ETHRateLimiter.sol";
import {IETHRateLimiter} from "../rate-limiter/IETHRateLimiter.sol";
contract ETHRateLimiterTest is DSTestPlus {
event UpdateTotalLimit(uint256 oldTotalLimit, uint256 newTotalLimit);
ETHRateLimiter private limiter;
function setUp() public {
hevm.warp(86400);
limiter = new ETHRateLimiter(86400, address(this), 100 ether);
}
function testUpdateTotalLimit(uint104 _newTotalLimit) external {
hevm.assume(_newTotalLimit > 0);
// not owner, revert
hevm.startPrank(address(1));
hevm.expectRevert("Ownable: caller is not the owner");
limiter.updateTotalLimit(_newTotalLimit);
hevm.stopPrank();
// zero revert
hevm.expectRevert(IETHRateLimiter.TotalLimitIsZero.selector);
limiter.updateTotalLimit(0);
// success
hevm.expectEmit(false, false, false, true);
emit UpdateTotalLimit(100 ether, _newTotalLimit);
limiter.updateTotalLimit(_newTotalLimit);
(, uint104 _totalLimit, ) = limiter.currentPeriod();
assertEq(_totalLimit, _newTotalLimit);
}
function testAddUsedAmount() external {
// non-spender, revert
hevm.startPrank(address(1));
hevm.expectRevert(IETHRateLimiter.CallerNotSpender.selector);
limiter.addUsedAmount(0);
hevm.stopPrank();
// exceed total limit on first call
hevm.expectRevert(IETHRateLimiter.ExceedTotalLimit.selector);
limiter.addUsedAmount(100 ether + 1);
_checkTotalCurrentPeriodAmountAmount(0);
// exceed total limit on second call
limiter.addUsedAmount(50 ether);
_checkTotalCurrentPeriodAmountAmount(50 ether);
hevm.expectRevert(IETHRateLimiter.ExceedTotalLimit.selector);
limiter.addUsedAmount(50 ether + 1);
_checkTotalCurrentPeriodAmountAmount(50 ether);
// one period passed
hevm.warp(86400 * 2);
limiter.addUsedAmount(1 ether);
_checkTotalCurrentPeriodAmountAmount(1 ether);
// exceed
hevm.expectRevert(IETHRateLimiter.ExceedTotalLimit.selector);
limiter.addUsedAmount(99 ether + 1);
_checkTotalCurrentPeriodAmountAmount(1 ether);
}
function _checkTotalCurrentPeriodAmountAmount(uint256 expected) internal {
(, , uint256 totalAmount) = limiter.currentPeriod();
assertEq(totalAmount, expected);
}
}

View File

@@ -42,26 +42,6 @@ contract L1ScrollMessengerTest is L1GatewayTestBase {
l1Messenger.relayMessageWithProof(address(this), address(messageQueue), 0, 0, new bytes(0), proof);
}
function testForbidCallRateLimiterFromL2() external {
l1Messenger.updateRateLimiter(address(1));
bytes32 _xDomainCalldataHash = keccak256(
abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
address(this),
address(1),
0,
0,
new bytes(0)
)
);
prepareL2MessageRoot(_xDomainCalldataHash);
IL1ScrollMessenger.L2MessageProof memory proof;
proof.batchIndex = rollup.lastFinalizedBatchIndex();
hevm.expectRevert("Forbid to call rate limiter");
l1Messenger.relayMessageWithProof(address(this), address(1), 0, 0, new bytes(0), proof);
}
function testForbidCallSelfFromL2() external {
bytes32 _xDomainCalldataHash = keccak256(
abi.encodeWithSignature(

View File

@@ -51,15 +51,10 @@ contract L2ScrollMessengerTest is DSTestPlus {
}
function testForbidCallFromL1() external {
l2Messenger.updateRateLimiter(address(1));
hevm.startPrank(AddressAliasHelper.applyL1ToL2Alias(address(l1Messenger)));
hevm.expectRevert("Forbid to call message queue");
l2Messenger.relayMessage(address(this), address(l2MessageQueue), 0, 0, new bytes(0));
hevm.expectRevert("Forbid to call rate limiter");
l2Messenger.relayMessage(address(this), address(1), 0, 0, new bytes(0));
hevm.expectRevert("Forbid to call self");
l2Messenger.relayMessage(address(this), address(l2Messenger), 0, 0, new bytes(0));
hevm.stopPrank();

View File

@@ -1,87 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";
import {ScrollOwner} from "../misc/ScrollOwner.sol";
contract ScrollOwnerTest is DSTestPlus {
event Call();
ScrollOwner private owner;
function setUp() public {
owner = new ScrollOwner();
}
function testUpdateAccess() external {
// not admin, revert
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x0000000000000000000000000000000000000000000000000000000000000000"
);
owner.updateAccess(address(0), new bytes4[](0), bytes32(0), true);
hevm.stopPrank();
bytes4[] memory _selectors;
bytes32[] memory _roles;
// add access then remove access
_roles = owner.callableRoles(address(this), ScrollOwnerTest.revertOnCall.selector);
assertEq(0, _roles.length);
_selectors = new bytes4[](1);
_selectors[0] = ScrollOwnerTest.revertOnCall.selector;
owner.updateAccess(address(this), _selectors, bytes32(uint256(1)), true);
_roles = owner.callableRoles(address(this), ScrollOwnerTest.revertOnCall.selector);
assertEq(1, _roles.length);
assertEq(_roles[0], bytes32(uint256(1)));
owner.updateAccess(address(this), _selectors, bytes32(uint256(1)), false);
_roles = owner.callableRoles(address(this), ScrollOwnerTest.revertOnCall.selector);
assertEq(0, _roles.length);
}
function testAdminExecute() external {
// call with revert
hevm.expectRevert("Called");
owner.execute(address(this), 0, abi.encodeWithSelector(ScrollOwnerTest.revertOnCall.selector), bytes32(0));
// call with emit
hevm.expectEmit(false, false, false, true);
emit Call();
owner.execute(address(this), 0, abi.encodeWithSelector(ScrollOwnerTest.emitOnCall.selector), bytes32(0));
}
function testExecute(bytes32 _role) external {
hevm.assume(_role != bytes32(0));
bytes4[] memory _selectors = new bytes4[](2);
_selectors[0] = ScrollOwnerTest.revertOnCall.selector;
_selectors[1] = ScrollOwnerTest.emitOnCall.selector;
owner.grantRole(_role, address(this));
// no access, revert
hevm.expectRevert("no access");
owner.execute(address(this), 0, abi.encodeWithSelector(ScrollOwnerTest.revertOnCall.selector), _role);
owner.updateAccess(address(this), _selectors, _role, true);
// call with revert
hevm.expectRevert("Called");
owner.execute(address(this), 0, abi.encodeWithSelector(ScrollOwnerTest.revertOnCall.selector), _role);
// call with emit
hevm.expectEmit(false, false, false, true);
emit Call();
owner.execute(address(this), 0, abi.encodeWithSelector(ScrollOwnerTest.emitOnCall.selector), _role);
}
function revertOnCall() external pure {
revert("Called");
}
function emitOnCall() external {
emit Call();
}
}

View File

@@ -1,82 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";
import {TokenRateLimiter} from "../rate-limiter/TokenRateLimiter.sol";
import {ITokenRateLimiter} from "../rate-limiter/ITokenRateLimiter.sol";
contract TokenRateLimiterTest is DSTestPlus {
event UpdateTotalLimit(address indexed token, uint256 oldTotalLimit, uint256 newTotalLimit);
TokenRateLimiter private limiter;
function setUp() public {
hevm.warp(86400);
limiter = new TokenRateLimiter(86400);
}
function testUpdateTotalLimit(address _token, uint104 _newTotalLimit) external {
hevm.assume(_newTotalLimit > 0);
// not admin, revert
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x0000000000000000000000000000000000000000000000000000000000000000"
);
limiter.updateTotalLimit(_token, _newTotalLimit);
hevm.stopPrank();
// zero revert
hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.TotalLimitIsZero.selector, _token));
limiter.updateTotalLimit(_token, 0);
// success
hevm.expectEmit(true, false, false, true);
emit UpdateTotalLimit(_token, 0 ether, _newTotalLimit);
limiter.updateTotalLimit(_token, _newTotalLimit);
(, uint104 _totalLimit, ) = limiter.currentPeriod(_token);
assertEq(_totalLimit, _newTotalLimit);
}
function testAddUsedAmount(address _token) external {
// non-spender, revert
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x267f05081a073059ae452e6ac77ec189636e43e41051d4c3ec760734b3d173cb"
);
limiter.addUsedAmount(_token, 0);
hevm.stopPrank();
limiter.grantRole(bytes32(0x267f05081a073059ae452e6ac77ec189636e43e41051d4c3ec760734b3d173cb), address(this));
limiter.updateTotalLimit(_token, 100 ether);
// exceed total limit on first call
hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.ExceedTotalLimit.selector, _token));
limiter.addUsedAmount(_token, 100 ether + 1);
_checkTotalCurrentPeriodAmountAmount(_token, 0);
// exceed total limit on second call
limiter.addUsedAmount(_token, 50 ether);
_checkTotalCurrentPeriodAmountAmount(_token, 50 ether);
hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.ExceedTotalLimit.selector, _token));
limiter.addUsedAmount(_token, 50 ether + 1);
_checkTotalCurrentPeriodAmountAmount(_token, 50 ether);
// one period passed
hevm.warp(86400 * 2);
limiter.addUsedAmount(_token, 1 ether);
_checkTotalCurrentPeriodAmountAmount(_token, 1 ether);
// exceed
hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.ExceedTotalLimit.selector, _token));
limiter.addUsedAmount(_token, 99 ether + 1);
_checkTotalCurrentPeriodAmountAmount(_token, 1 ether);
}
function _checkTotalCurrentPeriodAmountAmount(address token, uint256 expected) internal {
(, , uint256 totalAmount) = limiter.currentPeriod(token);
assertEq(totalAmount, expected);
}
}

View File

@@ -5,28 +5,38 @@ IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock.new | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock.new | cut -d "#" -f2 | cut -c-7)
OLD_ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock.old | cut -d "#" -f2 | cut -c-7)
OLD_HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock.old | cut -d "#" -f2 | cut -c-7)
else
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock.new | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock.new | cut -d "\#" -f2 | cut -c-7)
OLD_ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock.old | cut -d "\#" -f2 | cut -c-7)
OLD_HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock.old | cut -d "\#" -f2 | cut -c-7)
endif
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
OLD_ZK_VERSION=${OLD_ZKEVM_VERSION}-${OLD_HALO2_VERSION}
pre-upgrade-zk:
cd ../common/libzkp/impl && cp Cargo.lock.old Cargo.lock && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/liboldzkp.so
cp -r ../common/libzkp/interface ./internal/logic/old_verifier/lib && rm ../common/libzkp/interface/liboldzkp.so
find ../common | grep libzktrie.so | xargs -I{} cp {} ./internal/logic/old_verifier/lib/liboldzktrie.so
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
libzkp:
cd ../common/libzkp/impl && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/
rm -rf ./internal/logic/verifier/lib && cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
libzkp: pre-upgrade-zk
cd ../common/libzkp/impl && cp Cargo.lock.new Cargo.lock && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/
cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
find ../common | grep libzktrie.so | xargs -I{} cp {} ./internal/logic/verifier/lib
coordinator: libzkp ## Builds the Coordinator instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION} -X scroll-tech/common/version.OldZkVersion=${OLD_ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
coordinator_skip_libzkp:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION} -X scroll-tech/common/version.OldZkVersion=${OLD_ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
mock_coordinator: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator ./cmd
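
Both `ZK_VERSION` and the new `OLD_ZK_VERSION` are built from the first 7 characters of the pinned git commit hash that Cargo records after the `#` in a dependency's source URL inside the lock file. A rough Go equivalent of the `grep -m 1 | cut -d "#" -f2 | cut -c-7` pipeline (file paths and the dependency name are taken from the Makefile above; this is a sketch, not part of the build):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// shortRev scans a Cargo.lock file for the first line mentioning dep and
// returns the first 7 characters after '#', i.e. the short commit hash.
func shortRev(lockPath, dep string) (string, error) {
	f, err := os.Open(lockPath)
	if err != nil {
		return "", err
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.Contains(line, dep) {
			continue
		}
		_, rev, found := strings.Cut(line, "#")
		if !found || len(rev) < 7 {
			continue
		}
		return rev[:7], nil
	}
	return "", fmt.Errorf("%s not found in %s", dep, lockPath)
}

func main() {
	newRev, _ := shortRev("../common/libzkp/impl/Cargo.lock.new", "scroll-prover")
	oldRev, _ := shortRev("../common/libzkp/impl/Cargo.lock.old", "scroll-prover")
	fmt.Println("ZKEVM_VERSION:", newRev, "OLD_ZKEVM_VERSION:", oldRev)
}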

View File

@@ -71,11 +71,6 @@ func action(ctx *cli.Context) error {
apiSrv := apiServer(ctx, cfg, db, registry)
log.Info(
"coordinator start successfully",
"version", version.Version,
)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)

View File

@@ -81,12 +81,11 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
}
// Reset prover manager config for manager test cases.
cfg.ProverManager = &coordinatorConfig.ProverManager{
ProversPerSession: 1,
Verifier: &coordinatorConfig.VerifierConfig{MockMode: true},
BatchCollectionTimeSec: 60,
ChunkCollectionTimeSec: 60,
SessionAttempts: 10,
MaxVerifierWorkers: 4,
ProversPerSession: 1,
Verifier: &coordinatorConfig.VerifierConfig{MockMode: true},
CollectionTimeSec: 60,
SessionAttempts: 10,
MaxVerifierWorkers: 4,
}
cfg.DB.DSN = base.DBImg.Endpoint()
cfg.L2.ChainID = 111

View File

@@ -2,13 +2,17 @@
"prover_manager": {
"provers_per_session": 1,
"session_attempts": 5,
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"collection_time_sec": 180,
"verifier": {
"mock_mode": true,
"params_path": "",
"assets_path": ""
},
"old_verifier": {
"mock_mode": true,
"params_path": "",
"assets_path": ""
},
"max_verifier_workers": 4
},
"db": {

View File

@@ -17,10 +17,10 @@ type ProverManager struct {
SessionAttempts uint8 `json:"session_attempts"`
// Zk verifier config.
Verifier *VerifierConfig `json:"verifier"`
// BatchCollectionTimeSec batch Proof collection time (in seconds).
BatchCollectionTimeSec int `json:"batch_collection_time_sec"`
// ChunkCollectionTimeSec chunk Proof collection time (in seconds).
ChunkCollectionTimeSec int `json:"chunk_collection_time_sec"`
// Old Zk version config
OldVerifier *VerifierConfig `json:"old_verifier"`
// Proof collection time (in seconds).
CollectionTimeSec int `json:"collection_time_sec"`
// Max number of workers in verifier worker pool
MaxVerifierWorkers int `json:"max_verifier_workers"`
}
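
The consolidated `collection_time_sec` plus the new `old_verifier` block map directly onto this struct. A minimal sketch of decoding the sample JSON shown earlier into it (field set trimmed to what this diff touches):

package main

import (
	"encoding/json"
	"fmt"
)

// VerifierConfig mirrors the per-verifier settings from the config file.
type VerifierConfig struct {
	MockMode   bool   `json:"mock_mode"`
	ParamsPath string `json:"params_path"`
	AssetsPath string `json:"assets_path"`
}

// ProverManager mirrors the post-diff struct: one shared collection
// timeout and two verifier configs (current and old).
type ProverManager struct {
	ProversPerSession  int             `json:"provers_per_session"`
	SessionAttempts    uint8           `json:"session_attempts"`
	Verifier           *VerifierConfig `json:"verifier"`
	OldVerifier        *VerifierConfig `json:"old_verifier"`
	CollectionTimeSec  int             `json:"collection_time_sec"`
	MaxVerifierWorkers int             `json:"max_verifier_workers"`
}

func main() {
	raw := `{
		"provers_per_session": 1,
		"session_attempts": 5,
		"collection_time_sec": 180,
		"verifier": {"mock_mode": true, "params_path": "", "assets_path": ""},
		"old_verifier": {"mock_mode": true, "params_path": "", "assets_path": ""},
		"max_verifier_workers": 4
	}`
	var pm ProverManager
	if err := json.Unmarshal([]byte(raw), &pm); err != nil {
		panic(err)
	}
	fmt.Printf("timeout=%ds old verifier mock=%v\n", pm.CollectionTimeSec, pm.OldVerifier.MockMode)
}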

View File

@@ -15,8 +15,7 @@ func TestConfig(t *testing.T) {
"prover_manager": {
"provers_per_session": 1,
"session_attempts": 5,
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"collection_time_sec": 180,
"verifier": {
"mock_mode": true,
"params_path": "",

View File

@@ -7,7 +7,6 @@ import (
"gorm.io/gorm"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/verifier"
)
var (
@@ -26,14 +25,9 @@ var (
// InitController inits Controller with database
func InitController(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) {
initControllerOnce.Do(func() {
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier)
if err != nil {
panic("proof receiver new verifier failure")
}
Auth = NewAuthController(db)
HealthCheck = NewHealthCheckController()
GetTask = NewGetTaskController(cfg, db, vf, reg)
SubmitProof = NewSubmitProofController(cfg, db, vf, reg)
GetTask = NewGetTaskController(cfg, db, reg)
SubmitProof = NewSubmitProofController(cfg, db, reg)
})
}

View File

@@ -13,7 +13,6 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provertask"
"scroll-tech/coordinator/internal/logic/verifier"
coordinatorType "scroll-tech/coordinator/internal/types"
)
@@ -23,9 +22,9 @@ type GetTaskController struct {
}
// NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, db, vf.ChunkVK, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, db, vf.BatchVK, reg)
func NewGetTaskController(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, db, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, db, reg)
ptc := &GetTaskController{
proverTasks: make(map[message.ProofType]provertask.ProverTask),
@@ -41,7 +40,7 @@ func NewGetTaskController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
var getTaskParameter coordinatorType.GetTaskParameter
if err := ctx.ShouldBind(&getTaskParameter); err != nil {
nerr := fmt.Errorf("prover task parameter invalid, err:%w", err)
nerr := fmt.Errorf("prover tasks parameter invalid, err:%w", err)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
@@ -49,7 +48,7 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
proofType := ptc.proofType(&getTaskParameter)
proverTask, isExist := ptc.proverTasks[proofType]
if !isExist {
nerr := fmt.Errorf("parameter wrong proof type:%v", proofType)
nerr := fmt.Errorf("parameter wrong proof type")
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
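
The controller keeps one `ProverTask` implementation per proof type and dispatches on the request's type, returning a parameter error when the map has no entry. The dispatch pattern in miniature (the types below are simplified stand-ins for the coordinator's, not its real signatures):

package main

import (
	"errors"
	"fmt"
)

// ProofType distinguishes chunk proofs from batch proofs.
type ProofType int

const (
	ProofTypeChunk ProofType = iota + 1
	ProofTypeBatch
)

// ProverTask is the per-type task assigner the controller delegates to.
type ProverTask interface {
	Assign(prover string) (taskID string, err error)
}

type chunkTask struct{}

func (chunkTask) Assign(p string) (string, error) { return "chunk-task-for-" + p, nil }

type getTaskController struct {
	proverTasks map[ProofType]ProverTask
}

// getTask mirrors GetTasks: look up the handler for the requested proof
// type and fail with a parameter error if none is registered.
func (c *getTaskController) getTask(t ProofType, prover string) (string, error) {
	pt, ok := c.proverTasks[t]
	if !ok {
		return "", errors.New("parameter wrong proof type")
	}
	return pt.Assign(prover)
}

func main() {
	c := &getTaskController{proverTasks: map[ProofType]ProverTask{ProofTypeChunk: chunkTask{}}}
	fmt.Println(c.getTask(ProofTypeChunk, "prover-1")) // chunk-task-for-prover-1 <nil>
	fmt.Println(c.getTask(ProofTypeBatch, "prover-1")) // "" parameter wrong proof type
}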

View File

@@ -13,8 +13,7 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/submitproof"
"scroll-tech/coordinator/internal/logic/verifier"
coordinatorType "scroll-tech/coordinator/internal/types"
coodinatorType "scroll-tech/coordinator/internal/types"
)
// SubmitProofController the submit proof api controller
@@ -23,18 +22,18 @@ type SubmitProofController struct {
}
// NewSubmitProofController create the submit proof api controller instance
func NewSubmitProofController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *SubmitProofController {
func NewSubmitProofController(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) *SubmitProofController {
return &SubmitProofController{
submitProofReceiverLogic: submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, db, vf, reg),
submitProofReceiverLogic: submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, db, reg),
}
}
// SubmitProof prover submit the proof to coordinator
func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
var spp coordinatorType.SubmitProofParameter
var spp coodinatorType.SubmitProofParameter
if err := ctx.ShouldBind(&spp); err != nil {
nerr := fmt.Errorf("parameter invalid, err:%w", err)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
@@ -46,31 +45,29 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
},
}
if spp.Status == int(message.StatusOk) {
switch message.ProofType(spp.TaskType) {
case message.ProofTypeChunk:
var tmpChunkProof message.ChunkProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.ChunkProof = &tmpChunkProof
case message.ProofTypeBatch:
var tmpBatchProof message.BatchProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.BatchProof = &tmpBatchProof
switch message.ProofType(spp.TaskType) {
case message.ProofTypeChunk:
var tmpChunkProof message.ChunkProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.ChunkProof = &tmpChunkProof
case message.ProofTypeBatch:
var tmpBatchProof message.BatchProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
return
}
proofMsg.BatchProof = &tmpBatchProof
}
if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg, spp); err != nil {
if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg); err != nil {
nerr := fmt.Errorf("handle zk proof failure, err:%w", err)
coordinatorType.RenderJSON(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr, nil)
coodinatorType.RenderJSON(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr, nil)
return
}
coordinatorType.RenderJSON(ctx, types.Success, nil, nil)
coodinatorType.RenderJSON(ctx, types.Success, nil, nil)
}

View File

@@ -29,11 +29,8 @@ type Collector struct {
chunkOrm *orm.Chunk
batchOrm *orm.Batch
timeoutBatchCheckerRunTotal prometheus.Counter
batchProverTaskTimeoutTotal prometheus.Counter
timeoutChunkCheckerRunTotal prometheus.Counter
chunkProverTaskTimeoutTotal prometheus.Counter
checkBatchAllChunkReadyRunTotal prometheus.Counter
timeoutCheckerRunTotal prometheus.Counter
proverTaskTimeoutTotal prometheus.Counter
}
// NewCollector create a collector to cron collect the data to send to prover
@@ -47,31 +44,17 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
timeoutBatchCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_batch_timeout_checker_run_total",
Help: "Total number of batch timeout checker run.",
timeoutCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_timeout_checker_run_total",
Help: "Total number of timeout checker run.",
}),
batchProverTaskTimeoutTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_batch_prover_task_timeout_total",
Help: "Total number of batch timeout prover task.",
}),
timeoutChunkCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_chunk_timeout_checker_run_total",
Help: "Total number of chunk timeout checker run.",
}),
chunkProverTaskTimeoutTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_chunk_prover_task_timeout_total",
Help: "Total number of chunk timeout prover task.",
}),
checkBatchAllChunkReadyRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_check_batch_all_chunk_ready_run_total",
Help: "Total number of check batch all chunks ready total",
proverTaskTimeoutTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_prover_task_timeout_total",
Help: "Total number of timeout prover task.",
}),
}
go c.timeoutBatchProofTask()
go c.timeoutChunkProofTask()
go c.checkBatchAllChunkReady()
go c.timeoutProofTask()
log.Info("Start coordinator successfully.")
@@ -85,10 +68,11 @@ func (c *Collector) Stop() {
// timeoutProofTask periodically checks whether an assigned task has timed out. If the timeout is reached, it restores the
// chunk/batch task to unassigned, so the batch/chunk collector can retry it.
func (c *Collector) timeoutBatchProofTask() {
func (c *Collector) timeoutProofTask() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("timeout batch proof task panic error:%v", err)
nerr := fmt.Errorf("timeout proof task panic error:%v", err)
log.Warn(nerr.Error())
}
}()
@@ -97,149 +81,52 @@ func (c *Collector) timeoutBatchProofTask() {
for {
select {
case <-ticker.C:
c.timeoutBatchCheckerRunTotal.Inc()
timeout := time.Duration(c.cfg.ProverManager.BatchCollectionTimeSec) * time.Second
assignedProverTasks, err := c.proverTaskOrm.GetTimeoutAssignedProverTasks(c.ctx, 10, message.ProofTypeBatch, timeout)
c.timeoutCheckerRunTotal.Inc()
timeout := time.Duration(c.cfg.ProverManager.CollectionTimeSec) * time.Second
assignedProverTasks, err := c.proverTaskOrm.GetTimeoutAssignedProverTasks(c.ctx, 10, timeout)
if err != nil {
log.Error("get unassigned session info failure", "error", err)
break
}
c.check(assignedProverTasks, c.batchProverTaskTimeoutTotal)
case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
return
}
}
}
func (c *Collector) timeoutChunkProofTask() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("timeout proof chunk task panic error:%v", err)
log.Warn(nerr.Error())
}
}()
// The block batch proving status is not set to failed here, because the collector loop checks
// the attempt count; once the limit is reached, the collector sets the block batch proving status.
for _, assignedProverTask := range assignedProverTasks {
c.proverTaskTimeoutTotal.Inc()
log.Warn("proof task have reach the timeout", "task id", assignedProverTask.TaskID,
"prover public key", assignedProverTask.ProverPublicKey, "prover name", assignedProverTask.ProverName, "task type", assignedProverTask.TaskType)
err = c.db.Transaction(func(tx *gorm.DB) error {
// update prover task proving status as ProverProofInvalid
if err = c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverProofInvalid, tx); err != nil {
log.Error("update prover task proving status failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
ticker := time.NewTicker(time.Second * 2)
for {
select {
case <-ticker.C:
c.timeoutChunkCheckerRunTotal.Inc()
timeout := time.Duration(c.cfg.ProverManager.ChunkCollectionTimeSec) * time.Second
assignedProverTasks, err := c.proverTaskOrm.GetTimeoutAssignedProverTasks(c.ctx, 10, message.ProofTypeChunk, timeout)
if err != nil {
log.Error("get unassigned session info failure", "error", err)
break
}
c.check(assignedProverTasks, c.chunkProverTaskTimeoutTotal)
// update prover task failure type
if err = c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverTaskFailureTypeTimeout, tx); err != nil {
log.Error("update prover task failure type failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
return
}
}
}
func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout prometheus.Counter) {
// The block batch proving status is not set to failed here, because the collector loop checks
// the attempt count; once the limit is reached, the collector sets the block batch proving status.
for _, assignedProverTask := range assignedProverTasks {
if c.proverTaskOrm.TaskTimeoutMoreThanOnce(c.ctx, message.ProofType(assignedProverTask.TaskType), assignedProverTask.TaskID) {
log.Warn("Task timeout more than once", "taskType", message.ProofType(assignedProverTask.TaskType).String(), "hash", assignedProverTask.TaskID)
}
timeout.Inc()
log.Warn("proof task have reach the timeout", "task id", assignedProverTask.TaskID,
"prover public key", assignedProverTask.ProverPublicKey, "prover name", assignedProverTask.ProverName, "task type", assignedProverTask.TaskType)
err := c.db.Transaction(func(tx *gorm.DB) error {
// update prover task proving status as ProverProofInvalid
if err := c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverProofInvalid, tx); err != nil {
log.Error("update prover task proving status failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
// update prover task failure type
if err := c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverTaskFailureTypeTimeout, tx); err != nil {
log.Error("update prover task failure type failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
// update the task to unassigned, let collector restart it
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeChunk {
if err := c.chunkOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update chunk proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
}
}
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeBatch {
if err := c.batchOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update batch proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
}
}
return nil
})
if err != nil {
log.Error("check task proof is timeout failure", "error", err)
}
}
}
func (c *Collector) checkBatchAllChunkReady() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("check batch all chunk ready panic error:%v", err)
log.Warn(nerr.Error())
}
}()
ticker := time.NewTicker(time.Second * 10)
for {
select {
case <-ticker.C:
c.checkBatchAllChunkReadyRunTotal.Inc()
page := 1
pageSize := 50
for {
offset := (page - 1) * pageSize
batches, err := c.batchOrm.GetUnassignedAndChunksUnreadyBatches(c.ctx, offset, pageSize)
// update the task to unassigned, let collector restart it
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeChunk {
if err = c.chunkOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update chunk proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
}
}
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeBatch {
if err = c.batchOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update batch proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
}
}
return nil
})
if err != nil {
log.Warn("checkBatchAllChunkReady GetUnassignedAndChunksUnreadyBatches", "error", err)
break
log.Error("check task proof is timeout failure", "error", err)
}
for _, batch := range batches {
allReady, checkErr := c.chunkOrm.CheckIfBatchChunkProofsAreReady(c.ctx, batch.Hash)
if checkErr != nil {
log.Warn("checkBatchAllChunkReady CheckIfBatchChunkProofsAreReady failure", "error", checkErr, "hash", batch.Hash)
continue
}
if !allReady {
continue
}
if updateErr := c.batchOrm.UpdateChunkProofsStatusByBatchHash(c.ctx, batch.Hash, types.ChunkProofsStatusReady); updateErr != nil {
log.Warn("checkBatchAllChunkReady UpdateChunkProofsStatusByBatchHash failure", "error", checkErr, "hash", batch.Hash)
}
}
if len(batches) < pageSize {
break
}
page++
}
case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())

View File

@@ -0,0 +1,35 @@
//go:build mock_verifier
package old_verifier
import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
)
const InvalidTestProof = "this is an invalid proof"
// OldVerifier represents a mock halo2 verifier.
type OldVerifier struct{}
// NewOldVerifier sets up a mock old verifier.
func NewOldVerifier(_ *config.VerifierConfig) (*OldVerifier, error) {
return &OldVerifier{}, nil
}
// VerifyChunkProof returns a mock verification result for a ChunkProof.
func (v *OldVerifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
if string(proof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil
}
// VerifyBatchProof returns a mock verification result for a BatchProof.
func (v *OldVerifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
if string(proof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil
}
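
The point of carrying both a `Verifier` and an `OldVerifier` is to keep accepting proofs from provers still running the previous circuit version during an upgrade window. A hypothetical sketch of such version-based routing (the selection rule and version strings below are assumptions for illustration; this PR's exact dispatch logic is not shown in the excerpt):

package main

import "fmt"

// proofVerifier is the common surface of both verifier generations.
type proofVerifier interface {
	VerifyChunkProof(proof []byte) (bool, error)
}

type newVerifier struct{}
type oldVerifier struct{}

func (newVerifier) VerifyChunkProof([]byte) (bool, error) { return true, nil }
func (oldVerifier) VerifyChunkProof([]byte) (bool, error) { return true, nil }

// pickVerifier routes by the prover's reported zk version: provers still
// on the previous version get the old verifier. The version strings are
// placeholders, not the real ZK_VERSION/OLD_ZK_VERSION tags.
func pickVerifier(proverZkVersion, currentZkVersion string, vNew, vOld proofVerifier) proofVerifier {
	if proverZkVersion == currentZkVersion {
		return vNew
	}
	return vOld
}

func main() {
	v := pickVerifier("abc1234-def5678", "1112222-3334444", newVerifier{}, oldVerifier{})
	switch v.(type) {
	case oldVerifier:
		fmt.Println("routing proof to the old verifier")
	default:
		fmt.Println("routing proof to the current verifier")
	}
}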

Some files were not shown because too many files have changed in this diff.