Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-13 07:57:58 -05:00)

Compare commits: prealpha-v ... alpha-v1.0 (44 commits)
| SHA1 |
|---|
| 3849d1bcc9 |
| f33bfffd85 |
| 4ad17468d0 |
| fbcabcc5e2 |
| eb3f187926 |
| d5f0218f5f |
| 5fdd2c609c |
| d9bc0842cc |
| 0e88b9aa94 |
| 33a912e7c1 |
| e48e76acdf |
| f5d02175f8 |
| bb76a00613 |
| 41d71fc274 |
| 02ea14d721 |
| ea9c1c6776 |
| 16576b6f53 |
| aa885f068f |
| 1f764a579d |
| 91ee767669 |
| 7eac41691e |
| d9516890b0 |
| ddb96bb732 |
| e419dd8d5c |
| c99c65bdfd |
| 18fd7f56a8 |
| a319dc1cff |
| 52bf3a55fc |
| 598e10e4fc |
| eed3f42731 |
| 5a4bea8ccd |
| 5b37b63d89 |
| 5e5c4f7701 |
| 09dc638652 |
| b598a01e7f |
| 0fcdb6f824 |
| 5a95dcf5ba |
| d0c63e75df |
| 676b8a2230 |
| a1cb3d3b87 |
| 47b4c54e05 |
| fe822a65b9 |
| 9bd4931f93 |
| 411cb19b62 |
.github/pull_request_template.md (vendored, new file, +7)

@@ -0,0 +1,7 @@
+ 1. Purpose or design rationale of this PR

+ 2. Does this PR involve a new deployment, and a new git tag & docker image tag? If so, has `tag` in `common/version.go` been updated?

+ 3. Is this PR a breaking change? If so, has it been given a `breaking-change` label?
.github/workflows/common.yml (vendored, 2 changes)

@@ -26,7 +26,7 @@ jobs:
  steps:
  - uses: actions-rs/toolchain@v1
  with:
- toolchain: nightly-2022-08-23
+ toolchain: nightly-2022-12-10
  override: true
  components: rustfmt, clippy
  - name: Install Go
.github/workflows/coordinator.yml (vendored, 2 changes)

@@ -26,7 +26,7 @@ jobs:
  steps:
  - uses: actions-rs/toolchain@v1
  with:
- toolchain: nightly-2022-08-23
+ toolchain: nightly-2022-12-10
  override: true
  components: rustfmt, clippy
  - name: Install Go
.github/workflows/roller.yml (vendored, 4 changes)

@@ -26,7 +26,7 @@ jobs:
  steps:
  - uses: actions-rs/toolchain@v1
  with:
- toolchain: nightly-2022-08-23
+ toolchain: nightly-2022-12-10
  override: true
  components: rustfmt, clippy
  - name: Install Go

@@ -42,6 +42,8 @@ jobs:
  - name: Test
  run: |
  make roller
+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./prover/lib
+ export CHAIN_ID=534353
  go test -v ./...
  check:
  runs-on: ubuntu-latest
.gitignore (vendored, 1 change)

@@ -3,6 +3,7 @@ assets/params*
  assets/seed
  coverage.txt
  build/bin
+ *.integration.txt

  # misc
  sftp-config.json
Jenkinsfile (vendored, 40 changes)

@@ -13,22 +13,12 @@ pipeline {
  environment {
  GO111MODULE = 'on'
  PATH="/home/ubuntu/.cargo/bin:$PATH"
  LD_LIBRARY_PATH="$LD_LIBRARY_PATH:./coordinator/verifier/lib"
  CHAIN_ID='534353'
  // LOG_DOCKER = 'true'
  }
  stages {
  stage('Build') {
- when {
- anyOf {
- changeset "Jenkinsfile"
- changeset "build/**"
- changeset "go.work**"
- changeset "bridge/**"
- changeset "coordinator/**"
- changeset "common/**"
- changeset "database/**"
- changeset "tests/**"
- }
- }
  parallel {
  stage('Build Prerequisite') {
  steps {

@@ -70,18 +60,6 @@ pipeline {
  }
  }
  stage('Parallel Test') {
- when {
- anyOf {
- changeset "Jenkinsfile"
- changeset "build/**"
- changeset "go.work**"
- changeset "bridge/**"
- changeset "coordinator/**"
- changeset "common/**"
- changeset "database/**"
- changeset "tests/**"
- }
- }
  parallel{
  stage('Test bridge package') {
  steps {

@@ -126,24 +104,12 @@ pipeline {
  }
  }
  stage('Compare Coverage') {
- when {
- anyOf {
- changeset "Jenkinsfile"
- changeset "build/**"
- changeset "go.work**"
- changeset "bridge/**"
- changeset "coordinator/**"
- changeset "common/**"
- changeset "database/**"
- changeset "tests/**"
- }
- }
  steps {
  sh "./build/post-test-report-coverage.sh"
  script {
  currentBuild.result = 'SUCCESS'
  }
- step([$class: 'CompareCoverageAction', publishResultAs: 'statusCheck', scmVars: [GIT_URL: env.GIT_URL]])
+ step([$class: 'CompareCoverageAction', publishResultAs: 'Comment', scmVars: [GIT_URL: env.GIT_URL]])
  }
  }
  }
Makefile (12 changes)

@@ -1,5 +1,7 @@
  .PHONY: check update dev_docker clean

+ ZKP_VERSION=release-1220

  help: ## Display this help message
  @grep -h \
  -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \

@@ -29,5 +31,15 @@ dev_docker: ## build docker images for development/testing usages
  docker build -t scroll_l1geth ./common/docker/l1geth/
  docker build -t scroll_l2geth ./common/docker/l2geth/

+ test_zkp: ## Test zkp prove and verify, roller/prover generates the proof and coordinator/verifier verifies it
+ mkdir -p test_params
+ wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params19 -O ./test_params/params19
+ wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_params/params26 -O ./test_params/params26
+ wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/${ZKP_VERSION}/test_seed -O test_seed
+ rm -rf ./roller/assets/test_params && mv test_params ./roller/assets/ && mv test_seed ./roller/assets/
+ cd ./roller && make test-gpu-prover
+ rm -rf ./coordinator/assets/test_params && mv ./roller/assets/test_params ./coordinator/assets/ && mv ./roller/assets/agg_proof ./coordinator/assets/
+ cd ./coordinator && make test-gpu-verifier

  clean: ## Empty out the bin folder
  @rm -rf build/bin
@@ -9,20 +9,20 @@ import (
  )

  const (
- // SENT_MESSAGE_EVENT_SIGNATURE = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
- SENT_MESSAGE_EVENT_SIGNATURE = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
+ // SentMessageEventSignature = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
+ SentMessageEventSignature = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"

- // RELAYED_MESSAGE_EVENT_SIGNATURE = keccak256("RelayedMessage(bytes32)")
- RELAYED_MESSAGE_EVENT_SIGNATURE = "4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"
+ // RelayedMessageEventSignature = keccak256("RelayedMessage(bytes32)")
+ RelayedMessageEventSignature = "4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"

- // FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE = keccak256("FailedRelayedMessage(bytes32)")
- FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE = "99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"
+ // FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
+ FailedRelayedMessageEventSignature = "99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"

- // COMMIT_BATCH_EVENT_SIGNATURE = keccak256("CommitBatch(bytes32,bytes32,uint256,bytes32)")
- COMMIT_BATCH_EVENT_SIGNATURE = "a26d4bd91c4c2eff3b1bf542129607d782506fc1950acfab1472a20d28c06596"
+ // CommitBatchEventSignature = keccak256("CommitBatch(bytes32,bytes32,uint256,bytes32)")
+ CommitBatchEventSignature = "a26d4bd91c4c2eff3b1bf542129607d782506fc1950acfab1472a20d28c06596"

- // FINALIZED_BATCH_EVENT_SIGNATURE = keccak256("FinalizeBatch(bytes32,bytes32,uint256,bytes32)")
- FINALIZED_BATCH_EVENT_SIGNATURE = "e20f311a96205960de4d2bb351f7729e5136fa36ae64d7f736c67ddc4ca4cd4b"
+ // FinalizedBatchEventSignature = keccak256("FinalizeBatch(bytes32,bytes32,uint256,bytes32)")
+ FinalizedBatchEventSignature = "e20f311a96205960de4d2bb351f7729e5136fa36ae64d7f736c67ddc4ca4cd4b"
  )

  var (
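As the comments above note, each constant is the keccak256 hash of the event's canonical signature string, stored without the `0x` prefix. A minimal sketch, assuming go-ethereum's `crypto` package, that recomputes one of these topic hashes for verification:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	// The topic hash of a log is keccak256 of the event's canonical signature.
	sig := "SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)"
	topic := crypto.Keccak256Hash([]byte(sig))
	// Prints 0x806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4,
	// matching SentMessageEventSignature above.
	fmt.Println(topic.Hex())
}
```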
@@ -1,6 +1,7 @@
  package app

  import (
+ "context"
  "fmt"
  "os"
  "os/signal"

@@ -10,6 +11,7 @@ import (
  "scroll-tech/database"

+ "scroll-tech/common/metrics"
  "scroll-tech/common/utils"
  "scroll-tech/common/version"

@@ -49,7 +51,10 @@ func action(ctx *cli.Context) error {
  log.Crit("failed to load config file", "config file", cfgFile, "error", err)
  }

- // init db connection
+ // Start metrics server.
+ metrics.Serve(context.Background(), ctx)
+
+ // Init db connection.
  var ormFactory database.OrmFactory
  if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
  log.Crit("failed to init db connection", "err", err)
@@ -1,6 +1,6 @@
  {
  "l1_config": {
- "confirmations": 6,
+ "confirmations": "0x6",
  "endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
  "l1_messenger_address": "0x0000000000000000000000000000000000000000",
  "rollup_contract_address": "0x0000000000000000000000000000000000000000",

@@ -11,11 +11,11 @@
  "endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
  "check_pending_time": 3,
  "escalate_blocks": 100,
- "confirmations": 1,
+ "confirmations": "0x1",
  "escalate_multiple_num": 11,
  "escalate_multiple_den": 10,
  "max_gas_price": 10000000000,
- "tx_type": "AccessListTx",
+ "tx_type": "LegacyTx",
  "min_balance": 100000000000000000000
  },
  "message_sender_private_keys": [

@@ -24,7 +24,7 @@
  }
  },
  "l2_config": {
- "confirmations": 1,
+ "confirmations": "0x1",
  "endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
  "l2_messenger_address": "0x0000000000000000000000000000000000000000",
  "relayer_config": {

@@ -34,11 +34,11 @@
  "endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
  "check_pending_time": 10,
  "escalate_blocks": 100,
- "confirmations": 6,
+ "confirmations": "0x6",
  "escalate_multiple_num": 11,
  "escalate_multiple_den": 10,
  "max_gas_price": 10000000000,
- "tx_type": "DynamicFeeTx",
+ "tx_type": "LegacyTx",
  "min_balance": 100000000000000000000
  },
  "message_sender_private_keys": [
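The `confirmations` values move from plain integers to quoted hex strings because the matching Go fields (changed below) switch from `uint64` to `rpc.BlockNumber`, which unmarshals JSON-RPC style block numbers. A minimal sketch, assuming go-ethereum's `rpc` package, of how such a field decodes both a hex quantity and a named tag:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/scroll-tech/go-ethereum/rpc"
)

type cfg struct {
	Confirmations rpc.BlockNumber `json:"confirmations"`
}

func main() {
	var a, b cfg
	// A quoted hex quantity such as "0x6" decodes to 6 ...
	_ = json.Unmarshal([]byte(`{"confirmations": "0x6"}`), &a)
	// ... while named tags like "latest" decode to the package's sentinel values.
	_ = json.Unmarshal([]byte(`{"confirmations": "latest"}`), &b)
	fmt.Println(a.Confirmations.Int64(), b.Confirmations.Int64()) // 6 and -1 (rpc.LatestBlockNumber)
}
```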
@@ -1,11 +1,14 @@
  package config

- import "github.com/scroll-tech/go-ethereum/common"
+ import (
+ "github.com/scroll-tech/go-ethereum/common"
+ "github.com/scroll-tech/go-ethereum/rpc"
+ )

  // L1Config loads l1eth configuration items.
  type L1Config struct {
  // Confirmations block height confirmations number.
- Confirmations uint64 `json:"confirmations"`
+ Confirmations rpc.BlockNumber `json:"confirmations"`
  // l1 eth node url.
  Endpoint string `json:"endpoint"`
  // The start height to sync event from layer 1
@@ -3,13 +3,15 @@ package config
  import (
  "encoding/json"

+ "github.com/scroll-tech/go-ethereum/rpc"
+
  "github.com/scroll-tech/go-ethereum/common"
  )

  // L2Config loads l2geth configuration items.
  type L2Config struct {
  // Confirmations block height confirmations number.
- Confirmations uint64 `json:"confirmations"`
+ Confirmations rpc.BlockNumber `json:"confirmations"`
  // l2geth node url.
  Endpoint string `json:"endpoint"`
  // The messenger contract address deployed on layer 2 chain.
@@ -8,6 +8,7 @@ import (
  "github.com/scroll-tech/go-ethereum/common"
  "github.com/scroll-tech/go-ethereum/crypto"
+ "github.com/scroll-tech/go-ethereum/rpc"
  )

  // SenderConfig The config for transaction sender

@@ -19,7 +20,7 @@ type SenderConfig struct {
  // The number of blocks to wait to escalate increase gas price of the transaction.
  EscalateBlocks uint64 `json:"escalate_blocks"`
  // The gap number between a block be confirmed and the latest block.
- Confirmations uint64 `json:"confirmations"`
+ Confirmations rpc.BlockNumber `json:"confirmations"`
  // The numerator of gas price escalate multiple.
  EscalateMultipleNum uint64 `json:"escalate_multiple_num"`
  // The denominator of gas price escalate multiple.
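The escalate numerator/denominator pair describes how the sender bumps the gas price of a transaction that has been stuck for `escalate_blocks` blocks. The snippet below is only an illustration of how such an 11/10 multiple is typically applied and capped by `max_gas_price`, not the repo's sender code:

```go
package main

import (
	"fmt"
	"math/big"
)

// escalate bumps gasPrice by num/den and clamps the result at maxGasPrice.
func escalate(gasPrice, maxGasPrice *big.Int, num, den uint64) *big.Int {
	bumped := new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(num))
	bumped.Div(bumped, new(big.Int).SetUint64(den))
	if bumped.Cmp(maxGasPrice) > 0 {
		return new(big.Int).Set(maxGasPrice)
	}
	return bumped
}

func main() {
	price := big.NewInt(2_000_000_000)     // 2 gwei
	maxPrice := big.NewInt(10_000_000_000) // max_gas_price from the config
	price = escalate(price, maxPrice, 11, 10)
	fmt.Println(price) // 2200000000
}
```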
@@ -5,10 +5,11 @@ go 1.18
  require (
  github.com/iden3/go-iden3-crypto v0.0.13
  github.com/orcaman/concurrent-map v1.0.0
- github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
+ github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
  github.com/stretchr/testify v1.8.0
  github.com/urfave/cli/v2 v2.10.2
  golang.org/x/sync v0.1.0
+ modernc.org/mathutil v1.4.1
  )

@@ -27,17 +28,18 @@ require (
  github.com/mattn/go-isatty v0.0.14 // indirect
  github.com/mitchellh/mapstructure v1.5.0 // indirect
  github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
  github.com/rjeczalik/notify v0.9.1 // indirect
  github.com/rogpeppe/go-internal v1.8.1 // indirect
  github.com/russross/blackfriday/v2 v2.1.0 // indirect
- github.com/scroll-tech/zktrie v0.3.1 // indirect
+ github.com/scroll-tech/zktrie v0.4.3 // indirect
  github.com/shirou/gopsutil v3.21.11+incompatible // indirect
  github.com/tklauser/go-sysconf v0.3.10 // indirect
  github.com/tklauser/numcpus v0.4.0 // indirect
  github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
  github.com/yusufpapurcu/wmi v1.2.2 // indirect
- golang.org/x/crypto v0.4.0 // indirect
- golang.org/x/sys v0.3.0 // indirect
+ golang.org/x/crypto v0.6.0 // indirect
+ golang.org/x/sys v0.5.0 // indirect
  gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
  gopkg.in/yaml.v3 v3.0.1 // indirect
  )
@@ -336,6 +336,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
  github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
  github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
  github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+ github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
+ github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
  github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
  github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
  github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=

@@ -348,11 +350,10 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
  github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
  github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
  github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
- github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
- github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
- github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
- github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
- github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
+ github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
+ github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod h1:OH4ZTAz6RM1IL0xcQ1zM6+Iy9s2vtcYqqwcEQdfHV7g=
+ github.com/scroll-tech/zktrie v0.4.3 h1:RyhusIu8F8u5ITmzqZjkAwlL6jdC9TK9i6tfuJoZcpk=
+ github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
  github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
  github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
  github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=

@@ -423,8 +424,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
  golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
  golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
  golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
- golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
- golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
+ golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
+ golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
  golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
  golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
  golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=

@@ -539,8 +540,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
  golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
  golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
  golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
- golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
- golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+ golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
+ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
  golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
  golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
  golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=

@@ -552,7 +553,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
  golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
  golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
  golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
- golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
+ golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
  golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
  golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
  golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

@@ -662,5 +663,7 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
  honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
  honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
  honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
+ modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
+ modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
  rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
  rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
@@ -26,7 +26,7 @@ func New(ctx context.Context, cfg *config.L1Config, orm database.OrmFactory) (*B
  return nil, err
  }

- relayer, err := NewLayer1Relayer(ctx, client, int64(cfg.Confirmations), orm, cfg.RelayerConfig)
+ relayer, err := NewLayer1Relayer(ctx, orm, cfg.RelayerConfig)
  if err != nil {
  return nil, err
  }
@@ -11,7 +11,6 @@ import (
  "github.com/scroll-tech/go-ethereum/accounts/abi"
  "github.com/scroll-tech/go-ethereum/common"
  "github.com/scroll-tech/go-ethereum/crypto"
- "github.com/scroll-tech/go-ethereum/ethclient"
  "github.com/scroll-tech/go-ethereum/log"

  "scroll-tech/database/orm"

@@ -29,7 +28,6 @@ import (
  // @todo It's better to be triggered by watcher.
  type Layer1Relayer struct {
  ctx context.Context
- client *ethclient.Client
  sender *sender.Sender

  db orm.L1MessageOrm

@@ -43,7 +41,7 @@ type Layer1Relayer struct {
  }

  // NewLayer1Relayer will return a new instance of Layer1RelayerClient
- func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1ConfirmNum int64, db orm.L1MessageOrm, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
+ func NewLayer1Relayer(ctx context.Context, db orm.L1MessageOrm, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
  l2MessengerABI, err := bridge_abi.L2MessengerMetaData.GetAbi()
  if err != nil {
  log.Warn("new L2MessengerABI failed", "err", err)

@@ -59,7 +57,6 @@ func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1Confir
  return &Layer1Relayer{
  ctx: ctx,
- client: ethClient,
  sender: sender,
  db: db,
  l2MessengerABI: l2MessengerABI,

@@ -72,15 +69,20 @@ func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1Confir
  // ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
  func (r *Layer1Relayer) ProcessSavedEvents() {
  // msgs are sorted by nonce in increasing order
- msgs, err := r.db.GetL1MessagesByStatus(orm.MsgPending)
+ msgs, err := r.db.GetL1MessagesByStatus(orm.MsgPending, 100)
  if err != nil {
  log.Error("Failed to fetch unprocessed L1 messages", "err", err)
  return
  }

+ if len(msgs) > 0 {
+ log.Info("Processing L1 messages", "count", len(msgs))
+ }

  for _, msg := range msgs {
  if err = r.processSavedEvent(msg); err != nil {
  if !errors.Is(err, sender.ErrNoAvailableAccount) {
- log.Error("failed to process event", "err", err)
+ log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
  }
  return
  }

@@ -109,6 +111,12 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
  }

  hash, err := r.sender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
+ if err != nil && err.Error() == "execution reverted: Message expired" {
+ return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgExpired)
+ }
+ if err != nil && err.Error() == "execution reverted: Message successfully executed" {
+ return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
+ }
  if err != nil {
  return err
  }
@@ -4,7 +4,6 @@ import (
  "context"
  "testing"

- "github.com/scroll-tech/go-ethereum/ethclient"
  "github.com/stretchr/testify/assert"

  "scroll-tech/database/migrate"

@@ -20,10 +19,7 @@ func testCreateNewL1Relayer(t *testing.T) {
  assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
  defer db.Close()

- client, err := ethclient.Dial(l1gethImg.Endpoint())
- assert.NoError(t, err)
-
- relayer, err := NewLayer1Relayer(context.Background(), client, 1, db, cfg.L2Config.RelayerConfig)
+ relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
  assert.NoError(t, err)
  defer relayer.Stop()
@@ -11,6 +11,8 @@ import (
  "github.com/scroll-tech/go-ethereum/core/types"
  "github.com/scroll-tech/go-ethereum/ethclient"
  "github.com/scroll-tech/go-ethereum/log"
+ "github.com/scroll-tech/go-ethereum/metrics"
+ "github.com/scroll-tech/go-ethereum/rpc"

  "scroll-tech/database"
  "scroll-tech/database/orm"

@@ -19,6 +21,10 @@ import (
  "scroll-tech/bridge/utils"
  )

+ var (
+ bridgeL1MsgSyncHeightGauge = metrics.NewRegisteredGauge("bridge/l1/msg/sync/height", nil)
+ )

  type relayedMessage struct {
  msgHash common.Hash
  txHash common.Hash

@@ -38,7 +44,7 @@ type Watcher struct {
  db database.OrmFactory

  // The number of new blocks to wait for a block to be confirmed
- confirmations uint64
+ confirmations rpc.BlockNumber
  messengerAddress common.Address
  messengerABI *abi.ABI

@@ -53,7 +59,7 @@ type Watcher struct {
  // NewWatcher returns a new instance of Watcher. The instance will be not fully prepared,
  // and still needs to be finalized and ran by calling `watcher.Start`.
- func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations uint64, messengerAddress common.Address, rollupAddress common.Address, db database.OrmFactory) *Watcher {
+ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress common.Address, rollupAddress common.Address, db database.OrmFactory) *Watcher {
  savedHeight, err := db.GetLayer1LatestWatchedHeight()
  if err != nil {
  log.Warn("Failed to fetch height from db", "err", err)
@@ -91,12 +97,13 @@ func (w *Watcher) Start() {
  return

  default:
- blockNumber, err := w.client.BlockNumber(w.ctx)
+ number, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.client, w.confirmations)
  if err != nil {
- log.Error("Failed to get block number", "err", err)
+ log.Error("failed to get block number", "err", err)
  continue
  }
- if err := w.FetchContractEvent(blockNumber); err != nil {
+ if err := w.FetchContractEvent(number); err != nil {
  log.Error("Failed to fetch bridge contract", "err", err)
  }
  }
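The watcher now delegates confirmation handling to `utils.GetLatestConfirmedBlockNumber` in `scroll-tech/bridge/utils` instead of subtracting `confirmations` inline. That helper's implementation is not shown in this diff; the sketch below is only a plausible reading of the behavior implied by the call sites, where a non-negative `rpc.BlockNumber` acts as a confirmation depth:

```go
package sketch

import (
	"context"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rpc"
)

// getLatestConfirmedBlockNumber is a hypothetical stand-in for the real helper;
// it shows one way a confirmations setting expressed as rpc.BlockNumber can be
// resolved against the current chain head.
func getLatestConfirmedBlockNumber(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber) (uint64, error) {
	head, err := client.BlockNumber(ctx)
	if err != nil {
		return 0, err
	}
	if confirmations >= 0 { // plain confirmation depth, e.g. "0x6"
		if head < uint64(confirmations) {
			return 0, nil
		}
		return head - uint64(confirmations), nil
	}
	// Negative values are rpc tags ("latest", "pending", ...); treat them as the head here.
	return head, nil
}
```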
@@ -118,7 +125,7 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
  }()

  fromBlock := int64(w.processedMsgHeight) + 1
- toBlock := int64(blockHeight) - int64(w.confirmations)
+ toBlock := int64(blockHeight)

  for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
  to := from + contractEventsBlocksFetchLimit - 1

@@ -138,11 +145,11 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
  Topics: make([][]common.Hash, 1),
  }
  query.Topics[0] = make([]common.Hash, 5)
- query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
- query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
- query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)
- query.Topics[0][3] = common.HexToHash(bridge_abi.COMMIT_BATCH_EVENT_SIGNATURE)
- query.Topics[0][4] = common.HexToHash(bridge_abi.FINALIZED_BATCH_EVENT_SIGNATURE)
+ query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
+ query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
+ query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
+ query.Topics[0][3] = common.HexToHash(bridge_abi.CommitBatchEventSignature)
+ query.Topics[0][4] = common.HexToHash(bridge_abi.FinalizedBatchEventSignature)

  logs, err := w.client.FilterLogs(w.ctx, query)
  if err != nil {

@@ -151,6 +158,7 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
  }
  if len(logs) == 0 {
  w.processedMsgHeight = uint64(to)
+ bridgeL1MsgSyncHeightGauge.Update(to)
  continue
  }
  log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))

@@ -199,10 +207,10 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
  for _, msg := range relayedMessageEvents {
  if msg.isSuccessful {
  // succeed
- err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
+ err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
  } else {
  // failed
- err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
+ err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
  }
  if err != nil {
  log.Error("Failed to update layer1 status and layer2 hash", "err", err)

@@ -215,6 +223,7 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
  }

  w.processedMsgHeight = uint64(to)
+ bridgeL1MsgSyncHeightGauge.Update(to)
  }

  return nil

@@ -229,7 +238,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
  var rollupEvents []rollupEvent
  for _, vLog := range logs {
  switch vLog.Topics[0] {
- case common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE):
+ case common.HexToHash(bridge_abi.SentMessageEventSignature):
  event := struct {
  Target common.Address
  Sender common.Address

@@ -250,7 +259,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
  event.Target = common.HexToAddress(vLog.Topics[1].String())
  l1Messages = append(l1Messages, &orm.L1Message{
  Nonce: event.MessageNonce.Uint64(),
- MsgHash: utils.ComputeMessageHash(event.Target, event.Sender, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
+ MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
  Height: vLog.BlockNumber,
  Sender: event.Sender.String(),
  Value: event.Value.String(),

@@ -261,7 +270,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
  Calldata: common.Bytes2Hex(event.Message),
  Layer1Hash: vLog.TxHash.Hex(),
  })
- case common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE):
+ case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
  event := struct {
  MsgHash common.Hash
  }{}

@@ -272,7 +281,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
  txHash: vLog.TxHash,
  isSuccessful: true,
  })
- case common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE):
+ case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
  event := struct {
  MsgHash common.Hash
  }{}

@@ -283,7 +292,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
  txHash: vLog.TxHash,
  isSuccessful: false,
  })
- case common.HexToHash(bridge_abi.COMMIT_BATCH_EVENT_SIGNATURE):
+ case common.HexToHash(bridge_abi.CommitBatchEventSignature):
  event := struct {
  BatchID common.Hash
  BatchHash common.Hash

@@ -303,7 +312,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
  txHash: vLog.TxHash,
  status: orm.RollupCommitted,
  })
- case common.HexToHash(bridge_abi.FINALIZED_BATCH_EVENT_SIGNATURE):
+ case common.HexToHash(bridge_abi.FinalizedBatchEventSignature):
  event := struct {
  BatchID common.Hash
  BatchHash common.Hash
@@ -40,7 +40,7 @@ func newBatchProposer(cfg *config.BatchProposerConfig, orm database.OrmFactory)
  }
  }

- func (w *batchProposer) tryProposeBatch() error {
+ func (w *batchProposer) tryProposeBatch() {
  w.mutex.Lock()
  defer w.mutex.Unlock()

@@ -49,20 +49,27 @@ func (w *batchProposer) tryProposeBatch() error {
  fmt.Sprintf("order by number ASC LIMIT %d", w.batchBlocksLimit),
  )
  if err != nil {
- return err
+ log.Error("failed to get unbatched blocks", "err", err)
+ return
  }
  if len(blocks) == 0 {
- return nil
+ return
  }

  if blocks[0].GasUsed > w.batchGasThreshold {
  log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
- return w.createBatchForBlocks(blocks[:1])
+ if err = w.createBatchForBlocks(blocks[:1]); err != nil {
+ log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
+ }
+ return
  }

  if blocks[0].TxNum > w.batchTxNumThreshold {
  log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum)
- return w.createBatchForBlocks(blocks[:1])
+ if err = w.createBatchForBlocks(blocks[:1]); err != nil {
+ log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
+ }
+ return
  }

  var (

@@ -83,10 +90,12 @@ func (w *batchProposer) tryProposeBatch() error {
  // if it's not old enough we will skip proposing the batch,
  // otherwise we will still propose a batch
  if length == len(blocks) && blocks[0].BlockTimestamp+w.batchTimeSec > uint64(time.Now().Unix()) {
- return nil
+ return
  }

- return w.createBatchForBlocks(blocks)
+ if err = w.createBatchForBlocks(blocks); err != nil {
+ log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
+ }
  }

  func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) error {
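The proposer's decision logic stays the same through this refactor: seal a batch as soon as the pending blocks exceed the gas or tx-count thresholds, otherwise only once the oldest unbatched block has aged past `batchTimeSec`. The following is a simplified, illustrative restatement of that rule (hypothetical types, not the repo's `batchProposer`):

```go
package main

import "fmt"

type blockInfo struct {
	GasUsed, TxNum, Timestamp uint64
}

// shouldSealNow restates the proposal conditions: seal when the accumulated
// gas or tx count crosses a threshold, or when the oldest block is old enough.
func shouldSealNow(blocks []blockInfo, gasThreshold, txNumThreshold, batchTimeSec, now uint64) bool {
	if len(blocks) == 0 {
		return false
	}
	var gas, txs uint64
	for _, b := range blocks {
		gas += b.GasUsed
		txs += b.TxNum
	}
	if gas > gasThreshold || txs > txNumThreshold {
		return true
	}
	return blocks[0].Timestamp+batchTimeSec <= now
}

func main() {
	blocks := []blockInfo{{GasUsed: 8_000_000, TxNum: 20, Timestamp: 1_700_000_000}}
	fmt.Println(shouldSealNow(blocks, 7_000_000, 100, 300, 1_700_000_100)) // true: gas threshold exceeded
}
```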
@@ -49,7 +49,7 @@ func testBatchProposer(t *testing.T) {
  BatchTimeSec: 1,
  BatchBlocksLimit: 100,
  }, db)
- assert.NoError(t, proposer.tryProposeBatch())
+ proposer.tryProposeBatch()

  infos, err := db.GetUnbatchedBlocks(map[string]interface{}{},
  fmt.Sprintf("order by number ASC LIMIT %d", 100))

@@ -77,6 +77,7 @@ func TestFunction(t *testing.T) {
  t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
  t.Run("testL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
  t.Run("testL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
+ t.Run("testL2RelayerSkipBatches", testL2RelayerSkipBatches)

  t.Run("testBatchProposer", testBatchProposer)
@@ -3,7 +3,9 @@ package l2
  import (
  "context"
  "errors"
  "fmt"
  "math/big"
  "runtime"
  "sync"
  "time"

@@ -13,6 +15,7 @@ import (
  "github.com/scroll-tech/go-ethereum/common"
  "github.com/scroll-tech/go-ethereum/log"
  "golang.org/x/sync/errgroup"
  "modernc.org/mathutil"

  "scroll-tech/database"
  "scroll-tech/database/orm"

@@ -43,14 +46,17 @@ type Layer2Relayer struct {
  rollupCh <-chan *sender.Confirmation
  l1RollupABI *abi.ABI

- // a list of processing message, indexed by layer2 hash
- processingMessage map[string]string
+ // A list of processing message.
+ // key(string): confirmation ID, value(string): layer2 hash.
+ processingMessage sync.Map

- // a list of processing batch commitment, indexed by batch id
- processingCommitment map[string]string
+ // A list of processing batch commitment.
+ // key(string): confirmation ID, value(string): batch id.
+ processingCommitment sync.Map

- // a list of processing batch finalization, indexed by batch id
- processingFinalization map[string]string
+ // A list of processing batch finalization.
+ // key(string): confirmation ID, value(string): batch id.
+ processingFinalization sync.Map

  stopCh chan struct{}
  }

@@ -80,13 +86,15 @@ func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
  rollupCh: rollupSender.ConfirmChan(),
  l1RollupABI: bridge_abi.RollupMetaABI,
  cfg: cfg,
- processingMessage: map[string]string{},
- processingCommitment: map[string]string{},
- processingFinalization: map[string]string{},
+ processingMessage: sync.Map{},
+ processingCommitment: sync.Map{},
+ processingFinalization: sync.Map{},
  stopCh: make(chan struct{}),
  }, nil
  }

+ const processMsgLimit = 100

  // ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
  func (r *Layer2Relayer) ProcessSavedEvents() {
  batch, err := r.db.GetLatestFinalizedBatch()

@@ -96,7 +104,11 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
  }

  // msgs are sorted by nonce in increasing order
- msgs, err := r.db.GetL2MessagesByStatusUpToHeight(orm.MsgPending, batch.EndBlockNumber)
+ msgs, err := r.db.GetL2Messages(
+ map[string]interface{}{"status": orm.MsgPending},
+ fmt.Sprintf("AND height<=%d", batch.EndBlockNumber),
+ fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
+ )

  if err != nil {
  log.Error("Failed to fetch unprocessed L2 messages", "err", err)

@@ -104,25 +116,18 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
  }

  // process messages in batches
- batch_size := r.messageSender.NumberOfAccounts()
- for from := 0; from < len(msgs); from += batch_size {
- to := from + batch_size
- if to > len(msgs) {
- to = len(msgs)
+ batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, r.messageSender.NumberOfAccounts())
+ for size := 0; len(msgs) > 0; msgs = msgs[size:] {
+ if size = len(msgs); size > batchSize {
+ size = batchSize
  }

  var g errgroup.Group
- for i := from; i < to; i++ {
- msg := msgs[i]
+ for _, msg := range msgs[:size] {
+ msg := msg
  g.Go(func() error {
- return r.processSavedEvent(msg, batch)
+ return r.processSavedEvent(msg, batch.Index)
  })
  }

  if err := g.Wait(); err != nil {
  if !errors.Is(err, sender.ErrNoAvailableAccount) {
  log.Error("failed to process l2 saved event", "err", err)
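The rewritten loop above consumes the pending messages chunk by chunk, relaying each chunk of at most `batchSize` messages concurrently through an `errgroup` and stopping at the first error. A self-contained sketch of the same slicing-plus-errgroup pattern, with a plain `minInt` helper standing in for `mathutil.Min`:

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	msgs := []int{1, 2, 3, 4, 5, 6, 7}
	batchSize := 3

	// Consume msgs chunk by chunk, processing each chunk concurrently.
	for size := 0; len(msgs) > 0; msgs = msgs[size:] {
		size = minInt(len(msgs), batchSize)

		var g errgroup.Group
		for _, msg := range msgs[:size] {
			msg := msg // capture the loop variable for the goroutine
			g.Go(func() error {
				fmt.Println("processing", msg)
				return nil
			})
		}
		if err := g.Wait(); err != nil {
			return // the first error aborts the whole run, as in the relayer
		}
	}
}
```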
@@ -132,13 +137,13 @@
  }
  }

- func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, batch *orm.BlockBatch) error {
+ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) error {
  // @todo fetch merkle proof from l2geth
  log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)

  proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
  BlockHeight: big.NewInt(int64(msg.Height)),
- BatchIndex: big.NewInt(int64(batch.Index)),
+ BatchIndex: big.NewInt(0).SetUint64(index),
  MerkleProof: make([]byte, 0),
  }
  from := common.HexToAddress(msg.Sender)

@@ -161,6 +166,12 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, batch *orm.BlockBa
  }

  hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
+ if err != nil && err.Error() == "execution reverted: Message expired" {
+ return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgExpired)
+ }
+ if err != nil && err.Error() == "execution reverted: Message successfully executed" {
+ return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
+ }
  if err != nil {
  if !errors.Is(err, sender.ErrNoAvailableAccount) {
  log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)

@@ -176,14 +187,14 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, batch *orm.BlockBa
  log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
  return err
  }
- r.processingMessage[msg.MsgHash] = msg.MsgHash
+ r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
  return nil
  }

  // ProcessPendingBatches submit batch data to layer 1 rollup contract
  func (r *Layer2Relayer) ProcessPendingBatches() {
  // batches are sorted by batch index in increasing order
- batchesInDB, err := r.db.GetPendingBatches()
+ batchesInDB, err := r.db.GetPendingBatches(1)
  if err != nil {
  log.Error("Failed to fetch pending L2 batches", "err", err)
  return
@@ -263,20 +274,28 @@
  }
  return
  }
- log.Info("commitBatch in layer1", "batchID", id, "index", batch.Index, "hash", hash)
+ log.Info("commitBatch in layer1", "batch_id", id, "index", batch.Index, "hash", hash)

  // record and sync with db, @todo handle db error
  err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
  if err != nil {
  log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
  }
- r.processingCommitment[txID] = id
+ r.processingCommitment.Store(txID, id)
  }

  // ProcessCommittedBatches submit proof to layer 1 rollup contract
  func (r *Layer2Relayer) ProcessCommittedBatches() {
+ // set skipped batches in a single db operation
+ if count, err := r.db.UpdateSkippedBatches(); err != nil {
+ log.Error("UpdateSkippedBatches failed", "err", err)
+ // continue anyway
+ } else if count > 0 {
+ log.Info("Skipping batches", "count", count)
+ }

  // batches are sorted by batch index in increasing order
- batches, err := r.db.GetCommittedBatches()
+ batches, err := r.db.GetCommittedBatches(1)
  if err != nil {
  log.Error("Failed to fetch committed L2 batches", "err", err)
  return

@@ -304,6 +323,8 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
  return

  case orm.ProvingTaskFailed, orm.ProvingTaskSkipped:
+ // note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake

  if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
  log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
  }

@@ -358,15 +379,15 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
  }
  return
  }
- log.Info("finalizeBatchWithProof in layer1", "batchID", id, "hash", hash)
+ log.Info("finalizeBatchWithProof in layer1", "batch_id", id, "hash", hash)

  // record and sync with db, @todo handle db error
  err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
  if err != nil {
- log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batchID", id, "err", err)
+ log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", id, "err", err)
  }
  success = true
- r.processingFinalization[txID] = id
+ r.processingFinalization.Store(txID, id)

  default:
  log.Error("encounter unreachable case in ProcessCommittedBatches",
@@ -377,42 +398,42 @@ (old and new versions of Start are rendered together below)

  // Start the relayer process
  func (r *Layer2Relayer) Start() {
  go func() {
  // trigger by timer
  loop := func(ctx context.Context, f func()) {
  ticker := time.NewTicker(time.Second)
  defer ticker.Stop()

  for {
  select {
  case <-ticker.C:
  var wg sync.WaitGroup
  wg.Add(3)

  go func() {
  defer wg.Done()
  r.ProcessSavedEvents()
  }()

  go func() {
  defer wg.Done()
  r.ProcessPendingBatches()
  }()

  go func() {
  defer wg.Done()
  r.ProcessCommittedBatches()
  }()

  wg.Wait()

  case confirmation := <-r.messageCh:
  r.handleConfirmation(confirmation)
  case confirmation := <-r.rollupCh:
  r.handleConfirmation(confirmation)
  case <-r.stopCh:
  case <-ctx.Done():
  return
  case <-ticker.C:
  f()
  }
  }
  }

  go func() {
  ctx, cancel := context.WithCancel(r.ctx)

  go loop(ctx, r.ProcessSavedEvents)
  go loop(ctx, r.ProcessPendingBatches)
  go loop(ctx, r.ProcessCommittedBatches)

  go func(ctx context.Context) {
  for {
  select {
  case <-ctx.Done():
  return
  case confirmation := <-r.messageCh:
  r.handleConfirmation(confirmation)
  case confirmation := <-r.rollupCh:
  r.handleConfirmation(confirmation)
  }
  }
  }(ctx)

  <-r.stopCh
  cancel()
  }()
  }
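The Start rewrite shown above replaces the single select-with-WaitGroup loop with one ticker goroutine per task, all cancelled through a shared context derived from `r.ctx`. A condensed, self-contained sketch of that pattern (the `loop` helper mirrors the one introduced in the diff):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// loop runs f on every tick until the context is cancelled.
func loop(ctx context.Context, f func()) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			f()
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	go loop(ctx, func() { fmt.Println("process saved events") })
	go loop(ctx, func() { fmt.Println("process pending batches") })
	go loop(ctx, func() { fmt.Println("process committed batches") })

	time.Sleep(3 * time.Second) // in the relayer this wait is <-r.stopCh
	cancel()
}
```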
@@ -429,36 +450,36 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
  transactionType := "Unknown"
  // check whether it is message relay transaction
- if msgHash, ok := r.processingMessage[confirmation.ID]; ok {
+ if msgHash, ok := r.processingMessage.Load(confirmation.ID); ok {
  transactionType = "MessageRelay"
  // @todo handle db error
- err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash, orm.MsgConfirmed, confirmation.TxHash.String())
+ err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), orm.MsgConfirmed, confirmation.TxHash.String())
  if err != nil {
- log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash, "err", err)
+ log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
  }
- delete(r.processingMessage, confirmation.ID)
+ r.processingMessage.Delete(confirmation.ID)
  }

  // check whether it is block commitment transaction
- if batch_id, ok := r.processingCommitment[confirmation.ID]; ok {
+ if batchID, ok := r.processingCommitment.Load(confirmation.ID); ok {
  transactionType = "BatchCommitment"
  // @todo handle db error
- err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batch_id, confirmation.TxHash.String(), orm.RollupCommitted)
+ err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupCommitted)
  if err != nil {
- log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batch_id, "err", err)
+ log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
  }
- delete(r.processingCommitment, confirmation.ID)
+ r.processingCommitment.Delete(confirmation.ID)
  }

  // check whether it is proof finalization transaction
- if batch_id, ok := r.processingFinalization[confirmation.ID]; ok {
+ if batchID, ok := r.processingFinalization.Load(confirmation.ID); ok {
  transactionType = "ProofFinalization"
  // @todo handle db error
- err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batch_id, confirmation.TxHash.String(), orm.RollupFinalized)
+ err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupFinalized)
  if err != nil {
- log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batch_id, "err", err)
+ log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
  }
- delete(r.processingFinalization, confirmation.ID)
+ r.processingFinalization.Delete(confirmation.ID)
  }
  log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
  }
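The in-flight maps become `sync.Map` so the processing loops and the confirmation handler can touch them from different goroutines without an explicit mutex; values come back as `interface{}`, hence the `.(string)` assertions above. A minimal sketch of the same Store/Load/Delete pattern:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var processing sync.Map // confirmation ID -> batch ID, safe for concurrent use

	processing.Store("tx-1", "batch-42")

	if v, ok := processing.Load("tx-1"); ok {
		batchID := v.(string) // values are interface{}, so assert back to string
		fmt.Println("confirmed", batchID)
		processing.Delete("tx-1")
	}
}
```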
@@ -193,3 +193,68 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
  assert.NoError(t, err)
  assert.Equal(t, orm.RollupFinalizing, status)
  }

+ func testL2RelayerSkipBatches(t *testing.T) {
+ // Create db handler and reset db.
+ db, err := database.NewOrmFactory(cfg.DBConfig)
+ assert.NoError(t, err)
+ assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
+ defer db.Close()
+
+ l2Cfg := cfg.L2Config
+ relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
+ assert.NoError(t, err)
+ defer relayer.Stop()
+
+ createBatch := func(rollupStatus orm.RollupStatus, provingStatus orm.ProvingStatus) string {
+ dbTx, err := db.Beginx()
+ assert.NoError(t, err)
+ batchID, err := db.NewBatchInDBTx(dbTx, &orm.BlockInfo{}, &orm.BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
+ assert.NoError(t, err)
+ err = dbTx.Commit()
+ assert.NoError(t, err)
+
+ err = db.UpdateRollupStatus(context.Background(), batchID, rollupStatus)
+ assert.NoError(t, err)
+
+ tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
+ tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
+ err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
+ assert.NoError(t, err)
+ err = db.UpdateProvingStatus(batchID, provingStatus)
+ assert.NoError(t, err)
+
+ return batchID
+ }
+
+ skipped := []string{
+ createBatch(orm.RollupCommitted, orm.ProvingTaskSkipped),
+ createBatch(orm.RollupCommitted, orm.ProvingTaskFailed),
+ }
+
+ notSkipped := []string{
+ createBatch(orm.RollupPending, orm.ProvingTaskSkipped),
+ createBatch(orm.RollupCommitting, orm.ProvingTaskSkipped),
+ createBatch(orm.RollupFinalizing, orm.ProvingTaskSkipped),
+ createBatch(orm.RollupFinalized, orm.ProvingTaskSkipped),
+ createBatch(orm.RollupPending, orm.ProvingTaskFailed),
+ createBatch(orm.RollupCommitting, orm.ProvingTaskFailed),
+ createBatch(orm.RollupFinalizing, orm.ProvingTaskFailed),
+ createBatch(orm.RollupFinalized, orm.ProvingTaskFailed),
+ createBatch(orm.RollupCommitted, orm.ProvingTaskVerified),
+ }
+
+ relayer.ProcessCommittedBatches()
+
+ for _, id := range skipped {
+ status, err := db.GetRollupStatus(id)
+ assert.NoError(t, err)
+ assert.Equal(t, orm.RollupFinalizationSkipped, status)
+ }
+
+ for _, id := range notSkipped {
+ status, err := db.GetRollupStatus(id)
+ assert.NoError(t, err)
+ assert.NotEqual(t, orm.RollupFinalizationSkipped, status)
+ }
+ }
@@ -5,7 +5,6 @@ import (
  "fmt"
  "math/big"
  "reflect"
- "sync"
  "time"

  geth "github.com/scroll-tech/go-ethereum"

@@ -15,6 +14,8 @@ import (
  "github.com/scroll-tech/go-ethereum/ethclient"
  "github.com/scroll-tech/go-ethereum/event"
  "github.com/scroll-tech/go-ethereum/log"
+ "github.com/scroll-tech/go-ethereum/metrics"
+ "github.com/scroll-tech/go-ethereum/rpc"

  bridge_abi "scroll-tech/bridge/abi"
  "scroll-tech/bridge/utils"

@@ -25,6 +26,11 @@ import (
  "scroll-tech/bridge/config"
  )

+ // Metrics
+ var (
+ bridgeL2MsgSyncHeightGauge = metrics.NewRegisteredGauge("bridge/l2/msg/sync/height", nil)
+ )

  type relayedMessage struct {
  msgHash common.Hash
  txHash common.Hash

@@ -40,7 +46,7 @@ type WatcherClient struct {
  orm database.OrmFactory

- confirmations uint64
+ confirmations rpc.BlockNumber
  messengerAddress common.Address
  messengerABI *abi.ABI

@@ -54,7 +60,7 @@ type WatcherClient struct {
  }

  // NewL2WatcherClient take a l2geth instance to generate a l2watcherclient instance
- func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations uint64, bpCfg *config.BatchProposerConfig, messengerAddress common.Address, orm database.OrmFactory) *WatcherClient {
+ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, bpCfg *config.BatchProposerConfig, messengerAddress common.Address, orm database.OrmFactory) *WatcherClient {
  savedHeight, err := orm.GetLayer2LatestWatchedHeight()
  if err != nil {
  log.Warn("fetch height from db failed", "err", err)
@@ -82,56 +88,70 @@ func (w *WatcherClient) Start() {
|
||||
panic("must run L2 watcher with DB")
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(3 * time.Second)
|
||||
defer ticker.Stop()
|
||||
ctx, cancel := context.WithCancel(w.ctx)
|
||||
|
||||
for ; true; <-ticker.C {
|
||||
select {
|
||||
case <-w.stopCh:
|
||||
return
|
||||
// trace fetcher loop
|
||||
go func(ctx context.Context) {
|
||||
ticker := time.NewTicker(3 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
default:
|
||||
// get current height
|
||||
number, err := w.BlockNumber(w.ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get_BlockNumber", "err", err)
|
||||
continue
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
|
||||
case <-ticker.C:
|
||||
number, err := utils.GetLatestConfirmedBlockNumber(ctx, w.Client, w.confirmations)
|
||||
if err != nil {
|
||||
log.Error("failed to get block number", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
w.tryFetchRunningMissingBlocks(ctx, number)
|
||||
}
|
||||
|
||||
if number >= w.confirmations {
|
||||
number = number - w.confirmations
|
||||
} else {
|
||||
number = 0
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(3)
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if err := w.tryFetchRunningMissingBlocks(w.ctx, number); err != nil {
|
||||
log.Error("failed to fetchRunningMissingBlocks", "err", err)
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
// @todo handle error
|
||||
if err := w.fetchContractEvent(number); err != nil {
|
||||
log.Error("failed to fetchContractEvent", "err", err)
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if err := w.batchProposer.tryProposeBatch(); err != nil {
|
||||
log.Error("failed to tryProposeBatch", "err", err)
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
}(ctx)
|
||||
|
||||
// event fetcher loop
|
||||
go func(ctx context.Context) {
|
||||
ticker := time.NewTicker(3 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
|
||||
case <-ticker.C:
|
||||
number, err := utils.GetLatestConfirmedBlockNumber(ctx, w.Client, w.confirmations)
|
||||
if err != nil {
|
||||
log.Error("failed to get block number", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
w.FetchContractEvent(number)
|
||||
}
|
||||
}
|
||||
}(ctx)
|
||||
|
||||
// batch proposer loop
|
||||
go func(ctx context.Context) {
|
||||
ticker := time.NewTicker(3 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
|
||||
case <-ticker.C:
|
||||
w.batchProposer.tryProposeBatch()
|
||||
}
|
||||
}
|
||||
}(ctx)
|
||||
|
||||
<-w.stopCh
|
||||
cancel()
|
||||
}()
|
||||
}
|
||||
|
||||
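The Start refactor above replaces the single polling loop with three independent ticker-driven goroutines (trace fetcher, event fetcher, batch proposer) that all exit once the watcher's context is canceled. A minimal, stand-alone sketch of that pattern (the task functions are placeholders, not the bridge's real fetchers) could look like this:

package main

import (
    "context"
    "fmt"
    "time"
)

// startLoops launches one ticker-driven goroutine per task and stops all of
// them when ctx is canceled, mirroring the trace-fetcher / event-fetcher /
// batch-proposer split above. The task bodies are placeholders.
func startLoops(ctx context.Context, tasks ...func(context.Context)) {
    for _, task := range tasks {
        task := task
        go func() {
            ticker := time.NewTicker(3 * time.Second)
            defer ticker.Stop()
            for {
                select {
                case <-ctx.Done():
                    return
                case <-ticker.C:
                    task(ctx)
                }
            }
        }()
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    startLoops(ctx,
        func(context.Context) { fmt.Println("fetch traces") },
        func(context.Context) { fmt.Println("fetch contract events") },
        func(context.Context) { fmt.Println("try to propose a batch") },
    )
    time.Sleep(7 * time.Second) // let the loops tick a couple of times
    cancel()
}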
@@ -143,13 +163,14 @@ func (w *WatcherClient) Stop() {
|
||||
const blockTracesFetchLimit = uint64(10)
|
||||
|
||||
// try to fetch missing blocks if inconsistent
|
||||
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) error {
|
||||
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) {
|
||||
// Get newest block in DB. must have blocks at that time.
|
||||
// Don't use "block_trace" table "trace" column's BlockTrace.Number,
|
||||
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
|
||||
heightInDB, err := w.orm.GetBlockTracesLatestHeight()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to GetBlockTracesLatestHeight in DB: %v", err)
|
||||
log.Error("failed to GetBlockTracesLatestHeight", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Can't get trace from genesis block, so the default start number is 1.
|
||||
@@ -167,12 +188,10 @@ func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockH
|
||||
|
||||
// Get block traces and insert into db.
|
||||
if err = w.getAndStoreBlockTraces(ctx, from, to); err != nil {
|
||||
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to)
|
||||
return err
|
||||
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
|
||||
@@ -201,7 +220,7 @@ func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uin
|
||||
const contractEventsBlocksFetchLimit = int64(10)
|
||||
|
||||
// FetchContractEvent pulls the latest event logs from the given contract address and saves them in the DB
|
||||
func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
|
||||
func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
|
||||
defer func() {
|
||||
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
|
||||
}()
|
||||
@@ -226,17 +245,18 @@ func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
|
||||
Topics: make([][]common.Hash, 1),
|
||||
}
|
||||
query.Topics[0] = make([]common.Hash, 3)
|
||||
query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
|
||||
query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
|
||||
query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)
|
||||
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
|
||||
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
|
||||
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
|
||||
|
||||
logs, err := w.FilterLogs(w.ctx, query)
|
||||
if err != nil {
|
||||
log.Error("failed to get event logs", "err", err)
|
||||
return err
|
||||
return
|
||||
}
|
||||
if len(logs) == 0 {
|
||||
w.processedMsgHeight = uint64(to)
|
||||
bridgeL2MsgSyncHeightGauge.Update(to)
|
||||
continue
|
||||
}
|
||||
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
|
||||
@@ -244,7 +264,7 @@ func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
|
||||
sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
|
||||
if err != nil {
|
||||
log.Error("failed to parse emitted event log", "err", err)
|
||||
return err
|
||||
return
|
||||
}
|
||||
|
||||
// Update relayed message first to make sure we don't forget to update submitted message.
|
||||
@@ -259,18 +279,18 @@ func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
|
||||
return err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
|
||||
return err
|
||||
log.Error("failed to save l2 messages", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
w.processedMsgHeight = uint64(to)
|
||||
bridgeL2MsgSyncHeightGauge.Update(to)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message, []relayedMessage, error) {
|
||||
@@ -281,7 +301,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
|
||||
var relayedMessages []relayedMessage
|
||||
for _, vLog := range logs {
|
||||
switch vLog.Topics[0] {
|
||||
case common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE):
|
||||
case common.HexToHash(bridge_abi.SentMessageEventSignature):
|
||||
event := struct {
|
||||
Target common.Address
|
||||
Sender common.Address
|
||||
@@ -302,7 +322,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
|
||||
event.Target = common.HexToAddress(vLog.Topics[1].String())
|
||||
l2Messages = append(l2Messages, &orm.L2Message{
|
||||
Nonce: event.MessageNonce.Uint64(),
|
||||
MsgHash: utils.ComputeMessageHash(event.Target, event.Sender, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
|
||||
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
|
||||
Height: vLog.BlockNumber,
|
||||
Sender: event.Sender.String(),
|
||||
Value: event.Value.String(),
|
||||
@@ -313,7 +333,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
|
||||
Calldata: common.Bytes2Hex(event.Message),
|
||||
Layer2Hash: vLog.TxHash.Hex(),
|
||||
})
|
||||
case common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE):
|
||||
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
|
||||
event := struct {
|
||||
MsgHash common.Hash
|
||||
}{}
|
||||
@@ -324,7 +344,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
|
||||
txHash: vLog.TxHash,
|
||||
isSuccessful: true,
|
||||
})
|
||||
case common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE):
|
||||
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
|
||||
event := struct {
|
||||
MsgHash common.Hash
|
||||
}{}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/bridge/config"
|
||||
@@ -36,7 +37,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
|
||||
defer rc.Stop()
|
||||
|
||||
l1cfg := cfg.L1Config
|
||||
l1cfg.RelayerConfig.SenderConfig.Confirmations = 0
|
||||
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
|
||||
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKeys)
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -112,7 +113,7 @@ func testMonitorBridgeContract(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
t.Log("Height in DB is", height)
|
||||
assert.Greater(t, height, int64(previousHeight))
|
||||
msgs, err := db.GetL2MessagesByStatus(orm.MsgPending)
|
||||
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 2, len(msgs))
|
||||
}
|
||||
@@ -184,13 +185,14 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
t.Log("LatestHeight is", height)
|
||||
assert.Greater(t, height, int64(previousHeight)) // height must be greater than previousHeight because confirmations is 0
|
||||
msgs, err := db.GetL2MessagesByStatus(orm.MsgPending)
|
||||
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 5, len(msgs))
|
||||
}
|
||||
|
||||
func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
|
||||
return NewL2WatcherClient(context.Background(), l2Cli, 0, bpCfg, contractAddr, db)
|
||||
confirmations := rpc.LatestBlockNumber
|
||||
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, bpCfg, contractAddr, db)
|
||||
}
|
||||
|
||||
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
|
||||
|
||||
@@ -16,10 +16,9 @@ import (
|
||||
type accountPool struct {
|
||||
client *ethclient.Client
|
||||
|
||||
minBalance *big.Int
|
||||
accounts map[common.Address]*bind.TransactOpts
|
||||
numAccounts int
|
||||
accsCh chan *bind.TransactOpts
|
||||
minBalance *big.Int
|
||||
accounts map[common.Address]*bind.TransactOpts
|
||||
accsCh chan *bind.TransactOpts
|
||||
}
|
||||
|
||||
// newAccounts creates an accountPool instance.
|
||||
@@ -29,11 +28,10 @@ func newAccountPool(ctx context.Context, minBalance *big.Int, client *ethclient.
|
||||
minBalance.SetString("100000000000000000000", 10)
|
||||
}
|
||||
accs := &accountPool{
|
||||
client: client,
|
||||
minBalance: minBalance,
|
||||
accounts: make(map[common.Address]*bind.TransactOpts, len(privs)),
|
||||
numAccounts: len(privs),
|
||||
accsCh: make(chan *bind.TransactOpts, len(privs)+2),
|
||||
client: client,
|
||||
minBalance: minBalance,
|
||||
accounts: make(map[common.Address]*bind.TransactOpts, len(privs)),
|
||||
accsCh: make(chan *bind.TransactOpts, len(privs)+2),
|
||||
}
|
||||
|
||||
// get chainID from client
|
||||
|
||||
@@ -20,6 +20,8 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/bridge/utils"
|
||||
|
||||
"scroll-tech/bridge/config"
|
||||
)
|
||||
|
||||
@@ -120,6 +122,15 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var baseFeePerGas uint64
|
||||
if config.TxType == DynamicFeeTxType {
|
||||
if header.BaseFee != nil {
|
||||
baseFeePerGas = header.BaseFee.Uint64()
|
||||
} else {
|
||||
return nil, errors.New("DynamicFeeTxType not supported, header.BaseFee nil")
|
||||
}
|
||||
}
|
||||
|
||||
sender := &Sender{
|
||||
ctx: ctx,
|
||||
config: config,
|
||||
@@ -128,7 +139,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
|
||||
auths: auths,
|
||||
confirmCh: make(chan *Confirmation, 128),
|
||||
blockNumber: header.Number.Uint64(),
|
||||
baseFeePerGas: header.BaseFee.Uint64(),
|
||||
baseFeePerGas: baseFeePerGas,
|
||||
pendingTxs: sync.Map{},
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
@@ -151,7 +162,7 @@ func (s *Sender) ConfirmChan() <-chan *Confirmation {
|
||||
|
||||
// NumberOfAccounts returns the count of accounts.
|
||||
func (s *Sender) NumberOfAccounts() int {
|
||||
return s.auths.numAccounts
|
||||
return len(s.auths.accounts)
|
||||
}
|
||||
|
||||
func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte) (*FeeData, error) {
|
||||
@@ -352,11 +363,20 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
|
||||
return s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
|
||||
}
|
||||
|
||||
// CheckPendingTransaction Check pending transaction given number of blocks to wait before confirmation.
|
||||
func (s *Sender) CheckPendingTransaction(header *types.Header) {
|
||||
// checkPendingTransaction checks the confirmation status of pending transactions against the latest confirmed block number.
|
||||
// If a transaction hasn't been confirmed after a certain number of blocks, it will be resubmitted with an increased gas price.
|
||||
func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64) {
|
||||
number := header.Number.Uint64()
|
||||
atomic.StoreUint64(&s.blockNumber, number)
|
||||
atomic.StoreUint64(&s.baseFeePerGas, header.BaseFee.Uint64())
|
||||
|
||||
if s.config.TxType == DynamicFeeTxType {
|
||||
if header.BaseFee != nil {
|
||||
atomic.StoreUint64(&s.baseFeePerGas, header.BaseFee.Uint64())
|
||||
} else {
|
||||
log.Error("DynamicFeeTxType not supported, header.BaseFee nil")
|
||||
}
|
||||
}
|
||||
|
||||
s.pendingTxs.Range(func(key, value interface{}) bool {
|
||||
// ignore empty id, since we use empty id to occupy pending task
|
||||
if value == nil || reflect.ValueOf(value).IsNil() {
|
||||
@@ -366,7 +386,7 @@ func (s *Sender) CheckPendingTransaction(header *types.Header) {
|
||||
pending := value.(*PendingTransaction)
|
||||
receipt, err := s.client.TransactionReceipt(s.ctx, pending.tx.Hash())
|
||||
if (err == nil) && (receipt != nil) {
|
||||
if number >= receipt.BlockNumber.Uint64()+s.config.Confirmations {
|
||||
if receipt.BlockNumber.Uint64() <= confirmed {
|
||||
s.pendingTxs.Delete(key)
|
||||
// send confirm message
|
||||
s.confirmCh <- &Confirmation{
|
||||
@@ -440,7 +460,14 @@ func (s *Sender) loop(ctx context.Context) {
|
||||
log.Error("failed to get latest head", "err", err)
|
||||
continue
|
||||
}
|
||||
s.CheckPendingTransaction(header)
|
||||
|
||||
confirmed, err := utils.GetLatestConfirmedBlockNumber(s.ctx, s.client, s.config.Confirmations)
|
||||
if err != nil {
|
||||
log.Error("failed to get latest confirmed block number", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
s.checkPendingTransaction(header, confirmed)
|
||||
case <-checkBalanceTicker.C:
|
||||
// Check and set balance.
|
||||
_ = s.auths.checkAndSetBalances(ctx)
|
||||
|
||||
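The checkPendingTransaction change above replaces the old depth check (the head must be at least Confirmations blocks past the receipt's block) with a comparison against the latest confirmed block number returned by utils.GetLatestConfirmedBlockNumber. The following stand-alone sketch contrasts the two checks; the function names are illustrative only, not the bridge's API:

package main

import "fmt"

// confirmedByDepth is the old check: the chain head must be at least
// `confirmations` blocks past the block that contains the receipt.
func confirmedByDepth(head, receiptBlock, confirmations uint64) bool {
    return head >= receiptBlock+confirmations
}

// confirmedByNumber is the new check: the receipt's block must be at or
// below a pre-computed "latest confirmed" height (e.g. the safe/finalized
// block, or head minus a fixed depth).
func confirmedByNumber(latestConfirmed, receiptBlock uint64) bool {
    return receiptBlock <= latestConfirmed
}

func main() {
    fmt.Println(confirmedByDepth(105, 100, 6)) // false: the receipt is only 5 blocks deep
    fmt.Println(confirmedByNumber(99, 100))    // false: block 100 is not yet confirmed
    fmt.Println(confirmedByNumber(104, 100))   // true
}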
@@ -14,6 +14,7 @@ import (
|
||||
cmap "github.com/orcaman/concurrent-map"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/docker"
|
||||
@@ -22,7 +23,7 @@ import (
|
||||
"scroll-tech/bridge/sender"
|
||||
)
|
||||
|
||||
const TX_BATCH = 50
|
||||
const TXBatch = 50
|
||||
|
||||
var (
|
||||
privateKeys []*ecdsa.PrivateKey
|
||||
@@ -68,7 +69,7 @@ func testBatchSender(t *testing.T, batchSize int) {
|
||||
}
|
||||
|
||||
senderCfg := cfg.L1Config.RelayerConfig.SenderConfig
|
||||
senderCfg.Confirmations = 0
|
||||
senderCfg.Confirmations = rpc.LatestBlockNumber
|
||||
newSender, err := sender.NewSender(context.Background(), senderCfg, privateKeys)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -84,7 +85,7 @@ func testBatchSender(t *testing.T, batchSize int) {
|
||||
for idx := 0; idx < newSender.NumberOfAccounts(); idx++ {
|
||||
index := idx
|
||||
eg.Go(func() error {
|
||||
for i := 0; i < TX_BATCH; i++ {
|
||||
for i := 0; i < TXBatch; i++ {
|
||||
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
|
||||
id := strconv.Itoa(i + index*1000)
|
||||
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil)
|
||||
@@ -103,7 +104,7 @@ func testBatchSender(t *testing.T, batchSize int) {
|
||||
if err := eg.Wait(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TX_BATCH*newSender.NumberOfAccounts())
|
||||
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TXBatch*newSender.NumberOfAccounts())
|
||||
|
||||
// avoid 10 mins cause testcase panic
|
||||
after := time.After(80 * time.Second)
|
||||
|
||||
@@ -4,18 +4,20 @@ import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"math/big"
|
||||
"scroll-tech/common/docker"
|
||||
"testing"
|
||||
|
||||
"scroll-tech/bridge/config"
|
||||
"scroll-tech/bridge/mock_bridge"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/bridge/config"
|
||||
"scroll-tech/bridge/mock_bridge"
|
||||
|
||||
"scroll-tech/common/docker"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -55,16 +57,20 @@ func setupEnv(t *testing.T) {
|
||||
var err error
|
||||
privateKey, err = crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121212"))
|
||||
assert.NoError(t, err)
|
||||
messagePrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121213"))
|
||||
assert.NoError(t, err)
|
||||
rollupPrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121214"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Load config.
|
||||
cfg, err = config.NewConfig("../config.json")
|
||||
assert.NoError(t, err)
|
||||
cfg.L1Config.Confirmations = 0
|
||||
cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}
|
||||
cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}
|
||||
cfg.L2Config.Confirmations = 0
|
||||
cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}
|
||||
cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}
|
||||
cfg.L1Config.Confirmations = rpc.LatestBlockNumber
|
||||
cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
|
||||
cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
|
||||
cfg.L2Config.Confirmations = rpc.LatestBlockNumber
|
||||
cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
|
||||
cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
|
||||
|
||||
// Create l1geth container.
|
||||
l1gethImg = docker.NewTestL1Docker(t)
|
||||
@@ -89,6 +95,47 @@ func setupEnv(t *testing.T) {
|
||||
// Create l1 and l2 auth
|
||||
l1Auth = prepareAuth(t, l1Client, privateKey)
|
||||
l2Auth = prepareAuth(t, l2Client, privateKey)
|
||||
|
||||
// send some balance to message and rollup sender
|
||||
transferEther(t, l1Auth, l1Client, messagePrivateKey)
|
||||
transferEther(t, l1Auth, l1Client, rollupPrivateKey)
|
||||
transferEther(t, l2Auth, l2Client, messagePrivateKey)
|
||||
transferEther(t, l2Auth, l2Client, rollupPrivateKey)
|
||||
}
|
||||
|
||||
func transferEther(t *testing.T, auth *bind.TransactOpts, client *ethclient.Client, privateKey *ecdsa.PrivateKey) {
|
||||
targetAddress := crypto.PubkeyToAddress(privateKey.PublicKey)
|
||||
|
||||
gasPrice, err := client.SuggestGasPrice(context.Background())
|
||||
assert.NoError(t, err)
|
||||
gasPrice.Mul(gasPrice, big.NewInt(2))
|
||||
|
||||
// Get pending nonce
|
||||
nonce, err := client.PendingNonceAt(context.Background(), auth.From)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// 200 ether should be enough
|
||||
value, ok := big.NewInt(0).SetString("0xad78ebc5ac6200000", 0)
|
||||
assert.Equal(t, ok, true)
|
||||
|
||||
tx := types.NewTx(&types.LegacyTx{
|
||||
Nonce: nonce,
|
||||
To: &targetAddress,
|
||||
Value: value,
|
||||
Gas: 500000,
|
||||
GasPrice: gasPrice,
|
||||
})
|
||||
signedTx, err := auth.Signer(auth.From, tx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = client.SendTransaction(context.Background(), signedTx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
receipt, err := bind.WaitMined(context.Background(), client, signedTx)
|
||||
assert.NoError(t, err)
|
||||
if receipt.Status != types.ReceiptStatusSuccessful {
|
||||
t.Fatalf("Call failed")
|
||||
}
|
||||
}
|
||||
|
||||
func free(t *testing.T) {
|
||||
@@ -150,6 +197,9 @@ func TestFunction(t *testing.T) {
|
||||
// l1 rollup and watch rollup events
|
||||
t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)
|
||||
|
||||
// l2 message
|
||||
t.Run("testRelayL2MessageSucceed", testRelayL2MessageSucceed)
|
||||
|
||||
t.Cleanup(func() {
|
||||
free(t)
|
||||
})
|
||||
|
||||
174
bridge/tests/l2_message_relay_test.go
Normal file
@@ -0,0 +1,174 @@
|
||||
package tests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/database/migrate"
|
||||
"scroll-tech/database/orm"
|
||||
|
||||
"scroll-tech/bridge/l1"
|
||||
"scroll-tech/bridge/l2"
|
||||
)
|
||||
|
||||
func testRelayL2MessageSucceed(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
|
||||
defer db.Close()
|
||||
|
||||
prepareContracts(t)
|
||||
|
||||
// Create L2Relayer
|
||||
l2Cfg := cfg.L2Config
|
||||
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
defer l2Relayer.Stop()
|
||||
|
||||
// Create L2Watcher
|
||||
confirmations := rpc.LatestBlockNumber
|
||||
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.BatchProposerConfig, l2Cfg.L2MessengerAddress, db)
|
||||
|
||||
// Create L1Watcher
|
||||
l1Cfg := cfg.L1Config
|
||||
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
|
||||
|
||||
// send message through l2 messenger contract
|
||||
nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
|
||||
assert.NoError(t, err)
|
||||
sendTx, err := l2MessengerInstance.SendMessage(l2Auth, l1Auth.From, big.NewInt(0), common.Hex2Bytes("00112233"), big.NewInt(0))
|
||||
assert.NoError(t, err)
|
||||
sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx)
|
||||
assert.NoError(t, err)
|
||||
if sendReceipt.Status != types.ReceiptStatusSuccessful || err != nil {
|
||||
t.Fatalf("Call failed")
|
||||
}
|
||||
|
||||
// l2 watch process events
|
||||
l2Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())
|
||||
|
||||
// check db status
|
||||
msg, err := db.GetL2MessageByNonce(nonce.Uint64())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, msg.Status, orm.MsgPending)
|
||||
assert.Equal(t, msg.Sender, l2Auth.From.String())
|
||||
assert.Equal(t, msg.Target, l1Auth.From.String())
|
||||
|
||||
// add fake blocks
|
||||
traces := []*types.BlockTrace{
|
||||
{
|
||||
Header: &types.Header{
|
||||
Number: sendReceipt.BlockNumber,
|
||||
ParentHash: common.Hash{},
|
||||
Difficulty: big.NewInt(0),
|
||||
BaseFee: big.NewInt(0),
|
||||
},
|
||||
StorageTrace: &types.StorageTrace{},
|
||||
},
|
||||
}
|
||||
err = db.InsertBlockTraces(traces)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// add fake batch
|
||||
dbTx, err := db.Beginx()
|
||||
assert.NoError(t, err)
|
||||
batchID, err := db.NewBatchInDBTx(dbTx,
|
||||
&orm.BlockInfo{
|
||||
Number: traces[0].Header.Number.Uint64(),
|
||||
Hash: traces[0].Header.Hash().String(),
|
||||
ParentHash: traces[0].Header.ParentHash.String(),
|
||||
},
|
||||
&orm.BlockInfo{
|
||||
Number: traces[0].Header.Number.Uint64(),
|
||||
Hash: traces[0].Header.Hash().String(),
|
||||
ParentHash: traces[0].Header.ParentHash.String(),
|
||||
},
|
||||
traces[0].Header.ParentHash.String(), 1, 194676)
|
||||
assert.NoError(t, err)
|
||||
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
|
||||
traces[0].Header.Number.Uint64(),
|
||||
traces[0].Header.Number.Uint64()}, batchID)
|
||||
assert.NoError(t, err)
|
||||
err = dbTx.Commit()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// add dummy proof
|
||||
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
|
||||
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
|
||||
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
|
||||
assert.NoError(t, err)
|
||||
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// process pending batch and check status
|
||||
l2Relayer.ProcessPendingBatches()
|
||||
status, err := db.GetRollupStatus(batchID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, orm.RollupCommitting, status)
|
||||
commitTxHash, err := db.GetCommitTxHash(batchID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, true, commitTxHash.Valid)
|
||||
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
|
||||
assert.NoError(t, err)
|
||||
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(commitTxReceipt.Logs), 1)
|
||||
|
||||
// fetch CommitBatch rollup events
|
||||
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
|
||||
assert.NoError(t, err)
|
||||
status, err = db.GetRollupStatus(batchID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, orm.RollupCommitted, status)
|
||||
|
||||
// process committed batch and check status
|
||||
l2Relayer.ProcessCommittedBatches()
|
||||
status, err = db.GetRollupStatus(batchID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, orm.RollupFinalizing, status)
|
||||
finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, true, finalizeTxHash.Valid)
|
||||
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
|
||||
assert.NoError(t, err)
|
||||
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
|
||||
|
||||
// fetch FinalizeBatch events
|
||||
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
|
||||
assert.NoError(t, err)
|
||||
status, err = db.GetRollupStatus(batchID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, orm.RollupFinalized, status)
|
||||
|
||||
// process l2 messages
|
||||
l2Relayer.ProcessSavedEvents()
|
||||
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, msg.Status, orm.MsgSubmitted)
|
||||
relayTxHash, err := db.GetRelayL2MessageTxHash(nonce.Uint64())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, true, relayTxHash.Valid)
|
||||
relayTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String))
|
||||
assert.NoError(t, err)
|
||||
relayTxReceipt, err := bind.WaitMined(context.Background(), l1Client, relayTx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(relayTxReceipt.Logs), 1)
|
||||
|
||||
// fetch message relayed events
|
||||
err = l1Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
|
||||
assert.NoError(t, err)
|
||||
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, msg.Status, orm.MsgConfirmed)
|
||||
}
|
||||
@@ -34,7 +34,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
|
||||
// Create L1Watcher
|
||||
l1Cfg := cfg.L1Config
|
||||
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
|
||||
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
|
||||
|
||||
// add some blocks to db
|
||||
var traces []*types.BlockTrace
|
||||
@@ -80,6 +80,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
|
||||
// process pending batch and check status
|
||||
l2Relayer.ProcessPendingBatches()
|
||||
|
||||
status, err := db.GetRollupStatus(batchID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, orm.RollupCommitting, status)
|
||||
@@ -93,7 +94,6 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
assert.Equal(t, len(commitTxReceipt.Logs), 1)
|
||||
|
||||
// fetch rollup events
|
||||
assert.NoError(t, err)
|
||||
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
|
||||
assert.NoError(t, err)
|
||||
status, err = db.GetRollupStatus(batchID)
|
||||
@@ -110,6 +110,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
|
||||
// process committed batch and check status
|
||||
l2Relayer.ProcessCommittedBatches()
|
||||
|
||||
status, err = db.GetRollupStatus(batchID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, orm.RollupFinalizing, status)
|
||||
@@ -123,7 +124,6 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
|
||||
|
||||
// fetch rollup events
|
||||
assert.NoError(t, err)
|
||||
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
|
||||
assert.NoError(t, err)
|
||||
status, err = db.GetRollupStatus(batchID)
|
||||
|
||||
56
bridge/utils/confirmation.go
Normal file
@@ -0,0 +1,56 @@
package utils

import (
    "context"
    "fmt"
    "math/big"

    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/rpc"
)

type ethClient interface {
    BlockNumber(ctx context.Context) (uint64, error)
    HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
}

// GetLatestConfirmedBlockNumber gets the latest confirmed block number for the given rpc.BlockNumber confirmation parameter.
func GetLatestConfirmedBlockNumber(ctx context.Context, client ethClient, confirm rpc.BlockNumber) (uint64, error) {
    switch true {
    case confirm == rpc.SafeBlockNumber || confirm == rpc.FinalizedBlockNumber:
        var tag *big.Int
        if confirm == rpc.FinalizedBlockNumber {
            tag = big.NewInt(int64(rpc.FinalizedBlockNumber))
        } else {
            tag = big.NewInt(int64(rpc.SafeBlockNumber))
        }

        header, err := client.HeaderByNumber(ctx, tag)
        if err != nil {
            return 0, err
        }
        if !header.Number.IsInt64() {
            return 0, fmt.Errorf("received invalid block confirm: %v", header.Number)
        }
        return header.Number.Uint64(), nil
    case confirm == rpc.LatestBlockNumber:
        number, err := client.BlockNumber(ctx)
        if err != nil {
            return 0, err
        }
        return number, nil
    case confirm.Int64() >= 0: // If it's a positive integer, consider it as a certain confirm value.
        number, err := client.BlockNumber(ctx)
        if err != nil {
            return 0, err
        }
        cfmNum := uint64(confirm.Int64())

        if number >= cfmNum {
            return number - cfmNum, nil
        }
        return 0, nil
    default:
        return 0, fmt.Errorf("unknown confirmation type: %v", confirm)
    }
}
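As a usage sketch, the helper above accepts either a plain confirmation depth or a block tag. The snippet below exercises both modes against a stubbed client (the stub is illustrative; real callers pass an *ethclient.Client), assuming the module path scroll-tech/bridge/utils from this diff:

package main

import (
    "context"
    "fmt"
    "math/big"

    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/rpc"

    "scroll-tech/bridge/utils"
)

// stubClient implements the two methods GetLatestConfirmedBlockNumber needs.
type stubClient struct{ head uint64 }

func (c stubClient) BlockNumber(ctx context.Context) (uint64, error) { return c.head, nil }

func (c stubClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
    return &types.Header{Number: new(big.Int).SetUint64(c.head)}, nil
}

func main() {
    ctx := context.Background()
    client := stubClient{head: 120}

    // Numeric depth: the confirmed height is head minus 6.
    n, err := utils.GetLatestConfirmedBlockNumber(ctx, client, rpc.BlockNumber(6))
    fmt.Println(n, err) // 114 <nil>

    // Block tag: "latest" simply returns the current head.
    n, err = utils.GetLatestConfirmedBlockNumber(ctx, client, rpc.LatestBlockNumber)
    fmt.Println(n, err) // 120 <nil>
}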
107
bridge/utils/confirmation_test.go
Normal file
@@ -0,0 +1,107 @@
|
||||
package utils_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common/math"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
"scroll-tech/bridge/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
tests = []struct {
|
||||
input string
|
||||
mustFail bool
|
||||
expected rpc.BlockNumber
|
||||
}{
|
||||
{`"0x"`, true, rpc.BlockNumber(0)},
|
||||
{`"0x0"`, false, rpc.BlockNumber(0)},
|
||||
{`"0X1"`, false, rpc.BlockNumber(1)},
|
||||
{`"0x00"`, true, rpc.BlockNumber(0)},
|
||||
{`"0x01"`, true, rpc.BlockNumber(0)},
|
||||
{`"0x1"`, false, rpc.BlockNumber(1)},
|
||||
{`"0x12"`, false, rpc.BlockNumber(18)},
|
||||
{`"0x7fffffffffffffff"`, false, rpc.BlockNumber(math.MaxInt64)},
|
||||
{`"0x8000000000000000"`, true, rpc.BlockNumber(0)},
|
||||
{"0", true, rpc.BlockNumber(0)},
|
||||
{`"ff"`, true, rpc.BlockNumber(0)},
|
||||
{`"safe"`, false, rpc.SafeBlockNumber},
|
||||
{`"finalized"`, false, rpc.FinalizedBlockNumber},
|
||||
{`"pending"`, false, rpc.PendingBlockNumber},
|
||||
{`"latest"`, false, rpc.LatestBlockNumber},
|
||||
{`"earliest"`, false, rpc.EarliestBlockNumber},
|
||||
{`someString`, true, rpc.BlockNumber(0)},
|
||||
{`""`, true, rpc.BlockNumber(0)},
|
||||
{``, true, rpc.BlockNumber(0)},
|
||||
}
|
||||
)
|
||||
|
||||
func TestUnmarshalJSON(t *testing.T) {
|
||||
for i, test := range tests {
|
||||
var num rpc.BlockNumber
|
||||
err := json.Unmarshal([]byte(test.input), &num)
|
||||
if test.mustFail && err == nil {
|
||||
t.Errorf("Test %d should fail", i)
|
||||
continue
|
||||
}
|
||||
if !test.mustFail && err != nil {
|
||||
t.Errorf("Test %d should pass but got err: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if num != test.expected {
|
||||
t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalJSON(t *testing.T) {
|
||||
for i, test := range tests {
|
||||
var num rpc.BlockNumber
|
||||
want, err := json.Marshal(test.expected)
|
||||
assert.Nil(t, err)
|
||||
if !test.mustFail {
|
||||
err = json.Unmarshal([]byte(test.input), &num)
|
||||
assert.Nil(t, err)
|
||||
got, err := json.Marshal(&num)
|
||||
assert.Nil(t, err)
|
||||
if string(want) != string(got) {
|
||||
t.Errorf("Test %d got unexpected value, want %d, got %d", i, test.expected, num)
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type MockEthClient struct {
|
||||
val uint64
|
||||
}
|
||||
|
||||
func (e MockEthClient) BlockNumber(ctx context.Context) (uint64, error) {
|
||||
return e.val, nil
|
||||
}
|
||||
|
||||
func (e MockEthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
|
||||
return &types.Header{Number: new(big.Int).SetUint64(e.val)}, nil
|
||||
}
|
||||
|
||||
func TestGetLatestConfirmedBlockNumber(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := MockEthClient{}
|
||||
|
||||
client.val = 5
|
||||
confirmed, err := utils.GetLatestConfirmedBlockNumber(ctx, &client, 6)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, uint64(0), confirmed)
|
||||
|
||||
client.val = 7
|
||||
confirmed, err = utils.GetLatestConfirmedBlockNumber(ctx, &client, 6)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, uint64(1), confirmed)
|
||||
}
|
||||
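Since the Confirmations fields are now typed as rpc.BlockNumber, a JSON config can carry either a tag ("latest", "safe", "finalized") or a hex quantity, exactly the cases the UnmarshalJSON table above exercises. A small stand-alone sketch (the cfg struct is a stand-in, not the bridge's real config type):

package main

import (
    "encoding/json"
    "fmt"

    "github.com/scroll-tech/go-ethereum/rpc"
)

// cfg is a minimal stand-in for a config section that carries a confirmation setting.
type cfg struct {
    Confirmations rpc.BlockNumber `json:"confirmations"`
}

func main() {
    for _, raw := range []string{
        `{"confirmations": "latest"}`,
        `{"confirmations": "safe"}`,
        `{"confirmations": "0x6"}`,
    } {
        var c cfg
        if err := json.Unmarshal([]byte(raw), &c); err != nil {
            panic(err)
        }
        // Tags decode to negative sentinel values, hex quantities to their integer value.
        fmt.Println(int64(c.Confirmations))
    }
}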
@@ -20,8 +20,8 @@ func encodePacked(input ...[]byte) []byte {
|
||||
|
||||
// ComputeMessageHash computes the message hash
|
||||
func ComputeMessageHash(
|
||||
target common.Address,
|
||||
sender common.Address,
|
||||
target common.Address,
|
||||
value *big.Int,
|
||||
fee *big.Int,
|
||||
deadline *big.Int,
|
||||
@@ -29,8 +29,8 @@ func ComputeMessageHash(
|
||||
messageNonce *big.Int,
|
||||
) common.Hash {
|
||||
packed := encodePacked(
|
||||
target.Bytes(),
|
||||
sender.Bytes(),
|
||||
target.Bytes(),
|
||||
math.U256Bytes(value),
|
||||
math.U256Bytes(fee),
|
||||
math.U256Bytes(deadline),
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"scroll-tech/bridge/utils"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestKeccak2(t *testing.T) {
|
||||
@@ -28,15 +29,13 @@ func TestKeccak2(t *testing.T) {
|
||||
|
||||
func TestComputeMessageHash(t *testing.T) {
|
||||
hash := utils.ComputeMessageHash(
|
||||
common.HexToAddress("0xdafea492d9c6733ae3d56b7ed1adb60692c98bc5"),
|
||||
common.HexToAddress("0xeafea492d9c6733ae3d56b7ed1adb60692c98bf7"),
|
||||
big.NewInt(1),
|
||||
big.NewInt(2),
|
||||
big.NewInt(1234567),
|
||||
common.Hex2Bytes("0011223344"),
|
||||
big.NewInt(3),
|
||||
common.HexToAddress("0xd7227113b92e537aeda220d5a2f201b836e5879d"),
|
||||
common.HexToAddress("0x47c02b023b6787ef4e503df42bbb1a94f451a1c0"),
|
||||
big.NewInt(5000000000000000),
|
||||
big.NewInt(0),
|
||||
big.NewInt(1674204924),
|
||||
common.Hex2Bytes("8eaac8a30000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000000000000000000000000000000011c37937e0800000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000"),
|
||||
big.NewInt(30706),
|
||||
)
|
||||
if hash != common.HexToHash("0x58c9a5abfd2a558bb6a6fd5192b36fe9325d98763bafd3a51a1ea28a5d0b990b") {
|
||||
t.Fatalf("Invalid ComputeMessageHash, want %s, got %s", "0x58c9a5abfd2a558bb6a6fd5192b36fe9325d98763bafd3a51a1ea28a5d0b990b", hash.Hex())
|
||||
}
|
||||
assert.Equal(t, hash.String(), "0x920e59f62ca89a0f481d44961c55d299dd20c575693692d61fdf3ca579d8edf3")
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Build libzkp dependency
|
||||
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-08-23 as chef
|
||||
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-12-10 as chef
|
||||
WORKDIR app
|
||||
|
||||
FROM chef as planner
|
||||
@@ -13,10 +13,11 @@ RUN cargo chef cook --release --recipe-path recipe.json
|
||||
|
||||
COPY ./common/libzkp/impl .
|
||||
RUN cargo build --release
|
||||
RUN find ./ | grep libzktrie.so | xargs -i cp {} /app/target/release/
|
||||
|
||||
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-08-23 as base
|
||||
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-12-10 as base
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
COPY ./bridge/go.* ./bridge/
|
||||
@@ -33,11 +34,16 @@ FROM base as builder
|
||||
COPY . .
|
||||
RUN cp -r ./common/libzkp/interface ./coordinator/verifier/lib
|
||||
COPY --from=zkp-builder /app/target/release/libzkp.a ./coordinator/verifier/lib/
|
||||
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd
|
||||
COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/verifier/lib/
|
||||
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd && mv verifier/lib /bin/
|
||||
|
||||
# Pull coordinator into a second stage deploy alpine container
|
||||
FROM ubuntu:20.04
|
||||
|
||||
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/verifier/lib
|
||||
# ENV CHAIN_ID=534353
|
||||
RUN mkdir -p /src/coordinator/verifier/lib
|
||||
COPY --from=builder /bin/lib /src/coordinator/verifier/lib
|
||||
COPY --from=builder /bin/coordinator /bin/
|
||||
|
||||
|
||||
ENTRYPOINT ["/bin/coordinator"]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
GO_VERSION := 1.18
|
||||
PYTHON_VERSION := 3.10
|
||||
RUST_VERSION := nightly-2022-08-23
|
||||
RUST_VERSION := nightly-2022-12-10
|
||||
|
||||
.PHONY: all go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder py-runner
|
||||
|
||||
|
||||
@@ -4,4 +4,4 @@ FROM golang:1.18-alpine
|
||||
|
||||
# RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories
|
||||
|
||||
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates
|
||||
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates openssl-dev
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
FROM golang:1.18-alpine
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-08-23
|
||||
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-12-10
|
||||
|
||||
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates
|
||||
RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates openssl-dev
|
||||
|
||||
# RUN apk add --no-cache libc6-compat
|
||||
# RUN apk add --no-cache gcompat
|
||||
|
||||
@@ -14,7 +14,7 @@ ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
ENV CARGO_HOME=/root/.cargo
|
||||
|
||||
# Add Toolchain
|
||||
RUN rustup toolchain install nightly-2022-08-23
|
||||
RUN rustup toolchain install nightly-2022-12-10
|
||||
|
||||
# TODO: make this ARG
|
||||
ENV CARGO_CHEF_TAG=0.1.41
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
ARG ALPINE_VERSION=3.15
|
||||
FROM alpine:${ALPINE_VERSION}
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-08-23
|
||||
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-12-10
|
||||
|
||||
RUN apk add --no-cache \
|
||||
ca-certificates \
|
||||
openssl-dev \
|
||||
gcc \
|
||||
git \
|
||||
musl-dev
|
||||
|
||||
@@ -13,4 +13,4 @@ RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
|
||||
# Add Toolchain
|
||||
RUN rustup toolchain install nightly-2022-08-23
|
||||
RUN rustup toolchain install nightly-2022-12-10
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM scrolltech/l2geth:prealpha-v4.2
|
||||
FROM scrolltech/l2geth:prealpha-v5.1
|
||||
|
||||
RUN mkdir -p /l2geth/keystore
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ require (
|
||||
github.com/mattn/go-colorable v0.1.8
|
||||
github.com/mattn/go-isatty v0.0.14
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/urfave/cli/v2 v2.10.2
|
||||
)
|
||||
@@ -67,7 +67,7 @@ require (
|
||||
github.com/rogpeppe/go-internal v1.8.1 // indirect
|
||||
github.com/rs/cors v1.7.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.3.1 // indirect
|
||||
github.com/scroll-tech/zktrie v0.4.3 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
|
||||
@@ -77,12 +77,12 @@ require (
|
||||
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
golang.org/x/crypto v0.4.0 // indirect
|
||||
golang.org/x/crypto v0.6.0 // indirect
|
||||
golang.org/x/mod v0.7.0 // indirect
|
||||
golang.org/x/net v0.3.0 // indirect
|
||||
golang.org/x/net v0.6.0 // indirect
|
||||
golang.org/x/sync v0.1.0 // indirect
|
||||
golang.org/x/sys v0.3.0 // indirect
|
||||
golang.org/x/text v0.5.0 // indirect
|
||||
golang.org/x/sys v0.5.0 // indirect
|
||||
golang.org/x/text v0.7.0 // indirect
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
|
||||
golang.org/x/tools v0.3.0 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
|
||||
@@ -404,11 +404,10 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
|
||||
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
|
||||
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
|
||||
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod h1:OH4ZTAz6RM1IL0xcQ1zM6+Iy9s2vtcYqqwcEQdfHV7g=
|
||||
github.com/scroll-tech/zktrie v0.4.3 h1:RyhusIu8F8u5ITmzqZjkAwlL6jdC9TK9i6tfuJoZcpk=
|
||||
github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
@@ -482,8 +481,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
|
||||
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
|
||||
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -541,8 +540,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.3.0 h1:VWL6FNY2bEEmsGVKabSlHu5Irp34xmMRoqb/9lF9lxk=
|
||||
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -606,8 +605,8 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -619,8 +618,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
||||
2822
common/libzkp/impl/Cargo.lock
generated
File diff suppressed because it is too large
@@ -7,9 +7,16 @@ edition = "2021"
|
||||
[lib]
|
||||
crate-type = ["staticlib"]
|
||||
|
||||
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
|
||||
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "scroll-dev-0220" }
|
||||
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
|
||||
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "scroll-dev-0220" }
|
||||
[patch."https://github.com/scroll-tech/zktrie.git"]
|
||||
zktrie = { git = "https://github.com/lispc/zktrie", branch = "scroll-dev-0215" }
|
||||
|
||||
[dependencies]
|
||||
zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm" }
|
||||
types = { git = "https://github.com/scroll-tech/scroll-zkevm" }
|
||||
zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="goerli-0215" }
|
||||
types = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="goerli-0215" }
|
||||
|
||||
log = "0.4"
|
||||
env_logger = "0.9.0"
|
||||
|
||||
@@ -1 +1 @@
|
||||
nightly-2022-08-23
|
||||
nightly-2022-12-10
|
||||
|
||||
@@ -44,7 +44,7 @@ pub unsafe extern "C" fn create_agg_proof_multi(trace_char: *const c_char) -> *c
|
||||
let proof = PROVER
|
||||
.get_mut()
|
||||
.unwrap()
|
||||
.create_agg_circuit_proof_multi(traces.as_slice())
|
||||
.create_agg_circuit_proof_batch(traces.as_slice())
|
||||
.unwrap();
|
||||
let proof_bytes = serde_json::to_vec(&proof).unwrap();
|
||||
vec_to_c_char(proof_bytes)
|
||||
|
||||
@@ -214,8 +214,9 @@ func (z *ProofDetail) Hash() ([]byte, error) {
|
||||
|
||||
// AggProof includes the proof and public input that are required for verification and rollup.
|
||||
type AggProof struct {
|
||||
Proof []byte `json:"proof"`
|
||||
Instance []byte `json:"instance"`
|
||||
FinalPair []byte `json:"final_pair"`
|
||||
Vk []byte `json:"vk"`
|
||||
Proof []byte `json:"proof"`
|
||||
Instance []byte `json:"instance"`
|
||||
FinalPair []byte `json:"final_pair"`
|
||||
Vk []byte `json:"vk"`
|
||||
BlockCount uint `json:"block_count"`
|
||||
}
|
||||
|
||||
53
common/metrics/metrics.go
Normal file
@@ -0,0 +1,53 @@
package metrics

import (
    "context"
    "net"
    "net/http"
    "strconv"

    "github.com/scroll-tech/go-ethereum/log"
    "github.com/scroll-tech/go-ethereum/metrics"
    "github.com/scroll-tech/go-ethereum/metrics/prometheus"
    "github.com/scroll-tech/go-ethereum/rpc"

    "github.com/urfave/cli/v2"

    "scroll-tech/common/utils"
)

// Serve starts the metrics server on the given address; the server is closed when the given
// context is canceled.
func Serve(ctx context.Context, c *cli.Context) {
    if !c.Bool(utils.MetricsEnabled.Name) {
        return
    }

    address := net.JoinHostPort(
        c.String(utils.MetricsAddr.Name),
        strconv.Itoa(c.Int(utils.MetricsPort.Name)),
    )

    server := &http.Server{
        Addr:         address,
        Handler:      prometheus.Handler(metrics.DefaultRegistry),
        ReadTimeout:  rpc.DefaultHTTPTimeouts.ReadTimeout,
        WriteTimeout: rpc.DefaultHTTPTimeouts.WriteTimeout,
        IdleTimeout:  rpc.DefaultHTTPTimeouts.IdleTimeout,
    }

    go func() {
        <-ctx.Done()
        if err := server.Close(); err != nil {
            log.Error("Failed to close metrics server", "error", err)
        }
    }()

    log.Info("Starting metrics server", "address", address)

    go func() {
        if err := server.ListenAndServe(); err != nil {
            log.Error("start metrics server error", "error", err)
        }
    }()
}
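To tie the pieces together: a service registers gauges through the go-ethereum metrics registry (as the bridge does for bridge/l2/msg/sync/height earlier in this diff) and calls Serve from its CLI entrypoint. A hedged sketch wiring only the metrics flags defined below; the app layout is illustrative, not the coordinator's actual main:

package main

import (
    "context"
    "os"
    "time"

    "github.com/scroll-tech/go-ethereum/metrics"
    "github.com/urfave/cli/v2"

    cmetrics "scroll-tech/common/metrics"
    "scroll-tech/common/utils"
)

// exampleGauge mirrors how the bridge registers its sync-height gauge.
var exampleGauge = metrics.NewRegisteredGauge("example/sync/height", nil)

func main() {
    app := &cli.App{
        Flags: []cli.Flag{&utils.MetricsEnabled, &utils.MetricsAddr, &utils.MetricsPort},
        Action: func(c *cli.Context) error {
            ctx, cancel := context.WithCancel(context.Background())
            defer cancel()
            cmetrics.Serve(ctx, c) // no-op unless --metrics is passed
            exampleGauge.Update(42)
            time.Sleep(time.Minute) // keep the process alive so the endpoint stays up
            return nil
        },
    }
    _ = app.Run(os.Args)
}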
@@ -12,6 +12,9 @@ var (
|
||||
&LogFileFlag,
|
||||
&LogJSONFormat,
|
||||
&LogDebugFlag,
|
||||
&MetricsEnabled,
|
||||
&MetricsAddr,
|
||||
&MetricsPort,
|
||||
}
|
||||
// ConfigFileFlag loads the JSON-format config file.
|
||||
ConfigFileFlag = cli.StringFlag{
|
||||
@@ -42,4 +45,25 @@ var (
|
||||
Name: "log.debug",
|
||||
Usage: "Prepends log messages with call-site location (file and line number)",
|
||||
}
|
||||
// MetricsEnabled enables metrics collection and reporting
|
||||
MetricsEnabled = cli.BoolFlag{
|
||||
Name: "metrics",
|
||||
Usage: "Enable metrics collection and reporting",
|
||||
Category: "METRICS",
|
||||
Value: false,
|
||||
}
|
||||
// MetricsAddr is listening address of Metrics reporting server
|
||||
MetricsAddr = cli.StringFlag{
|
||||
Name: "metrics.addr",
|
||||
Usage: "Metrics reporting server listening address",
|
||||
Category: "METRICS",
|
||||
Value: "0.0.0.0",
|
||||
}
|
||||
// MetricsPort is listening port of Metrics reporting server
|
||||
MetricsPort = cli.IntFlag{
|
||||
Name: "metrics.port",
|
||||
Usage: "Metrics reporting server listening port",
|
||||
Category: "METRICS",
|
||||
Value: 6060,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -38,10 +38,14 @@ func StartHTTPEndpoint(endpoint string, apis []rpc.API) (*http.Server, net.Addr,
|
||||
}
|
||||
|
||||
// StartWSEndpoint starts the WS RPC endpoint.
|
||||
func StartWSEndpoint(endpoint string, apis []rpc.API) (*http.Server, net.Addr, error) {
|
||||
func StartWSEndpoint(endpoint string, apis []rpc.API, compressionLevel int) (*http.Server, net.Addr, error) {
|
||||
handler, addr, err := StartHTTPEndpoint(endpoint, apis)
|
||||
if err == nil {
|
||||
srv := (handler.Handler).(*rpc.Server)
|
||||
err = srv.SetCompressionLevel(compressionLevel)
|
||||
if err != nil {
|
||||
log.Error("failed to set ws compression level", "compression level", compressionLevel, "err", err)
|
||||
}
|
||||
handler.Handler = srv.WebsocketHandler(nil)
|
||||
}
|
||||
return handler, addr, err
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
@@ -59,7 +60,7 @@ func TestStartWSEndpoint(t *testing.T) {
|
||||
Namespace: "test",
|
||||
Service: new(testService),
|
||||
},
|
||||
})
|
||||
}, flate.NoCompression)
|
||||
assert.NoError(t, err)
|
||||
defer handler.Shutdown(context.Background())
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
var tag = "prealpha-v10.2"
|
||||
var tag = "alpha-v1.0"
|
||||
|
||||
var commit = func() string {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
|
||||
Submodule contracts/lib/forge-std updated: cb69e9c07f...662ae0d693
@@ -13,17 +13,34 @@ import { L2ERC721Gateway } from "../../src/L2/gateways/L2ERC721Gateway.sol";
|
||||
import { L2GatewayRouter } from "../../src/L2/gateways/L2GatewayRouter.sol";
|
||||
import { L2ScrollMessenger } from "../../src/L2/L2ScrollMessenger.sol";
|
||||
import { L2StandardERC20Gateway } from "../../src/L2/gateways/L2StandardERC20Gateway.sol";
|
||||
import { L2TxFeeVault } from "../../src/L2/predeploys/L2TxFeeVault.sol";
|
||||
import { Whitelist } from "../../src/L2/predeploys/Whitelist.sol";
|
||||
import { ScrollStandardERC20 } from "../../src/libraries/token/ScrollStandardERC20.sol";
|
||||
import { ScrollStandardERC20Factory } from "../../src/libraries/token/ScrollStandardERC20Factory.sol";
|
||||
|
||||
contract DeployL2BridgeContracts is Script {
|
||||
uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
|
||||
address L1_TX_FEE_RECIPIENT_ADDR = vm.envAddress("L1_TX_FEE_RECIPIENT_ADDR");
|
||||
|
||||
L2ScrollMessenger messenger;
|
||||
ProxyAdmin proxyAdmin;
|
||||
|
||||
address L2_SCROLL_MESSENGER_PREDEPLOY_ADDR = vm.envOr("L2_SCROLL_MESSENGER_PREDEPLOY_ADDR", address(0));
|
||||
address L2_TX_FEE_VAULT_PREDEPLOY_ADDR = vm.envOr("L2_TX_FEE_VAULT_PREDEPLOY_ADDR", address(0));
|
||||
address L2_PROXY_ADMIN_PREDEPLOY_ADDR = vm.envOr("L2_PROXY_ADMIN_PREDEPLOY_ADDR", address(0));
|
||||
address L2_STANDARD_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR = vm.envOr("L2_STANDARD_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR", address(0));
|
||||
address L2_GATEWAY_ROUTER_PROXY_PREDEPLOY_ADDR = vm.envOr("L2_GATEWAY_ROUTER_PROXY_PREDEPLOY_ADDR", address(0));
|
||||
address L2_SCROLL_STANDARD_ERC20_FACTORY_PREDEPLOY_ADDR = vm.envOr("L2_SCROLL_STANDARD_ERC20_FACTORY_PREDEPLOY_ADDR", address(0));
|
||||
address L2_CUSTOM_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR = vm.envOr("L2_CUSTOM_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR", address(0));
|
||||
address L2_ERC721_GATEWAY_PROXY_PREDEPLOY_ADDR = vm.envOr("L2_ERC721_GATEWAY_PROXY_PREDEPLOY_ADDR", address(0));
|
||||
address L2_ERC1155_GATEWAY_PROXY_PREDEPLOY_ADDR = vm.envOr("L2_ERC1155_GATEWAY_PROXY_PREDEPLOY_ADDR", address(0));
|
||||
address L2_WHITELIST_PREDEPLOY_ADDR = vm.envOr("L2_WHITELIST_PREDEPLOY_ADDR", address(0));
|
||||
|
||||
function run() external {
|
||||
vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
|
||||
|
||||
deployL2ScrollMessenger();
|
||||
deployTxFeeVault();
|
||||
deployProxyAdmin();
|
||||
deployL2StandardERC20Gateway();
|
||||
deployL2GatewayRouter();
|
||||
@@ -31,24 +48,51 @@ contract DeployL2BridgeContracts is Script {
|
||||
deployL2CustomERC20Gateway();
|
||||
deployL2ERC721Gateway();
|
||||
deployL2ERC1155Gateway();
|
||||
deployL2Whitelist();
|
||||
|
||||
vm.stopBroadcast();
|
||||
}
|
||||
|
||||
function deployL2ScrollMessenger() internal {
|
||||
address owner = vm.addr(L2_DEPLOYER_PRIVATE_KEY);
|
||||
L2ScrollMessenger l2ScrollMessenger = new L2ScrollMessenger(owner);
|
||||
if (L2_SCROLL_MESSENGER_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_SCROLL_MESSENGER_ADDR", address(L2_SCROLL_MESSENGER_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
logAddress("L2_SCROLL_MESSENGER_ADDR", address(l2ScrollMessenger));
|
||||
address owner = vm.addr(L2_DEPLOYER_PRIVATE_KEY);
|
||||
messenger = new L2ScrollMessenger(owner);
|
||||
|
||||
logAddress("L2_SCROLL_MESSENGER_ADDR", address(messenger));
|
||||
}
|
||||
|
||||
function deployTxFeeVault() internal {
|
||||
if (L2_TX_FEE_VAULT_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_TX_FEE_VAULT_ADDR", address(L2_TX_FEE_VAULT_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
L2TxFeeVault feeVault = new L2TxFeeVault(address(messenger), L1_TX_FEE_RECIPIENT_ADDR);
|
||||
|
||||
logAddress("L2_TX_FEE_VAULT_ADDR", address(feeVault));
|
||||
}
|
||||
|
||||
function deployProxyAdmin() internal {
|
||||
if (L2_PROXY_ADMIN_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_PROXY_ADMIN_ADDR", address(L2_PROXY_ADMIN_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
proxyAdmin = new ProxyAdmin();
|
||||
|
||||
logAddress("L2_PROXY_ADMIN_ADDR", address(proxyAdmin));
|
||||
}
|
||||
|
||||
function deployL2StandardERC20Gateway() internal {
|
||||
if (L2_STANDARD_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR", address(L2_STANDARD_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
L2StandardERC20Gateway impl = new L2StandardERC20Gateway();
|
||||
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
|
||||
|
||||
@@ -57,6 +101,11 @@ contract DeployL2BridgeContracts is Script {
|
||||
}
|
||||
|
||||
function deployL2GatewayRouter() internal {
|
||||
if (L2_GATEWAY_ROUTER_PROXY_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_GATEWAY_ROUTER_PROXY_ADDR", address(L2_GATEWAY_ROUTER_PROXY_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
L2GatewayRouter impl = new L2GatewayRouter();
|
||||
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
|
||||
|
||||
@@ -65,6 +114,11 @@ contract DeployL2BridgeContracts is Script {
|
||||
}
|
||||
|
||||
function deployScrollStandardERC20Factory() internal {
|
||||
if (L2_SCROLL_STANDARD_ERC20_FACTORY_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR", address(L2_SCROLL_STANDARD_ERC20_FACTORY_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
ScrollStandardERC20 tokenImpl = new ScrollStandardERC20();
|
||||
ScrollStandardERC20Factory scrollStandardERC20Factory = new ScrollStandardERC20Factory(address(tokenImpl));
|
||||
|
||||
@@ -73,6 +127,11 @@ contract DeployL2BridgeContracts is Script {
|
||||
}
|
||||
|
||||
function deployL2CustomERC20Gateway() internal {
|
||||
if (L2_CUSTOM_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR", address(L2_CUSTOM_ERC20_GATEWAY_PROXY_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
L2CustomERC20Gateway impl = new L2CustomERC20Gateway();
|
||||
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
|
||||
|
||||
@@ -81,6 +140,11 @@ contract DeployL2BridgeContracts is Script {
|
||||
}
|
||||
|
||||
function deployL2ERC721Gateway() internal {
|
||||
if (L2_ERC721_GATEWAY_PROXY_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_ERC721_GATEWAY_PROXY_ADDR", address(L2_ERC721_GATEWAY_PROXY_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
L2ERC721Gateway impl = new L2ERC721Gateway();
|
||||
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
|
||||
|
||||
@@ -89,6 +153,11 @@ contract DeployL2BridgeContracts is Script {
|
||||
}
|
||||
|
||||
function deployL2ERC1155Gateway() internal {
|
||||
if (L2_ERC1155_GATEWAY_PROXY_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_ERC1155_GATEWAY_PROXY_ADDR", address(L2_ERC1155_GATEWAY_PROXY_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
L2ERC1155Gateway impl = new L2ERC1155Gateway();
|
||||
TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(proxyAdmin), new bytes(0));
|
||||
|
||||
@@ -96,6 +165,18 @@ contract DeployL2BridgeContracts is Script {
|
||||
logAddress("L2_ERC1155_GATEWAY_PROXY_ADDR", address(proxy));
|
||||
}
|
||||
|
||||
function deployL2Whitelist() internal {
|
||||
if (L2_WHITELIST_PREDEPLOY_ADDR != address(0)) {
|
||||
logAddress("L2_WHITELIST_ADDR", address(L2_WHITELIST_PREDEPLOY_ADDR));
|
||||
return;
|
||||
}
|
||||
|
||||
address owner = vm.addr(L2_DEPLOYER_PRIVATE_KEY);
|
||||
Whitelist whitelist = new Whitelist(owner);
|
||||
|
||||
logAddress("L2_WHITELIST_ADDR", address(whitelist));
|
||||
}
|
||||
|
||||
function logAddress(string memory name, address addr) internal {
|
||||
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
|
||||
}
|
||||
|
||||
@@ -3,11 +3,13 @@ pragma solidity ^0.8.10;
|
||||
|
||||
import { Script } from "forge-std/Script.sol";
|
||||
|
||||
import { L2ScrollMessenger } from "../../src/L2/L2ScrollMessenger.sol";
|
||||
import { L2CustomERC20Gateway } from "../../src/L2/gateways/L2CustomERC20Gateway.sol";
|
||||
import { L2ERC1155Gateway } from "../../src/L2/gateways/L2ERC1155Gateway.sol";
|
||||
import { L2ERC721Gateway } from "../../src/L2/gateways/L2ERC721Gateway.sol";
|
||||
import { L2GatewayRouter } from "../../src/L2/gateways/L2GatewayRouter.sol";
|
||||
import { L2StandardERC20Gateway } from "../../src/L2/gateways/L2StandardERC20Gateway.sol";
|
||||
import { Whitelist } from "../../src/L2/predeploys/Whitelist.sol";
|
||||
import { ScrollStandardERC20Factory } from "../../src/libraries/token/ScrollStandardERC20Factory.sol";
|
||||
|
||||
contract InitializeL2BridgeContracts is Script {
|
||||
@@ -20,12 +22,14 @@ contract InitializeL2BridgeContracts is Script {
|
||||
address L1_ERC1155_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ERC1155_GATEWAY_PROXY_ADDR");
|
||||
|
||||
address L2_SCROLL_MESSENGER_ADDR = vm.envAddress("L2_SCROLL_MESSENGER_ADDR");
|
||||
address L2_TX_FEE_VAULT_ADDR = vm.envAddress("L2_TX_FEE_VAULT_ADDR");
|
||||
address L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
|
||||
address L2_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L2_GATEWAY_ROUTER_PROXY_ADDR");
|
||||
address L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR = vm.envAddress("L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR");
|
||||
address L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR");
|
||||
address L2_ERC721_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ERC721_GATEWAY_PROXY_ADDR");
|
||||
address L2_ERC1155_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ERC1155_GATEWAY_PROXY_ADDR");
|
||||
address L2_WHITELIST_ADDR = vm.envAddress("L2_WHITELIST_ADDR");
|
||||
|
||||
function run() external {
|
||||
vm.startBroadcast(deployerPrivateKey);
|
||||
@@ -69,6 +73,21 @@ contract InitializeL2BridgeContracts is Script {
|
||||
L2_SCROLL_MESSENGER_ADDR
|
||||
);
|
||||
|
||||
// whitelist contracts which can call sendMessage
|
||||
{
|
||||
address[] memory gateways = new address[](6);
|
||||
gateways[0] = L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR;
|
||||
gateways[1] = L2_GATEWAY_ROUTER_PROXY_ADDR;
|
||||
gateways[2] = L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR;
|
||||
gateways[3] = L2_ERC1155_GATEWAY_PROXY_ADDR;
|
||||
gateways[4] = L2_ERC721_GATEWAY_PROXY_ADDR;
|
||||
gateways[5] = L2_TX_FEE_VAULT_ADDR;
|
||||
Whitelist(L2_WHITELIST_ADDR).updateWhitelistStatus(gateways, true);
|
||||
}
|
||||
|
||||
// update whitelist contract for messenger
|
||||
L2ScrollMessenger(payable(L2_SCROLL_MESSENGER_ADDR)).updateWhitelist(L2_WHITELIST_ADDR);
|
||||
|
||||
vm.stopBroadcast();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -86,12 +86,10 @@ contract L2ScrollMessenger is ScrollMessengerBase, OwnableBase, IL2ScrollMesseng
|
||||
uint256 _deadline,
|
||||
uint256 _nonce,
|
||||
bytes memory _message
|
||||
) external override {
|
||||
) external override onlyWhitelistedSender(msg.sender) {
|
||||
// anti reentrance
|
||||
require(xDomainMessageSender == ScrollConstants.DEFAULT_XDOMAIN_MESSAGE_SENDER, "already in execution");
|
||||
|
||||
// @todo only privileged accounts can call
|
||||
|
||||
// solhint-disable-next-line not-rely-on-time
|
||||
require(_deadline >= block.timestamp, "Message expired");
|
||||
|
||||
|
||||
14
contracts/src/L2/predeploys/L2TxFeeVault.sol
Normal file
@@ -0,0 +1,14 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
import { FeeVault } from "../../libraries/FeeVault.sol";
|
||||
|
||||
/// @title L2TxFeeVault
|
||||
/// @notice The `L2TxFeeVault` contract collects all L2 transaction fees and allows withdrawing these fees to a predefined L1 address.
|
||||
/// The minimum withdrawal amount is 10 ether.
|
||||
contract L2TxFeeVault is FeeVault {
|
||||
/// @param _messenger The address of L2ScrollMessenger.
|
||||
/// @param _recipient The fee recipient address on L1.
|
||||
constructor(address _messenger, address _recipient) FeeVault(_messenger, _recipient, 10 ether) {}
|
||||
}
|
||||
108
contracts/src/libraries/FeeVault.sol
Normal file
@@ -0,0 +1,108 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
// MIT License
|
||||
|
||||
// Copyright (c) 2022 Optimism
|
||||
// Copyright (c) 2022 Scroll
|
||||
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
// SOFTWARE.
|
||||
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
import { IL2ScrollMessenger } from "../L2/IL2ScrollMessenger.sol";
|
||||
|
||||
/**
|
||||
* @title FeeVault
|
||||
* @notice The FeeVault contract contains the basic logic for the various different vault contracts
|
||||
* used to hold fee revenue generated by the L2 system.
|
||||
*/
|
||||
abstract contract FeeVault {
|
||||
/**
|
||||
* @notice Emits each time that a withdrawal occurs.
|
||||
*
|
||||
* @param value Amount that was withdrawn (in wei).
|
||||
* @param to Address that the funds were sent to.
|
||||
* @param from Address that triggered the withdrawal.
|
||||
*/
|
||||
event Withdrawal(uint256 value, address to, address from);
|
||||
|
||||
/**
|
||||
* @notice Minimum balance before a withdrawal can be triggered.
|
||||
*/
|
||||
uint256 public MIN_WITHDRAWAL_AMOUNT;
|
||||
|
||||
/**
|
||||
* @notice Scroll L2 messenger address.
|
||||
*/
|
||||
address public MESSENGER;
|
||||
|
||||
/**
|
||||
* @notice Wallet that will receive the fees on L1.
|
||||
*/
|
||||
address public RECIPIENT;
|
||||
|
||||
/**
|
||||
* @notice Total amount of wei processed by the contract.
|
||||
*/
|
||||
uint256 public totalProcessed;
|
||||
|
||||
/**
|
||||
* @param _recipient Wallet that will receive the fees on L1.
|
||||
* @param _minWithdrawalAmount Minimum balance before a withdrawal can be triggered.
|
||||
*/
|
||||
constructor(
|
||||
address _messenger,
|
||||
address _recipient,
|
||||
uint256 _minWithdrawalAmount
|
||||
) {
|
||||
MIN_WITHDRAWAL_AMOUNT = _minWithdrawalAmount;
|
||||
MESSENGER = _messenger;
|
||||
RECIPIENT = _recipient;
|
||||
}
|
||||
|
||||
/**
|
||||
* @notice Allow the contract to receive ETH.
|
||||
*/
|
||||
receive() external payable {}
|
||||
|
||||
/**
|
||||
* @notice Triggers a withdrawal of funds to the L1 fee wallet.
|
||||
*/
|
||||
function withdraw() external {
|
||||
uint256 value = address(this).balance;
|
||||
|
||||
require(
|
||||
value >= MIN_WITHDRAWAL_AMOUNT,
|
||||
"FeeVault: withdrawal amount must be greater than minimum withdrawal amount"
|
||||
);
|
||||
|
||||
unchecked {
|
||||
totalProcessed += value;
|
||||
}
|
||||
|
||||
emit Withdrawal(value, RECIPIENT, msg.sender);
|
||||
|
||||
IL2ScrollMessenger(MESSENGER).sendMessage{ value: value }(
|
||||
RECIPIENT,
|
||||
0, // no fee provided
|
||||
bytes(""), // no message (simple eth transfer)
|
||||
0 // _gasLimit is not used for eth transfers
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -337,7 +337,7 @@ library RollupVerifier {
|
||||
n.y[1] = uint256(13392588948715843804641432497768002650278120570034223513918757245338268106653);
|
||||
}
|
||||
|
||||
function get_wx_wg(uint256[] calldata proof, uint256[4] memory instances)
|
||||
function get_wx_wg(uint256[] calldata proof, uint256[6] memory instances)
|
||||
internal
|
||||
view
|
||||
returns (
|
||||
@@ -354,15 +354,15 @@ library RollupVerifier {
|
||||
|
||||
(t0, t1) = (
|
||||
ecc_mul(
|
||||
13911018583007884881416842514661274050567796652031922980888952067142200734890,
|
||||
6304656948134906299141761906515211516376236447819044970320185642735642777036,
|
||||
16273630658577275004922498653030603356133576819117084202553121866583118864964,
|
||||
6490159372778831696763963776713702553449715395136256408127406430701013586737,
|
||||
instances[0]
|
||||
)
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
10634526547038245645834822324032425487434811507756950001533785848774317018670,
|
||||
11025818855933089539342999945076144168100709119485154428833847826982360951459,
|
||||
21465583338900056601761668793508143213048509206826828900542864688378093593107,
|
||||
18916078441896187703473496284050716429170517783995157941513585201547834049281,
|
||||
instances[1],
|
||||
t0,
|
||||
t1
|
||||
@@ -370,23 +370,41 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
13485936455723319058155687139769502499697405985650416391707184524158646623799,
|
||||
16234009237501684544798205490615498675425737095147152991328466405207467143566,
|
||||
6343857336395576108841088300387244434710621968858839561085778033655098739860,
|
||||
8647137667680968494319179221347060255241434220013711910139382436020093396308,
|
||||
instances[2],
|
||||
t0,
|
||||
t1
|
||||
)
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
17609998990685530094209191702545036897101285294398654477281719279316619940710,
|
||||
7891327626892441842954365090016786852185025910332850053066512639794082797200,
|
||||
instances[3],
|
||||
t0,
|
||||
t1
|
||||
)
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
1271298011119556361067568041994358027954229594187408866479678256322993207430,
|
||||
16519855264988006509000373008036578681979317060055767197977112967887569978562,
|
||||
instances[4],
|
||||
t0,
|
||||
t1
|
||||
)
|
||||
);
|
||||
(m[0], m[1]) = (
|
||||
ecc_mul_add(
|
||||
21550585789286941025166870525096478397065943995678337623823808437877187678077,
|
||||
4447338868884713453743453617617291019986465683944733951178865127876671635659,
|
||||
instances[3],
|
||||
9106880861932848269529912338578777683259870408474914617967634470292361865683,
|
||||
3191458938194545761508145121615374474619318896841102235687991186359560600763,
|
||||
instances[5],
|
||||
t0,
|
||||
t1
|
||||
)
|
||||
);
|
||||
update_hash_scalar(7326291674247555594112707886804937707847188185923070866278273345303869756280, absorbing, 0);
|
||||
update_hash_scalar(16714713909008743871958519800387174981836263428094013165455393524274317552599, absorbing, 0);
|
||||
update_hash_point(m[0], m[1], absorbing, 2);
|
||||
for (t0 = 0; t0 <= 4; t0++) {
|
||||
update_hash_point(proof[0 + t0 * 2], proof[1 + t0 * 2], absorbing, 5 + t0 * 3);
|
||||
@@ -413,31 +431,31 @@ library RollupVerifier {
|
||||
update_hash_point(proof[137 + t0 * 2], proof[138 + t0 * 2], absorbing, 1 + t0 * 3);
|
||||
}
|
||||
m[8] = (squeeze_challenge(absorbing, 13));
|
||||
m[9] = (mulmod(m[6], 6143038923529407703646399695489445107254060255791852207908457597807435305312, q_mod));
|
||||
m[10] = (mulmod(m[6], 7358966525675286471217089135633860168646304224547606326237275077574224349359, q_mod));
|
||||
m[11] = (mulmod(m[6], 11377606117859914088982205826922132024839443553408109299929510653283289974216, q_mod));
|
||||
m[12] = (fr_pow(m[6], 33554432));
|
||||
m[9] = (mulmod(m[6], 13446667982376394161563610564587413125564757801019538732601045199901075958935, q_mod));
|
||||
m[10] = (mulmod(m[6], 16569469942529664681363945218228869388192121720036659574609237682362097667612, q_mod));
|
||||
m[11] = (mulmod(m[6], 14803907026430593724305438564799066516271154714737734572920456128449769927233, q_mod));
|
||||
m[12] = (fr_pow(m[6], 67108864));
|
||||
m[13] = (addmod(m[12], q_mod - 1, q_mod));
|
||||
m[14] = (mulmod(21888242219518804655518433051623070663413851959604507555939307129453691614729, m[13], q_mod));
|
||||
m[14] = (mulmod(21888242545679039938882419398440172875981108180010270949818755658014750055173, m[13], q_mod));
|
||||
t0 = (addmod(m[6], q_mod - 1, q_mod));
|
||||
m[14] = (fr_div(m[14], t0));
|
||||
m[15] = (mulmod(3814514741328848551622746860665626251343731549210296844380905280010844577811, m[13], q_mod));
|
||||
t0 = (addmod(m[6], q_mod - 11377606117859914088982205826922132024839443553408109299929510653283289974216, q_mod));
|
||||
m[15] = (mulmod(3495999257316610708652455694658595065970881061159015347599790211259094641512, m[13], q_mod));
|
||||
t0 = (addmod(m[6], q_mod - 14803907026430593724305438564799066516271154714737734572920456128449769927233, q_mod));
|
||||
m[15] = (fr_div(m[15], t0));
|
||||
m[16] = (mulmod(14167635312934689395373925807699824183296350635557349457928542208657273886961, m[13], q_mod));
|
||||
t0 = (addmod(m[6], q_mod - 17329448237240114492580865744088056414251735686965494637158808787419781175510, q_mod));
|
||||
m[16] = (mulmod(12851378806584061886934576302961450669946047974813165594039554733293326536714, m[13], q_mod));
|
||||
t0 = (addmod(m[6], q_mod - 11377606117859914088982205826922132024839443553408109299929510653283289974216, q_mod));
|
||||
m[16] = (fr_div(m[16], t0));
|
||||
m[17] = (mulmod(12609034248192017902501772617940356704925468750503023243291639149763830461639, m[13], q_mod));
|
||||
t0 = (addmod(m[6], q_mod - 16569469942529664681363945218228869388192121720036659574609237682362097667612, q_mod));
|
||||
m[17] = (mulmod(14638077285440018490948843142723135319134576188472316769433007423695824509066, m[13], q_mod));
|
||||
t0 = (addmod(m[6], q_mod - 3693565015985198455139889557180396682968596245011005461846595820698933079918, q_mod));
|
||||
m[17] = (fr_div(m[17], t0));
|
||||
m[18] = (mulmod(12805242257443675784492534138904933930037912868081131057088370227525924812579, m[13], q_mod));
|
||||
t0 = (addmod(m[6], q_mod - 9741553891420464328295280489650144566903017206473301385034033384879943874347, q_mod));
|
||||
m[18] = (mulmod(18027939092386982308810165776478549635922357517986691900813373197616541191289, m[13], q_mod));
|
||||
t0 = (addmod(m[6], q_mod - 17329448237240114492580865744088056414251735686965494637158808787419781175510, q_mod));
|
||||
m[18] = (fr_div(m[18], t0));
|
||||
m[19] = (mulmod(6559137297042406441428413756926584610543422337862324541665337888392460442551, m[13], q_mod));
|
||||
t0 = (addmod(m[6], q_mod - 5723528081196465413808013109680264505774289533922470433187916976440924869204, q_mod));
|
||||
m[19] = (mulmod(912591536032578604421866340844550116335029274442283291811906603256731601654, m[13], q_mod));
|
||||
t0 = (addmod(m[6], q_mod - 6047398202650739717314770882059679662647667807426525133977681644606291529311, q_mod));
|
||||
m[19] = (fr_div(m[19], t0));
|
||||
m[20] = (mulmod(14811589476322888753142612645486192973009181596950146578897598212834285850868, m[13], q_mod));
|
||||
t0 = (addmod(m[6], q_mod - 7358966525675286471217089135633860168646304224547606326237275077574224349359, q_mod));
|
||||
m[20] = (mulmod(17248638560015646562374089181598815896736916575459528793494921668169819478628, m[13], q_mod));
|
||||
t0 = (addmod(m[6], q_mod - 16569469942529664681363945218228869388192121720036659574609237682362097667612, q_mod));
|
||||
m[20] = (fr_div(m[20], t0));
|
||||
t0 = (addmod(m[15], m[16], q_mod));
|
||||
t0 = (addmod(t0, m[17], q_mod));
|
||||
@@ -724,8 +742,8 @@ library RollupVerifier {
|
||||
(t0, t1) = (ecc_mul_add_pm(m, proof, 1461486238301980199876269201563775120819706402602, t0, t1));
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
18701609130775737229348071043080155034023979562517390395403433088802478899758,
|
||||
15966955543930185772599298905781740007968379271659670990460125132276790404701,
|
||||
1166255827574633395469889753099263335112651429543747917860844891223509395230,
|
||||
18119530258797056675590474142263379269133137917926199526995010149706608452268,
|
||||
m[78],
|
||||
t0,
|
||||
t1
|
||||
@@ -733,8 +751,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
10391672869328159104536012527288890078475214572275421477472198141744100604180,
|
||||
16383182967525077486800851500412772270268328143041811261940514978333847876450,
|
||||
479654250230311733675045936187074887335076118790675548184957988765243051391,
|
||||
3100719863754926915077773261837642988281275398456491618898287285885297258973,
|
||||
m[77],
|
||||
t0,
|
||||
t1
|
||||
@@ -742,8 +760,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
1694121668121560366967381814358868176695875056710903754887787227675156636991,
|
||||
6288755472313871386012926867179622380057563139110460659328016508371672965822,
|
||||
3244117516185602927429536955777596704962143625995582449305913349309466588374,
|
||||
4949447249861524239830935874731901209583893161129086694779290040738731707868,
|
||||
m[76],
|
||||
t0,
|
||||
t1
|
||||
@@ -751,8 +769,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
8449090587209846475328734419746789925412190193479844231777165308243174237722,
|
||||
19620423218491500875965944829407986067794157844846402182805878618955604592848,
|
||||
14948547489533026990535642276664751166524290089518721217701084060838942037816,
|
||||
4158304819018152066924650590566402375351800342702049911667413813453648544913,
|
||||
m[75],
|
||||
t0,
|
||||
t1
|
||||
@@ -760,8 +778,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
5053208336959682582031156680199539869251745263409434673229644546747696847142,
|
||||
2515271708296970065769200367712058290268116287798438948140802173656220671206,
|
||||
12409839630787558779666051790740339639835641801241950167020910758875751567721,
|
||||
10190386726927990167988725115981898191213252554332296547744162818590468069671,
|
||||
m[74],
|
||||
t0,
|
||||
t1
|
||||
@@ -769,8 +787,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
14044565934581841113280816557133159251170886931106151374890478449607604267942,
|
||||
4516676687937794780030405510740994119381246893674971835541700695978704585552,
|
||||
17970998203939514710036667497443822563987440725661639935300105673829885028203,
|
||||
5681616020208389658397995048088678631695525787311431942560298329387592854586,
|
||||
m[73],
|
||||
t0,
|
||||
t1
|
||||
@@ -778,8 +796,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
8808629196631084710334110767449499515582902470045288549019060600095073238105,
|
||||
13294364470509711632739201553507258372326885785844949555702886281377427438475,
|
||||
5422170891120229182360564594866246906567981360038071999127508208070564034524,
|
||||
14722029885921976755274052080011416898514630484317773275415621146460924728182,
|
||||
m[72],
|
||||
t0,
|
||||
t1
|
||||
@@ -787,8 +805,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
5025513109896000321643874120256520860696240548707294083465215087271048364447,
|
||||
3512836639252013523316566987122028012000136443005216091303269685639094608348,
|
||||
3955318928206501525438681058758319558200398421433597349851235741670899388496,
|
||||
15892053452767975688653514510353871405466169306176036727161401156637227884251,
|
||||
m[71],
|
||||
t0,
|
||||
t1
|
||||
@@ -796,8 +814,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
20143075587083355112417414887372164250381042430441089145485481665404780784123,
|
||||
9674175910548207533970570126063643897609459066877075659644076646142886425503,
|
||||
18451207565454686459225553564649439057698581050443267052774483067774590965003,
|
||||
4419693978684087696088612463773850574955779922948673330581664932100506990694,
|
||||
m[70],
|
||||
t0,
|
||||
t1
|
||||
@@ -805,8 +823,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
15449875505347857882486479091299788291220259329814373554032711960946424724459,
|
||||
18962357525499685082729877436365914814836051345178637509857216081206536249101,
|
||||
847101878434221983907574308143360385944534458215526175646288607915875901481,
|
||||
2846353475656269162370753247605184679473264230467654203502980134120309217445,
|
||||
m[69],
|
||||
t0,
|
||||
t1
|
||||
@@ -814,8 +832,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
8808629196631084710334110767449499515582902470045288549019060600095073238105,
|
||||
13294364470509711632739201553507258372326885785844949555702886281377427438475,
|
||||
5422170891120229182360564594866246906567981360038071999127508208070564034524,
|
||||
14722029885921976755274052080011416898514630484317773275415621146460924728182,
|
||||
m[68],
|
||||
t0,
|
||||
t1
|
||||
@@ -823,8 +841,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
4919836553908828082540426444868776555669883964231731088484431671272015675682,
|
||||
2534996469663628472218664436969797350677809756735321673130157881813913441609,
|
||||
12355852135968866678343538084506414981897123075397230437920965961095525036339,
|
||||
19173350083521771086213125757940272853888577158427508914933730457941026326040,
|
||||
m[67],
|
||||
t0,
|
||||
t1
|
||||
@@ -832,8 +850,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
11667150339256836494926506499230187360957884531183800528342644917396989453992,
|
||||
15540782144062394272475578831064080588044323224200171932910650185556553066875,
|
||||
21537162186981550637121053147454964150809482185492418377558290311964245821909,
|
||||
2173324946696678910860567153502925685634606622474439126082176533839311460335,
|
||||
m[66],
|
||||
t0,
|
||||
t1
|
||||
@@ -841,8 +859,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
7298741378311576950839968993357330108079245118485170808123459961337830256312,
|
||||
10327561179499117619949936626306234488421661318541529469701192193684736307992,
|
||||
20702481083445183838662364419201395944400358423071711333544748994437443350157,
|
||||
21729036491728923882358088642589857779818948470983153549909552615176584955200,
|
||||
m[65],
|
||||
t0,
|
||||
t1
|
||||
@@ -850,8 +868,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
19156320437354843782276382482504062704637529342417677454208679985931193905144,
|
||||
12513036134308417802230431028731202760516379532825961661396005403922128650283,
|
||||
5211075648402252045446907842677410998750480902260529776286467677659191740672,
|
||||
17759936859541227097052484319437171023743724174885338509498798745592136568923,
|
||||
m[64],
|
||||
t0,
|
||||
t1
|
||||
@@ -859,8 +877,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
21344975294019301064497004820288763682448968861642019035490416932201272957274,
|
||||
10527619823264344893410550194287064640208153251186939130321425213582959780489,
|
||||
5685082624811934526131077036509066197941130699019907200139767495570575867807,
|
||||
9975752329518147542127949868789945608848626426600733728808879384778577859545,
|
||||
m[63],
|
||||
t0,
|
||||
t1
|
||||
@@ -868,8 +886,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
8972742415650205333409282370033440562593431348747288268814492203356823531160,
|
||||
8116706321112691122771049432546166822575953322170688547310064134261753771143,
|
||||
1845955600044282712468400114813806019045133083112296001842856684609288249746,
|
||||
6677624509889210837770197526955652810854887548330294041671470991988491766303,
|
||||
m[62],
|
||||
t0,
|
||||
t1
|
||||
@@ -877,8 +895,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
2245383788954722547301665173770198299224442299145553661157120655982065376923,
|
||||
21429627532145565836455474503387893562363999035988060101286707048187310790834,
|
||||
17721426954552427189787075605835833086212392642349293317822925006771731953198,
|
||||
10818582862561493154030196266254401851195091198556669943079029419869326006448,
|
||||
m[61],
|
||||
t0,
|
||||
t1
|
||||
@@ -886,8 +904,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
6631831869726773976361406817204839637256208337970281843457872807848960103655,
|
||||
9564029493986604546558813596663080644256762699468834511701525072767927949801,
|
||||
10224195420706066705577574946990328089867884648164309818089282930621493257750,
|
||||
3961164971057442035153270823831734824136501489880082889417523554417504868473,
|
||||
m[60],
|
||||
t0,
|
||||
t1
|
||||
@@ -895,8 +913,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
11480433023546787855799302686493624232665854025790899812568432142639901048711,
|
||||
19408335616099148180409133533838326787843523379558500985213116784449716389602,
|
||||
4155760488117491189818018229959225087159948854404593659816501566044290851616,
|
||||
7849169269773333823959590214273366557169699873629739076719523623811579483219,
|
||||
m[59],
|
||||
t0,
|
||||
t1
|
||||
@@ -904,8 +922,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
17119009547436104907589161251911916154539209413889810725547125453954285498068,
|
||||
16196009614025712805558792610177918739658373559330006740051047693948800191562,
|
||||
9303688548891777886487749234688027352493881691026887577351708905397127609597,
|
||||
15420408437274623857443274867832176492025874147466147921781316121716419230415,
|
||||
m[58],
|
||||
t0,
|
||||
t1
|
||||
@@ -913,8 +931,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
4833170740960210126662783488087087210159995687268566750051519788650425720369,
|
||||
14321097009933429277686973550787181101481482473464521566076287626133354519061,
|
||||
1713011977361327447402228333889074876456179272285913377605323580535155713105,
|
||||
17494574374943878587945090358233307058027002207479570017169918665020362475592,
|
||||
m[57],
|
||||
t0,
|
||||
t1
|
||||
@@ -922,8 +940,8 @@ library RollupVerifier {
|
||||
);
|
||||
(t0, t1) = (
|
||||
ecc_mul_add(
|
||||
18650010323993268535055713787599480879302828622769515272251129462854128226895,
|
||||
11244246887388549559894193327128701737108444364011850111062992666532968469107,
|
||||
688560977158667877997491129442687540611216305867558421257325952561991356422,
|
||||
1877117185103259325255107191485730322497880777053300656925558921917058739650,
|
||||
m[56],
|
||||
t0,
|
||||
t1
|
||||
@@ -937,12 +955,15 @@ library RollupVerifier {
|
||||
}
|
||||
|
||||
function verify(uint256[] calldata proof, uint256[] calldata target_circuit_final_pair) public view {
|
||||
uint256[4] memory instances;
|
||||
uint256[6] memory instances;
|
||||
instances[0] = target_circuit_final_pair[0] & ((1 << 136) - 1);
|
||||
instances[1] = (target_circuit_final_pair[0] >> 136) + ((target_circuit_final_pair[1] & 1) << 136);
|
||||
instances[2] = target_circuit_final_pair[2] & ((1 << 136) - 1);
|
||||
instances[3] = (target_circuit_final_pair[2] >> 136) + ((target_circuit_final_pair[3] & 1) << 136);
|
||||
|
||||
instances[4] = target_circuit_final_pair[4];
|
||||
instances[5] = target_circuit_final_pair[5];
|
||||
|
||||
uint256 x0 = 0;
|
||||
uint256 x1 = 0;
|
||||
uint256 y0 = 0;
|
||||
@@ -961,7 +982,7 @@ library RollupVerifier {
|
||||
g2_points[1] = get_verify_circuit_g2_n();
|
||||
|
||||
checked = pairing(g1_points, g2_points);
|
||||
require(checked, "verified failed");
|
||||
require(checked);
|
||||
|
||||
g1_points[0].x = target_circuit_final_pair[0];
|
||||
g1_points[0].y = target_circuit_final_pair[1];
|
||||
@@ -971,6 +992,6 @@ library RollupVerifier {
|
||||
g2_points[1] = get_target_circuit_g2_n();
|
||||
|
||||
checked = pairing(g1_points, g2_points);
|
||||
require(checked, "verified failed");
|
||||
require(checked);
|
||||
}
|
||||
}
|
||||
|
||||
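The verifier now consumes six public-input limbs instead of four: the first four are the two final-pair field elements split into 136-bit low/high limbs, and the last two are passed through unchanged. The short Go illustration below mirrors only the bit arithmetic used in verify(); the input values are made up.

package main

import (
	"fmt"
	"math/big"
)

// splitLimbs reproduces the packing used by verify():
//   lo = x & ((1 << 136) - 1)
//   hi = (x >> 136) + ((next & 1) << 136)
func splitLimbs(x, next *big.Int) (lo, hi *big.Int) {
	mask := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 136), big.NewInt(1))
	lo = new(big.Int).And(x, mask)
	hi = new(big.Int).Rsh(new(big.Int).Set(x), 136)
	carry := new(big.Int).Lsh(new(big.Int).And(next, big.NewInt(1)), 136)
	hi.Add(hi, carry)
	return lo, hi
}

func main() {
	x, _ := new(big.Int).SetString("1234567890123456789012345678901234567890123456789012345678901234567890", 10)
	next := big.NewInt(1)
	lo, hi := splitLimbs(x, next)
	fmt.Println("instances[0] =", lo)
	fmt.Println("instances[1] =", hi)
}
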
43
contracts/src/test/L2TxFeeVault.t.sol
Normal file
@@ -0,0 +1,43 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
import { DSTestPlus } from "solmate/test/utils/DSTestPlus.sol";
|
||||
|
||||
import { MockScrollMessenger } from "./mocks/MockScrollMessenger.sol";
|
||||
import { L2TxFeeVault } from "../L2/predeploys/L2TxFeeVault.sol";
|
||||
|
||||
contract L2TxFeeVaultTest is DSTestPlus {
|
||||
MockScrollMessenger private messenger;
|
||||
L2TxFeeVault private vault;
|
||||
|
||||
function setUp() public {
|
||||
messenger = new MockScrollMessenger();
|
||||
vault = new L2TxFeeVault(address(messenger), address(1));
|
||||
}
|
||||
|
||||
function testCantWithdrawBelowMinimum() public {
|
||||
hevm.deal(address(vault), 9 ether);
|
||||
hevm.expectRevert("FeeVault: withdrawal amount must be greater than minimum withdrawal amount");
|
||||
vault.withdraw();
|
||||
}
|
||||
|
||||
function testWithdrawOnce() public {
|
||||
hevm.deal(address(vault), 11 ether);
|
||||
vault.withdraw();
|
||||
assertEq(address(messenger).balance, 11 ether);
|
||||
assertEq(vault.totalProcessed(), 11 ether);
|
||||
}
|
||||
|
||||
function testWithdrawTwice() public {
|
||||
hevm.deal(address(vault), 11 ether);
|
||||
vault.withdraw();
|
||||
assertEq(address(messenger).balance, 11 ether);
|
||||
assertEq(vault.totalProcessed(), 11 ether);
|
||||
|
||||
hevm.deal(address(vault), 22 ether);
|
||||
vault.withdraw();
|
||||
assertEq(address(messenger).balance, 33 ether);
|
||||
assertEq(vault.totalProcessed(), 33 ether);
|
||||
}
|
||||
}
|
@@ -15,7 +15,8 @@ test:

libzkp:
	cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
	cp -r ../common/libzkp/interface ./verifier/lib
	rm -rf ./verifier/lib && cp -r ../common/libzkp/interface ./verifier/lib
	find ../common | grep libzktrie.so | xargs -i cp {} ./verifier/lib/

coordinator: libzkp ## Builds the Coordinator instance.
	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd

File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -103,7 +103,7 @@ func action(ctx *cli.Context) error {
			"%s:%d",
			ctx.String(wsListenAddrFlag.Name),
			ctx.Int(wsPortFlag.Name)),
			apis)
			apis, cfg.RollerManagerConfig.CompressionLevel)
		if err != nil {
			log.Crit("Could not start WS api", "error", err)
		}

@@ -1,7 +1,7 @@
{
  "roller_manager_config": {
    "compression_level": 9,
    "rollers_per_session": 1,
    "verifier_endpoint": "/tmp/verifier.sock",
    "collection_time": 180,
    "token_time_to_live": 60,
    "verifier": {

@@ -12,6 +12,7 @@ import (

// RollerManagerConfig loads sequencer configuration items.
type RollerManagerConfig struct {
	CompressionLevel int `json:"compression_level,omitempty"`
	// asc or desc (default: asc)
	OrderSession string `json:"order_session,omitempty"`
	// The amount of rollers to pick per proof generation session.

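The new compression_level field in the config above is what the coordinator's action function now feeds into StartWSEndpoint. Below is a hedged sketch of loading it from JSON; the struct is a trimmed stand-in covering only the fields visible here, not the repository's full RollerManagerConfig.

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down stand-in for RollerManagerConfig, limited to fields shown in the diff above.
type rollerManagerConfig struct {
	CompressionLevel  int    `json:"compression_level,omitempty"`
	RollersPerSession uint8  `json:"rollers_per_session,omitempty"`
	CollectionTime    int    `json:"collection_time,omitempty"`
	VerifierEndpoint  string `json:"verifier_endpoint,omitempty"`
}

func main() {
	raw := []byte(`{"compression_level": 9, "rollers_per_session": 1, "collection_time": 180}`)
	var cfg rollerManagerConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// The loaded value would then be passed as the new third argument:
	// StartWSEndpoint(endpoint, apis, cfg.CompressionLevel)
	fmt.Println("ws compression level:", cfg.CompressionLevel)
}
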
||||
@@ -5,7 +5,7 @@ go 1.18
|
||||
require (
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/urfave/cli/v2 v2.10.2
|
||||
golang.org/x/sync v0.1.0
|
||||
@@ -29,14 +29,14 @@ require (
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.8.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.3.1 // indirect
|
||||
github.com/scroll-tech/zktrie v0.4.3 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.10 // indirect
|
||||
github.com/tklauser/numcpus v0.4.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
golang.org/x/crypto v0.4.0 // indirect
|
||||
golang.org/x/sys v0.3.0 // indirect
|
||||
golang.org/x/crypto v0.6.0 // indirect
|
||||
golang.org/x/sys v0.5.0 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
@@ -349,11 +349,10 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
|
||||
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
|
||||
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
|
||||
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod h1:OH4ZTAz6RM1IL0xcQ1zM6+Iy9s2vtcYqqwcEQdfHV7g=
|
||||
github.com/scroll-tech/zktrie v0.4.3 h1:RyhusIu8F8u5ITmzqZjkAwlL6jdC9TK9i6tfuJoZcpk=
|
||||
github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
@@ -424,8 +423,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
|
||||
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
|
||||
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -540,8 +539,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -553,7 +552,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
|
||||
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
||||
@@ -65,7 +65,6 @@ type Manager struct {
|
||||
// A map containing proof failed or verify failed proof.
|
||||
rollerPool cmap.ConcurrentMap
|
||||
|
||||
// TODO: once put into use, should add to graceful restart.
|
||||
failedSessionInfos map[string]*SessionInfo
|
||||
|
||||
// A direct connection to the Halo2 verifier, used to verify
|
||||
@@ -326,54 +325,74 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
|
||||
// CollectProofs collects proofs corresponding to a proof generation session.
|
||||
func (m *Manager) CollectProofs(sess *session) {
|
||||
select {
|
||||
case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
|
||||
m.mu.Lock()
|
||||
defer func() {
|
||||
delete(m.sessions, sess.info.ID)
|
||||
m.mu.Unlock()
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
|
||||
m.mu.Lock()
|
||||
defer func() {
|
||||
// TODO: remove the clean-up, rollers report healthy status.
|
||||
for pk := range sess.info.Rollers {
|
||||
m.freeTaskIDForRoller(pk, sess.info.ID)
|
||||
}
|
||||
delete(m.sessions, sess.info.ID)
|
||||
m.mu.Unlock()
|
||||
}()
|
||||
|
||||
// Pick a random winner.
|
||||
// First, round up the keys that actually sent in a valid proof.
|
||||
var participatingRollers []string
|
||||
for pk, roller := range sess.info.Rollers {
|
||||
if roller.Status == orm.RollerProofValid {
|
||||
participatingRollers = append(participatingRollers, pk)
|
||||
// Pick a random winner.
|
||||
// First, round up the keys that actually sent in a valid proof.
|
||||
var participatingRollers []string
|
||||
for pk, roller := range sess.info.Rollers {
|
||||
if roller.Status == orm.RollerProofValid {
|
||||
participatingRollers = append(participatingRollers, pk)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Ensure we got at least one proof before selecting a winner.
|
||||
if len(participatingRollers) == 0 {
|
||||
// record failed session.
|
||||
errMsg := "proof generation session ended without receiving any valid proofs"
|
||||
m.addFailedSession(sess, errMsg)
|
||||
log.Warn(errMsg, "session id", sess.info.ID)
|
||||
// Set status as skipped.
|
||||
// Note that this is only a workaround for testnet here.
|
||||
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
|
||||
// so as to re-distribute the task in the future
|
||||
if err := m.orm.UpdateProvingStatus(sess.info.ID, orm.ProvingTaskFailed); err != nil {
|
||||
log.Error("fail to reset task_status as Unassigned", "id", sess.info.ID, "err", err)
|
||||
// Ensure we got at least one proof before selecting a winner.
|
||||
if len(participatingRollers) == 0 {
|
||||
// record failed session.
|
||||
errMsg := "proof generation session ended without receiving any valid proofs"
|
||||
m.addFailedSession(sess, errMsg)
|
||||
log.Warn(errMsg, "session id", sess.info.ID)
|
||||
// Set status as skipped.
|
||||
// Note that this is only a workaround for testnet here.
|
||||
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
|
||||
// so as to re-distribute the task in the future
|
||||
if err := m.orm.UpdateProvingStatus(sess.info.ID, orm.ProvingTaskFailed); err != nil {
|
||||
log.Error("fail to reset task_status as Unassigned", "id", sess.info.ID, "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Now, select a random index for this slice.
|
||||
randIndex := mathrand.Intn(len(participatingRollers))
|
||||
_ = participatingRollers[randIndex]
|
||||
// TODO: reward winner
|
||||
return
|
||||
}
|
||||
|
||||
// Now, select a random index for this slice.
|
||||
randIndex := mathrand.Intn(len(participatingRollers))
|
||||
_ = participatingRollers[randIndex]
|
||||
// TODO: reward winner
|
||||
return
|
||||
|
||||
case ret := <-sess.finishChan:
|
||||
m.mu.Lock()
|
||||
sess.info.Rollers[ret.pk].Status = ret.status
|
||||
m.mu.Unlock()
|
||||
if err := m.orm.SetSessionInfo(sess.info); err != nil {
|
||||
log.Error("db set session info fail", "pk", ret.pk, "error", err)
|
||||
case ret := <-sess.finishChan:
|
||||
m.mu.Lock()
|
||||
sess.info.Rollers[ret.pk].Status = ret.status
|
||||
if m.isSessionFailed(sess.info) {
|
||||
if err := m.orm.UpdateProvingStatus(ret.id, orm.ProvingTaskFailed); err != nil {
|
||||
log.Error("failed to update proving_status as failed", "msg.ID", ret.id, "error", err)
|
||||
}
|
||||
}
|
||||
if err := m.orm.SetSessionInfo(sess.info); err != nil {
|
||||
log.Error("db set session info fail", "pk", ret.pk, "error", err)
|
||||
}
|
||||
m.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) isSessionFailed(info *orm.SessionInfo) bool {
|
||||
for _, roller := range info.Rollers {
|
||||
if roller.Status != orm.RollerProofInvalid {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
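The reworked CollectProofs loop now marks a batch as failed as soon as every roller in the session has returned an invalid proof, using the new isSessionFailed helper above. The following compact Go sketch restates that rule in isolation; the types are simplified stand-ins for the ORM structs, not the coordinator's real definitions.

package main

import "fmt"

type rollerStatus int

const (
	rollerAssigned rollerStatus = iota
	rollerProofValid
	rollerProofInvalid
)

// isSessionFailed mirrors the coordinator's rule: a session only counts as
// failed once every participating roller has reported an invalid proof.
func isSessionFailed(rollers map[string]rollerStatus) bool {
	for _, status := range rollers {
		if status != rollerProofInvalid {
			return false
		}
	}
	return true
}

func main() {
	session := map[string]rollerStatus{
		"roller-a": rollerProofInvalid,
		"roller-b": rollerAssigned, // still pending, so the session is not failed yet
	}
	fmt.Println(isSessionFailed(session)) // false
	session["roller-b"] = rollerProofInvalid
	fmt.Println(isSessionFailed(session)) // true
}
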
// APIs collect API services.
|
||||
func (m *Manager) APIs() []rpc.API {
|
||||
return []rpc.API{
|
||||
@@ -435,6 +454,7 @@ func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) (success boo
|
||||
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
|
||||
roller := m.selectRoller()
|
||||
if roller == nil {
|
||||
log.Info("selectRoller returns nil")
|
||||
break
|
||||
}
|
||||
log.Info("roller is picked", "session id", task.ID, "name", roller.Name, "public key", roller.PublicKey)
|
||||
@@ -452,7 +472,7 @@ func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) (success boo
|
||||
}
|
||||
|
||||
// Update session proving status as assigned.
|
||||
if err := m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskAssigned); err != nil {
|
||||
if err = m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskAssigned); err != nil {
|
||||
log.Error("failed to update task status", "id", task.ID, "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package coordinator_test
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
@@ -64,6 +65,8 @@ func TestApis(t *testing.T) {
|
||||
t.Run("TestHandshake", testHandshake)
|
||||
t.Run("TestFailedHandshake", testFailedHandshake)
|
||||
t.Run("TestSeveralConnections", testSeveralConnections)
|
||||
t.Run("TestValidProof", testValidProof)
|
||||
t.Run("TestInvalidProof", testInvalidProof)
|
||||
t.Run("TestIdleRollerSelection", testIdleRollerSelection)
|
||||
// TODO: Restart roller alone when received task, can add this test case in integration-test.
|
||||
//t.Run("TestRollerReconnect", testRollerReconnect)
|
||||
@@ -84,7 +87,7 @@ func testHandshake(t *testing.T) {
|
||||
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)
|
||||
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
|
||||
defer func() {
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
@@ -105,7 +108,7 @@ func testFailedHandshake(t *testing.T) {
|
||||
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)
|
||||
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
|
||||
defer func() {
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
@@ -171,7 +174,7 @@ func testSeveralConnections(t *testing.T) {
|
||||
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)
|
||||
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
|
||||
defer func() {
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
@@ -215,6 +218,124 @@ func testSeveralConnections(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
func testValidProof(t *testing.T) {
|
||||
// Create db handler and reset db.
|
||||
l2db, err := database.NewOrmFactory(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
|
||||
defer l2db.Close()
|
||||
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 3, wsURL)
|
||||
defer func() {
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
}()
|
||||
|
||||
// create mock rollers.
|
||||
rollers := make([]*mockRoller, 3)
|
||||
for i := 0; i < len(rollers); i++ {
|
||||
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
|
||||
// only roller 0 submits valid proof.
|
||||
rollers[i].waitTaskAndSendProof(t, time.Second, false, i == 0)
|
||||
}
|
||||
defer func() {
|
||||
// close connection
|
||||
for _, roller := range rollers {
|
||||
roller.close()
|
||||
}
|
||||
}()
|
||||
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers())
|
||||
|
||||
var ids = make([]string, 1)
|
||||
dbTx, err := l2db.Beginx()
|
||||
assert.NoError(t, err)
|
||||
for i := range ids {
|
||||
ID, err := l2db.NewBatchInDBTx(dbTx, &orm.BlockInfo{Number: uint64(i)}, &orm.BlockInfo{Number: uint64(i)}, "0f", 1, 194676)
|
||||
assert.NoError(t, err)
|
||||
ids[i] = ID
|
||||
}
|
||||
assert.NoError(t, dbTx.Commit())
|
||||
|
||||
// verify proof status
|
||||
var (
|
||||
tick = time.Tick(500 * time.Millisecond)
|
||||
tickStop = time.Tick(10 * time.Second)
|
||||
)
|
||||
for len(ids) > 0 {
|
||||
select {
|
||||
case <-tick:
|
||||
status, err := l2db.GetProvingStatusByID(ids[0])
|
||||
assert.NoError(t, err)
|
||||
if status == orm.ProvingTaskVerified {
|
||||
ids = ids[1:]
|
||||
}
|
||||
case <-tickStop:
|
||||
t.Error("failed to check proof status")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testInvalidProof(t *testing.T) {
	// Create db handler and reset db.
	l2db, err := database.NewOrmFactory(cfg.DBConfig)
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
	defer l2db.Close()

	// Setup coordinator and ws server.
	wsURL := "ws://" + randomURL()
	rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 3, wsURL)
	defer func() {
		handler.Shutdown(context.Background())
		rollerManager.Stop()
	}()

	// create mock rollers.
	rollers := make([]*mockRoller, 3)
	for i := 0; i < len(rollers); i++ {
		rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
		rollers[i].waitTaskAndSendProof(t, time.Second, false, false)
	}
	defer func() {
		// close connection
		for _, roller := range rollers {
			roller.close()
		}
	}()
	assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers())

	var ids = make([]string, 1)
	dbTx, err := l2db.Beginx()
	assert.NoError(t, err)
	for i := range ids {
		ID, err := l2db.NewBatchInDBTx(dbTx, &orm.BlockInfo{Number: uint64(i)}, &orm.BlockInfo{Number: uint64(i)}, "0f", 1, 194676)
		assert.NoError(t, err)
		ids[i] = ID
	}
	assert.NoError(t, dbTx.Commit())

	// verify proof status
	var (
		tick     = time.Tick(500 * time.Millisecond)
		tickStop = time.Tick(10 * time.Second)
	)
	for len(ids) > 0 {
		select {
		case <-tick:
			status, err := l2db.GetProvingStatusByID(ids[0])
			assert.NoError(t, err)
			if status == orm.ProvingTaskFailed {
				ids = ids[1:]
			}
		case <-tickStop:
			t.Error("failed to check proof status")
			return
		}
	}
}
|
||||
|
||||
func testIdleRollerSelection(t *testing.T) {
	// Create db handler and reset db.
@@ -225,7 +346,7 @@ func testIdleRollerSelection(t *testing.T) {

	// Setup coordinator and ws server.
	wsURL := "ws://" + randomURL()
-	rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)
+	rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
	defer func() {
		handler.Shutdown(context.Background())
		rollerManager.Stop()
@@ -235,7 +356,7 @@ func testIdleRollerSelection(t *testing.T) {
	rollers := make([]*mockRoller, 20)
	for i := 0; i < len(rollers); i++ {
		rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
-		rollers[i].waitTaskAndSendProof(t, time.Second, false)
+		rollers[i].waitTaskAndSendProof(t, time.Second, false, true)
	}
	defer func() {
		// close connection
@@ -294,12 +415,12 @@ func testGracefulRestart(t *testing.T) {

	// Setup coordinator and ws server.
	wsURL := "ws://" + randomURL()
-	rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)
+	rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)

	// create mock roller
	roller := newMockRoller(t, "roller_test", wsURL)
	// wait 10 seconds, coordinator restarts before roller submits proof
-	roller.waitTaskAndSendProof(t, 10*time.Second, false)
+	roller.waitTaskAndSendProof(t, 10*time.Second, false, true)

	// wait for coordinator to dispatch task
	<-time.After(5 * time.Second)
@@ -311,7 +432,7 @@ func testGracefulRestart(t *testing.T) {
	rollerManager.Stop()

	// Setup new coordinator and ws server.
-	newRollerManager, newHandler := setupCoordinator(t, cfg.DBConfig, wsURL)
+	newRollerManager, newHandler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
	defer func() {
		newHandler.Shutdown(context.Background())
		newRollerManager.Stop()
@@ -329,7 +450,7 @@ func testGracefulRestart(t *testing.T) {
	}

	// will overwrite the roller client for `SubmitProof`
-	roller.waitTaskAndSendProof(t, time.Millisecond*500, true)
+	roller.waitTaskAndSendProof(t, time.Millisecond*500, true, true)
	defer roller.close()

	// verify proof status
@@ -355,13 +476,13 @@ func testGracefulRestart(t *testing.T) {
	}
}

-func setupCoordinator(t *testing.T, dbCfg *database.DBConfig, wsURL string) (rollerManager *coordinator.Manager, handler *http.Server) {
+func setupCoordinator(t *testing.T, dbCfg *database.DBConfig, rollersPerSession uint8, wsURL string) (rollerManager *coordinator.Manager, handler *http.Server) {
	// Get db handler.
	db, err := database.NewOrmFactory(dbCfg)
	assert.True(t, assert.NoError(t, err), "failed to get db handler.")

	rollerManager, err = coordinator.New(context.Background(), &coordinator_config.RollerManagerConfig{
-		RollersPerSession: 1,
+		RollersPerSession: rollersPerSession,
		Verifier:          &coordinator_config.VerifierConfig{MockMode: true},
		CollectionTime:    1,
		TokenTimeToLive:   5,
@@ -370,7 +491,7 @@ func setupCoordinator(t *testing.T, dbCfg *database.DBConfig, wsURL string) (rol
	assert.NoError(t, rollerManager.Start())

	// start ws service
-	handler, _, err = utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], rollerManager.APIs())
+	handler, _, err = utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], rollerManager.APIs(), flate.NoCompression)
	assert.NoError(t, err)

	return rollerManager, handler
@@ -448,7 +569,7 @@ func (r *mockRoller) releaseTasks() {
}

// Wait for the proof task, after receiving the proof task, roller submits proof after proofTime secs.
-func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool) {
+func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool, validProof bool) {
	// simulating the case that the roller first disconnects and then reconnects to the coordinator
	// the Subscription and its `Err()` channel will be closed, and the coordinator will `freeRoller()`
	if reconnect {
@@ -464,10 +585,10 @@ func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration,
	r.releaseTasks()

	r.stopCh = make(chan struct{})
-	go r.loop(t, r.client, proofTime, r.stopCh)
+	go r.loop(t, r.client, proofTime, validProof, r.stopCh)
}

-func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, stopCh chan struct{}) {
+func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, validProof bool, stopCh chan struct{}) {
	for {
		select {
		case task := <-r.taskCh:
@@ -485,6 +606,9 @@ func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.D
					Proof: &message.AggProof{},
				},
			}
+			if !validProof {
+				proof.Status = message.StatusProofError
+			}
			assert.NoError(t, proof.Sign(r.privKey))
			ok, err := client.SubmitProof(context.Background(), proof)
			assert.NoError(t, err)
|
||||
|
||||
@@ -106,17 +106,18 @@ func (m *Manager) freeTaskIDForRoller(pk string, id string) {
}

// GetNumberOfIdleRollers return the count of idle rollers.
-func (m *Manager) GetNumberOfIdleRollers() int {
-	pubkeys := m.rollerPool.Keys()
-	for i := 0; i < len(pubkeys); i++ {
-		if val, ok := m.rollerPool.Get(pubkeys[i]); ok {
+func (m *Manager) GetNumberOfIdleRollers() (count int) {
+	for i, pk := range m.rollerPool.Keys() {
+		if val, ok := m.rollerPool.Get(pk); ok {
			r := val.(*rollerNode)
-			if r.TaskIDs.Count() > 0 {
-				pubkeys[i], pubkeys = pubkeys[len(pubkeys)-1], pubkeys[:len(pubkeys)-1]
+			if r.TaskIDs.Count() == 0 {
+				count++
			}
+		} else {
+			log.Error("rollerPool Get fail", "pk", pk, "idx", i, "pk len", pk)
		}
	}
-	return len(pubkeys)
+	return count
}

func (m *Manager) selectRoller() *rollerNode {
@@ -128,6 +129,8 @@ func (m *Manager) selectRoller() *rollerNode {
			if r.TaskIDs.Count() == 0 {
				return r
			}
+		} else {
+			log.Error("rollerPool Get fail", "pk", pubkeys[idx.Int64()], "idx", idx.Int64(), "pk len", len(pubkeys))
		}
		pubkeys[idx.Int64()], pubkeys = pubkeys[0], pubkeys[1:]
	}
|
||||
|
||||
@@ -3,8 +3,8 @@
package verifier

/*
-#cgo LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl
-#cgo gpu LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart
+#cgo LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
+#cgo gpu LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -L${SRCDIR}/lib/ -lcudart -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "./lib/libzkp.h"
*/

@@ -16,17 +16,23 @@ import (
	"github.com/stretchr/testify/assert"
)

+const (
+	paramsPath = "../assets/test_params"
+	aggVkPath  = "../assets/agg_vk"
+	proofPath  = "../assets/agg_proof"
+)
+
func TestFFI(t *testing.T) {
	as := assert.New(t)
	cfg := &config.VerifierConfig{
		MockMode:   false,
-		ParamsPath: "../assets/test_params",
-		AggVkPath:  "../assets/agg_vk",
+		ParamsPath: paramsPath,
+		AggVkPath:  aggVkPath,
	}
	v, err := verifier.NewVerifier(cfg)
	as.NoError(err)

-	f, err := os.Open("../assets/agg_proof")
+	f, err := os.Open(proofPath)
	as.NoError(err)
	byt, err := io.ReadAll(f)
	as.NoError(err)
|
||||
|
||||
@@ -7,7 +7,7 @@ require (
|
||||
github.com/lib/pq v1.10.6
|
||||
github.com/mattn/go-sqlite3 v1.14.14
|
||||
github.com/pressly/goose/v3 v3.7.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/urfave/cli/v2 v2.10.2
|
||||
)
|
||||
@@ -24,12 +24,12 @@ require (
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.8.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.3.1 // indirect
|
||||
github.com/scroll-tech/zktrie v0.4.3 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
golang.org/x/crypto v0.4.0 // indirect
|
||||
golang.org/x/sys v0.3.0 // indirect
|
||||
golang.org/x/crypto v0.6.0 // indirect
|
||||
golang.org/x/sys v0.5.0 // indirect
|
||||
golang.org/x/tools v0.3.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
@@ -339,11 +339,10 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
|
||||
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
|
||||
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
|
||||
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod h1:OH4ZTAz6RM1IL0xcQ1zM6+Iy9s2vtcYqqwcEQdfHV7g=
|
||||
github.com/scroll-tech/zktrie v0.4.3 h1:RyhusIu8F8u5ITmzqZjkAwlL6jdC9TK9i6tfuJoZcpk=
|
||||
github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
@@ -412,8 +411,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
|
||||
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
|
||||
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -526,8 +525,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
|
||||
@@ -20,7 +20,7 @@ create table l1_message
|
||||
);
|
||||
|
||||
comment
|
||||
on column l1_message.status is 'undefined, pending, submitted, confirmed';
|
||||
on column l1_message.status is 'undefined, pending, submitted, confirmed, failed, expired';
|
||||
|
||||
create unique index l1_message_hash_uindex
|
||||
on l1_message (msg_hash);
|
||||
|
||||
@@ -21,7 +21,7 @@ create table l2_message
|
||||
);
|
||||
|
||||
comment
|
||||
on column l2_message.status is 'undefined, pending, submitted, confirmed';
|
||||
on column l2_message.status is 'undefined, pending, submitted, confirmed, failed, expired';
|
||||
|
||||
create unique index l2_message_hash_uindex
|
||||
on l2_message (msg_hash);
|
||||
|
||||
@@ -159,11 +159,11 @@ func (o *blockBatchOrm) GetVerifiedProofAndInstanceByID(id string) ([]byte, []by
|
||||
return proof, instance, nil
|
||||
}
|
||||
|
||||
func (o *blockBatchOrm) UpdateProofByID(ctx context.Context, id string, proof, instance_commitments []byte, proofTimeSec uint64) error {
|
||||
func (o *blockBatchOrm) UpdateProofByID(ctx context.Context, id string, proof, instanceCommitments []byte, proofTimeSec uint64) error {
|
||||
db := o.db
|
||||
if _, err := db.ExecContext(ctx,
|
||||
db.Rebind(`UPDATE block_batch set proof = ?, instance_commitments = ?, proof_time_sec = ? where id = ?;`),
|
||||
proof, instance_commitments, proofTimeSec, id,
|
||||
proof, instanceCommitments, proofTimeSec, id,
|
||||
); err != nil {
|
||||
log.Error("failed to update proof", "err", err)
|
||||
}
|
||||
@@ -236,8 +236,8 @@ func (o *blockBatchOrm) BatchRecordExist(id string) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (o *blockBatchOrm) GetPendingBatches() ([]string, error) {
|
||||
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC`, RollupPending)
|
||||
func (o *blockBatchOrm) GetPendingBatches(limit uint64) ([]string, error) {
|
||||
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, RollupPending, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -260,7 +260,7 @@ func (o *blockBatchOrm) GetPendingBatches() ([]string, error) {
|
||||
}
|
||||
|
||||
func (o *blockBatchOrm) GetLatestFinalizedBatch() (*BlockBatch, error) {
|
||||
row := o.db.QueryRowx(`SELECT * FROM block_batch WHERE rollup_status = $1 OR rollup_status = $2 ORDER BY index DESC;`, RollupFinalized, RollupFinalizationSkipped)
|
||||
row := o.db.QueryRowx(`select * from block_batch where index = (select max(index) from block_batch where rollup_status = $1);`, RollupFinalized)
|
||||
batch := &BlockBatch{}
|
||||
if err := row.StructScan(batch); err != nil {
|
||||
return nil, err
|
||||
@@ -268,8 +268,8 @@ func (o *blockBatchOrm) GetLatestFinalizedBatch() (*BlockBatch, error) {
|
||||
return batch, nil
|
||||
}
|
||||
|
||||
func (o *blockBatchOrm) GetCommittedBatches() ([]string, error) {
|
||||
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC`, RollupCommitted)
|
||||
func (o *blockBatchOrm) GetCommittedBatches(limit uint64) ([]string, error) {
|
||||
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, RollupCommitted, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -305,7 +305,7 @@ func (o *blockBatchOrm) GetRollupStatusByIDList(ids []string) ([]RollupStatus, e
|
||||
return make([]RollupStatus, 0), nil
|
||||
}
|
||||
|
||||
query, args, err := sqlx.In("SELECT rollup_status FROM block_batch WHERE id IN (?);", ids)
|
||||
query, args, err := sqlx.In("SELECT id, rollup_status FROM block_batch WHERE id IN (?);", ids)
|
||||
if err != nil {
|
||||
return make([]RollupStatus, 0), err
|
||||
}
|
||||
@@ -314,17 +314,24 @@ func (o *blockBatchOrm) GetRollupStatusByIDList(ids []string) ([]RollupStatus, e
|
||||
|
||||
rows, err := o.db.Query(query, args...)
|
||||
|
||||
var statuses []RollupStatus
|
||||
statusMap := make(map[string]RollupStatus)
|
||||
for rows.Next() {
|
||||
var id string
|
||||
var status RollupStatus
|
||||
if err = rows.Scan(&status); err != nil {
|
||||
if err = rows.Scan(&id, &status); err != nil {
|
||||
break
|
||||
}
|
||||
statuses = append(statuses, status)
|
||||
statusMap[id] = status
|
||||
}
|
||||
var statuses []RollupStatus
|
||||
if err != nil {
|
||||
return statuses, err
|
||||
}
|
||||
|
||||
for _, id := range ids {
|
||||
statuses = append(statuses, statusMap[id])
|
||||
}
|
||||
|
||||
return statuses, nil
|
||||
}
|
||||
|
||||
@@ -360,24 +367,24 @@ func (o *blockBatchOrm) UpdateRollupStatus(ctx context.Context, id string, statu
|
||||
}
|
||||
}
|
||||
|
||||
func (o *blockBatchOrm) UpdateCommitTxHashAndRollupStatus(ctx context.Context, id string, commit_tx_hash string, status RollupStatus) error {
|
||||
func (o *blockBatchOrm) UpdateCommitTxHashAndRollupStatus(ctx context.Context, id string, commitTxHash string, status RollupStatus) error {
|
||||
switch status {
|
||||
case RollupCommitted:
|
||||
_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ?, committed_at = ? where id = ?;"), commit_tx_hash, status, time.Now(), id)
|
||||
_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ?, committed_at = ? where id = ?;"), commitTxHash, status, time.Now(), id)
|
||||
return err
|
||||
default:
|
||||
_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ? where id = ?;"), commit_tx_hash, status, id)
|
||||
_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ? where id = ?;"), commitTxHash, status, id)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func (o *blockBatchOrm) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, id string, finalize_tx_hash string, status RollupStatus) error {
|
||||
func (o *blockBatchOrm) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, id string, finalizeTxHash string, status RollupStatus) error {
|
||||
switch status {
|
||||
case RollupFinalized:
|
||||
_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ?, finalized_at = ? where id = ?;"), finalize_tx_hash, status, time.Now(), id)
|
||||
_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ?, finalized_at = ? where id = ?;"), finalizeTxHash, status, time.Now(), id)
|
||||
return err
|
||||
default:
|
||||
_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ? where id = ?;"), finalize_tx_hash, status, id)
|
||||
_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ? where id = ?;"), finalizeTxHash, status, id)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -399,3 +406,17 @@ func (o *blockBatchOrm) GetAssignedBatchIDs() ([]string, error) {
|
||||
|
||||
return ids, rows.Close()
|
||||
}
|
||||
|
||||
func (o *blockBatchOrm) UpdateSkippedBatches() (int64, error) {
|
||||
res, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ? where (proving_status = ? or proving_status = ?) and rollup_status = ?;"), RollupFinalizationSkipped, ProvingTaskSkipped, ProvingTaskFailed, RollupCommitted)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
count, err := res.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
||||
@@ -154,7 +154,7 @@ func (o *blockTraceOrm) GetHashByNumber(number uint64) (*common.Hash, error) {
|
||||
func (o *blockTraceOrm) InsertBlockTraces(blockTraces []*types.BlockTrace) error {
|
||||
traceMaps := make([]map[string]interface{}, len(blockTraces))
|
||||
for i, trace := range blockTraces {
|
||||
number, hash, tx_num, mtime := trace.Header.Number.Int64(),
|
||||
number, hash, txNum, mtime := trace.Header.Number.Int64(),
|
||||
trace.Header.Hash().String(),
|
||||
len(trace.Transactions),
|
||||
trace.Header.Time
|
||||
@@ -174,7 +174,7 @@ func (o *blockTraceOrm) InsertBlockTraces(blockTraces []*types.BlockTrace) error
|
||||
"hash": hash,
|
||||
"parent_hash": trace.Header.ParentHash.String(),
|
||||
"trace": string(data),
|
||||
"tx_num": tx_num,
|
||||
"tx_num": txNum,
|
||||
"gas_used": gasCost,
|
||||
"block_timestamp": mtime,
|
||||
}
|
||||
@@ -186,8 +186,8 @@ func (o *blockTraceOrm) InsertBlockTraces(blockTraces []*types.BlockTrace) error
|
||||
return err
|
||||
}
|
||||
|
||||
func (o *blockTraceOrm) DeleteTracesByBatchID(batch_id string) error {
|
||||
if _, err := o.db.Exec(o.db.Rebind("update block_trace set trace = ? where batch_id = ?;"), "{}", batch_id); err != nil {
|
||||
func (o *blockTraceOrm) DeleteTracesByBatchID(batchID string) error {
|
||||
if _, err := o.db.Exec(o.db.Rebind("update block_trace set trace = ? where batch_id = ?;"), "{}", batchID); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -28,6 +28,9 @@ const (
|
||||
|
||||
// MsgFailed represents the from_layer message status is failed
|
||||
MsgFailed
|
||||
|
||||
// MsgExpired represents the from_layer message status is expired
|
||||
MsgExpired
|
||||
)
|
||||
|
||||
// L1Message is structure of stored layer1 bridge message
|
||||
@@ -118,7 +121,7 @@ type BlockTraceOrm interface {
|
||||
GetBlockTracesLatestHeight() (int64, error)
|
||||
GetBlockTraces(fields map[string]interface{}, args ...string) ([]*types.BlockTrace, error)
|
||||
GetBlockInfos(fields map[string]interface{}, args ...string) ([]*BlockInfo, error)
|
||||
// add `GetUnbatchedBlocks` because `GetBlockInfos` cannot support query "batch_id is NULL"
|
||||
// GetUnbatchedBlocks add `GetUnbatchedBlocks` because `GetBlockInfos` cannot support query "batch_id is NULL"
|
||||
GetUnbatchedBlocks(fields map[string]interface{}, args ...string) ([]*BlockInfo, error)
|
||||
GetHashByNumber(number uint64) (*common.Hash, error)
|
||||
DeleteTracesByBatchID(batchID string) error
|
||||
@@ -137,35 +140,39 @@ type BlockBatchOrm interface {
|
||||
GetBlockBatches(fields map[string]interface{}, args ...string) ([]*BlockBatch, error)
|
||||
GetProvingStatusByID(id string) (ProvingStatus, error)
|
||||
GetVerifiedProofAndInstanceByID(id string) ([]byte, []byte, error)
|
||||
UpdateProofByID(ctx context.Context, id string, proof, instance_commitments []byte, proofTimeSec uint64) error
|
||||
UpdateProofByID(ctx context.Context, id string, proof, instanceCommitments []byte, proofTimeSec uint64) error
|
||||
UpdateProvingStatus(id string, status ProvingStatus) error
|
||||
ResetProvingStatusFor(before ProvingStatus) error
|
||||
NewBatchInDBTx(dbTx *sqlx.Tx, startBlock *BlockInfo, endBlock *BlockInfo, parentHash string, totalTxNum uint64, gasUsed uint64) (string, error)
|
||||
BatchRecordExist(id string) (bool, error)
|
||||
GetPendingBatches() ([]string, error)
|
||||
GetCommittedBatches() ([]string, error)
|
||||
GetPendingBatches(limit uint64) ([]string, error)
|
||||
GetCommittedBatches(limit uint64) ([]string, error)
|
||||
GetRollupStatus(id string) (RollupStatus, error)
|
||||
GetRollupStatusByIDList(ids []string) ([]RollupStatus, error)
|
||||
GetCommitTxHash(id string) (sql.NullString, error)
|
||||
GetFinalizeTxHash(id string) (sql.NullString, error)
|
||||
GetLatestFinalizedBatch() (*BlockBatch, error)
|
||||
UpdateRollupStatus(ctx context.Context, id string, status RollupStatus) error
|
||||
UpdateCommitTxHashAndRollupStatus(ctx context.Context, id string, commit_tx_hash string, status RollupStatus) error
|
||||
UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, id string, finalize_tx_hash string, status RollupStatus) error
|
||||
UpdateCommitTxHashAndRollupStatus(ctx context.Context, id string, commitTxHash string, status RollupStatus) error
|
||||
UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, id string, finalizeTxHash string, status RollupStatus) error
|
||||
GetAssignedBatchIDs() ([]string, error)
|
||||
UpdateSkippedBatches() (int64, error)
|
||||
|
||||
GetCommitTxHash(id string) (sql.NullString, error) // for unit tests only
|
||||
GetFinalizeTxHash(id string) (sql.NullString, error) // for unit tests only
|
||||
}
|
||||
|
||||
// L1MessageOrm is layer1 message db interface
|
||||
type L1MessageOrm interface {
|
||||
GetL1MessageByNonce(nonce uint64) (*L1Message, error)
|
||||
GetL1MessageByMsgHash(msgHash string) (*L1Message, error)
|
||||
GetL1MessagesByStatus(status MsgStatus) ([]*L1Message, error)
|
||||
GetL1MessagesByStatus(status MsgStatus, limit uint64) ([]*L1Message, error)
|
||||
GetL1ProcessedNonce() (int64, error)
|
||||
SaveL1Messages(ctx context.Context, messages []*L1Message) error
|
||||
UpdateLayer2Hash(ctx context.Context, msgHash string, layer2Hash string) error
|
||||
UpdateLayer1Status(ctx context.Context, msgHash string, status MsgStatus) error
|
||||
UpdateLayer1StatusAndLayer2Hash(ctx context.Context, msgHash string, status MsgStatus, layer2Hash string) error
|
||||
GetLayer1LatestWatchedHeight() (int64, error)
|
||||
|
||||
GetRelayL1MessageTxHash(nonce uint64) (sql.NullString, error) // for unit tests only
|
||||
}
|
||||
|
||||
// L2MessageOrm is layer2 message db interface
|
||||
@@ -174,8 +181,7 @@ type L2MessageOrm interface {
|
||||
GetL2MessageByMsgHash(msgHash string) (*L2Message, error)
|
||||
MessageProofExist(nonce uint64) (bool, error)
|
||||
GetMessageProofByNonce(nonce uint64) (string, error)
|
||||
GetL2MessagesByStatus(status MsgStatus) ([]*L2Message, error)
|
||||
GetL2MessagesByStatusUpToHeight(status MsgStatus, height uint64) ([]*L2Message, error)
|
||||
GetL2Messages(fields map[string]interface{}, args ...string) ([]*L2Message, error)
|
||||
GetL2ProcessedNonce() (int64, error)
|
||||
SaveL2Messages(ctx context.Context, messages []*L2Message) error
|
||||
UpdateLayer1Hash(ctx context.Context, msgHash string, layer1Hash string) error
|
||||
@@ -183,4 +189,6 @@ type L2MessageOrm interface {
|
||||
UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status MsgStatus, layer1Hash string) error
|
||||
UpdateMessageProof(ctx context.Context, nonce uint64, proof string) error
|
||||
GetLayer2LatestWatchedHeight() (int64, error)
|
||||
|
||||
GetRelayL2MessageTxHash(nonce uint64) (sql.NullString, error) // for unit tests only
|
||||
}
|
||||
|
||||
@@ -45,8 +45,8 @@ func (m *l1MessageOrm) GetL1MessageByNonce(nonce uint64) (*L1Message, error) {
}

// GetL1MessagesByStatus fetch list of unprocessed messages given msg status
-func (m *l1MessageOrm) GetL1MessagesByStatus(status MsgStatus) ([]*L1Message, error) {
-	rows, err := m.db.Queryx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, status FROM l1_message WHERE status = $1 ORDER BY nonce ASC;`, status)
+func (m *l1MessageOrm) GetL1MessagesByStatus(status MsgStatus, limit uint64) ([]*L1Message, error) {
+	rows, err := m.db.Queryx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, status FROM l1_message WHERE status = $1 ORDER BY nonce ASC LIMIT $2;`, status, limit)
	if err != nil {
		return nil, err
	}
@@ -167,3 +167,12 @@ func (m *l1MessageOrm) GetLayer1LatestWatchedHeight() (int64, error) {
	}
	return -1, nil
}
+
+func (m *l1MessageOrm) GetRelayL1MessageTxHash(nonce uint64) (sql.NullString, error) {
+	row := m.db.QueryRow(`SELECT layer2_hash FROM l1_message WHERE nonce = $1`, nonce)
+	var hash sql.NullString
+	if err := row.Scan(&hash); err != nil {
+		return sql.NullString{}, err
+	}
+	return hash, nil
+}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
@@ -88,32 +89,15 @@ func (m *layer2MessageOrm) GetL2ProcessedNonce() (int64, error) {
|
||||
}
|
||||
|
||||
// GetL2MessagesByStatus fetch list of messages given msg status
|
||||
func (m *layer2MessageOrm) GetL2MessagesByStatus(status MsgStatus) ([]*L2Message, error) {
|
||||
rows, err := m.db.Queryx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash FROM l2_message WHERE status = $1 ORDER BY nonce ASC;`, status)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
func (m *layer2MessageOrm) GetL2Messages(fields map[string]interface{}, args ...string) ([]*L2Message, error) {
|
||||
query := "SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash FROM l2_message WHERE 1 = 1 "
|
||||
for key := range fields {
|
||||
query += fmt.Sprintf("AND %s=:%s ", key, key)
|
||||
}
|
||||
query = strings.Join(append([]string{query}, args...), " ")
|
||||
|
||||
var msgs []*L2Message
|
||||
for rows.Next() {
|
||||
msg := &L2Message{}
|
||||
if err = rows.StructScan(&msg); err != nil {
|
||||
break
|
||||
}
|
||||
msgs = append(msgs, msg)
|
||||
}
|
||||
if len(msgs) == 0 || errors.Is(err, sql.ErrNoRows) {
|
||||
// log.Warn("no unprocessed layer2 messages in db", "err", err)
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return msgs, rows.Close()
|
||||
}
|
||||
|
||||
// GetL2MessagesByStatusUpToHeight fetch list of messages given msg status and an upper limit on height
|
||||
func (m *layer2MessageOrm) GetL2MessagesByStatusUpToHeight(status MsgStatus, height uint64) ([]*L2Message, error) {
|
||||
rows, err := m.db.Queryx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash FROM l2_message WHERE status = $1 AND height <= $2 ORDER BY nonce ASC;`, status, height)
|
||||
db := m.db
|
||||
rows, err := db.NamedQuery(db.Rebind(query), fields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -222,3 +206,12 @@ func (m *layer2MessageOrm) GetLayer2LatestWatchedHeight() (int64, error) {
|
||||
}
|
||||
return height, nil
|
||||
}
|
||||
|
||||
func (m *layer2MessageOrm) GetRelayL2MessageTxHash(nonce uint64) (sql.NullString, error) {
|
||||
row := m.db.QueryRow(`SELECT layer1_hash FROM l2_message WHERE nonce = $1`, nonce)
|
||||
var hash sql.NullString
|
||||
if err := row.Scan(&hash); err != nil {
|
||||
return sql.NullString{}, err
|
||||
}
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
@@ -281,7 +281,7 @@ func testOrmBlockBatch(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int(2), len(batches))
|
||||
|
||||
batcheIDs, err := ormBatch.GetPendingBatches()
|
||||
batcheIDs, err := ormBatch.GetPendingBatches(10)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int(2), len(batcheIDs))
|
||||
assert.Equal(t, batchID1, batcheIDs[0])
|
||||
@@ -290,33 +290,51 @@ func testOrmBlockBatch(t *testing.T) {
|
||||
err = ormBatch.UpdateCommitTxHashAndRollupStatus(context.Background(), batchID1, "commit_tx_1", orm.RollupCommitted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
batcheIDs, err = ormBatch.GetPendingBatches()
|
||||
batcheIDs, err = ormBatch.GetPendingBatches(10)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int(1), len(batcheIDs))
|
||||
assert.Equal(t, batchID2, batcheIDs[0])
|
||||
|
||||
proving_status, err := ormBatch.GetProvingStatusByID(batchID1)
|
||||
provingStatus, err := ormBatch.GetProvingStatusByID(batchID1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, orm.ProvingTaskUnassigned, proving_status)
|
||||
assert.Equal(t, orm.ProvingTaskUnassigned, provingStatus)
|
||||
err = ormBatch.UpdateProofByID(context.Background(), batchID1, []byte{1}, []byte{2}, 1200)
|
||||
assert.NoError(t, err)
|
||||
err = ormBatch.UpdateProvingStatus(batchID1, orm.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
proving_status, err = ormBatch.GetProvingStatusByID(batchID1)
|
||||
provingStatus, err = ormBatch.GetProvingStatusByID(batchID1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, orm.ProvingTaskVerified, proving_status)
|
||||
assert.Equal(t, orm.ProvingTaskVerified, provingStatus)
|
||||
|
||||
rollup_status, err := ormBatch.GetRollupStatus(batchID1)
|
||||
rollupStatus, err := ormBatch.GetRollupStatus(batchID1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, orm.RollupCommitted, rollup_status)
|
||||
assert.Equal(t, orm.RollupCommitted, rollupStatus)
|
||||
err = ormBatch.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchID1, "finalize_tx_1", orm.RollupFinalized)
|
||||
assert.NoError(t, err)
|
||||
rollup_status, err = ormBatch.GetRollupStatus(batchID1)
|
||||
rollupStatus, err = ormBatch.GetRollupStatus(batchID1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, orm.RollupFinalized, rollup_status)
|
||||
assert.Equal(t, orm.RollupFinalized, rollupStatus)
|
||||
result, err := ormBatch.GetLatestFinalizedBatch()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, batchID1, result.ID)
|
||||
|
||||
status1, err := ormBatch.GetRollupStatus(batchID1)
|
||||
assert.NoError(t, err)
|
||||
status2, err := ormBatch.GetRollupStatus(batchID2)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEqual(t, status1, status2)
|
||||
statues, err := ormBatch.GetRollupStatusByIDList([]string{batchID1, batchID2, batchID1, batchID2})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, statues[0], status1)
|
||||
assert.Equal(t, statues[1], status2)
|
||||
assert.Equal(t, statues[2], status1)
|
||||
assert.Equal(t, statues[3], status2)
|
||||
statues, err = ormBatch.GetRollupStatusByIDList([]string{batchID2, batchID1, batchID2, batchID1})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, statues[0], status2)
|
||||
assert.Equal(t, statues[1], status1)
|
||||
assert.Equal(t, statues[2], status2)
|
||||
assert.Equal(t, statues[3], status1)
|
||||
}
|
||||
|
||||
// testOrmSessionInfo test rollup result table functions
|
||||
@@ -342,9 +360,9 @@ func testOrmSessionInfo(t *testing.T) {
|
||||
ids, err := ormBatch.GetAssignedBatchIDs()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(ids))
|
||||
session_infos, err := ormSession.GetSessionInfosByIDs(ids)
|
||||
sessionInfos, err := ormSession.GetSessionInfosByIDs(ids)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 0, len(session_infos))
|
||||
assert.Equal(t, 0, len(sessionInfos))
|
||||
|
||||
sessionInfo := orm.SessionInfo{
|
||||
ID: batchID,
|
||||
@@ -359,25 +377,25 @@ func testOrmSessionInfo(t *testing.T) {
|
||||
|
||||
// insert
|
||||
assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
|
||||
session_infos, err = ormSession.GetSessionInfosByIDs(ids)
|
||||
sessionInfos, err = ormSession.GetSessionInfosByIDs(ids)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(session_infos))
|
||||
assert.Equal(t, sessionInfo, *session_infos[0])
|
||||
assert.Equal(t, 1, len(sessionInfos))
|
||||
assert.Equal(t, sessionInfo, *sessionInfos[0])
|
||||
|
||||
// update
|
||||
sessionInfo.Rollers["0"].Status = orm.RollerProofValid
|
||||
assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
|
||||
session_infos, err = ormSession.GetSessionInfosByIDs(ids)
|
||||
sessionInfos, err = ormSession.GetSessionInfosByIDs(ids)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(session_infos))
|
||||
assert.Equal(t, sessionInfo, *session_infos[0])
|
||||
assert.Equal(t, 1, len(sessionInfos))
|
||||
assert.Equal(t, sessionInfo, *sessionInfos[0])
|
||||
|
||||
// delete
|
||||
assert.NoError(t, ormBatch.UpdateProvingStatus(batchID, orm.ProvingTaskVerified))
|
||||
ids, err = ormBatch.GetAssignedBatchIDs()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 0, len(ids))
|
||||
session_infos, err = ormSession.GetSessionInfosByIDs(ids)
|
||||
sessionInfos, err = ormSession.GetSessionInfosByIDs(ids)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 0, len(session_infos))
|
||||
assert.Equal(t, 0, len(sessionInfos))
|
||||
}
|
||||
|
||||
@@ -223,6 +223,7 @@ github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNG
|
||||
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221202061207-804e7edc23ba/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
|
||||
github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY=
|
||||
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
|
||||
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
|
||||
@@ -255,6 +256,7 @@ go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg=
|
||||
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
|
||||
@@ -268,8 +270,11 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug
|
||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
@@ -290,5 +295,6 @@ gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUk
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o=
|
||||
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
|
||||
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
|
||||
rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=
|
||||
|
||||
@@ -11,7 +11,8 @@ endif
|
||||
|
||||
libzkp:
|
||||
cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
|
||||
cp -r ../common/libzkp/interface ./prover/lib
|
||||
rm -rf ./prover/lib && cp -r ../common/libzkp/interface ./prover/lib
|
||||
find ../common | grep libzktrie.so | xargs -i cp {} ./prover/lib/
|
||||
|
||||
roller: libzkp ## Build the Roller instance.
|
||||
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/roller ./cmd
|
||||
|
||||
@@ -6,7 +6,14 @@ make clean && make roller
|
||||
```
|
||||
|
||||
## Start
|
||||
- use config.toml
|
||||
- Set environment variables
|
||||
```shell
|
||||
export CHAIN_ID=534353 # change to correct chain_id
|
||||
export RUST_MIN_STACK=100000000
|
||||
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./prover/lib:/usr/local/cuda/ # cuda only for GPU machine
|
||||
```
|
||||
|
||||
- Use config.toml
|
||||
```shell
|
||||
./build/bin/roller
|
||||
```
|
||||
@@ -1,200 +0,0 @@
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": {
|
||||
"blockTrace": {
|
||||
"number": "0x1",
|
||||
"hash": "0x5366b507fd5ec49c1090655d6858835823b179b1b2773a45654687e0db4ec627",
|
||||
"gasLimit": 939082033,
|
||||
"difficulty": "0x2",
|
||||
"baseFee": "0x342770c0",
|
||||
"coinbase": {
|
||||
"address": "0x4cb1ab63af5d8931ce09673ebd8ae2ce16fd6571",
|
||||
"nonce": 0,
|
||||
"balance": "0x3635c9adc5dea00000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
"time": 1663875646,
|
||||
"transactions": []
|
||||
},
|
||||
"storageTrace": {
|
||||
"rootBefore": "0x1358bd270133c112737e834e13d3fe6381d0cf9aea7afecb46d19188d078c451",
|
||||
"rootAfter": "0x1358bd270133c112737e834e13d3fe6381d0cf9aea7afecb46d19188d078c451",
|
||||
"proofs": {
|
||||
"0x4cb1aB63aF5D8931Ce09673EbD8ae2ce16fD6571": [
|
||||
"0x001a4f0d7d9eb169b9a45c37b1a2995ef5d15849e7a582cb935ad18ed10363bfd91bdb4da71c0bc7067be54de6667dc1a8e2e4032141815a5fee2ea58014657014",
|
||||
"0x0129bdbea092f4f7e6de593fd1a16ddb50b1c2a6297d4ae141a60f8da631e4817504040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003635c9adc5dea00000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000204cb1ab63af5d8931ce09673ebd8ae2ce16fd6571000000000000000000000000",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
|
||||
]
|
||||
}
|
||||
},
|
||||
"executionResults": [],
|
||||
"mptwitness": [
|
||||
{
|
||||
"address": "0x4cb1ab63af5d8931ce09673ebd8ae2ce16fd6571",
|
||||
"accountKey": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29",
|
||||
"accountPath": [
|
||||
{
|
||||
"pathPart": "0x1",
|
||||
"root": "0x51c478d08891d146cbfe7aea9acfd08163fed3134e837e7312c1330127bd5813",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x1470651480a52eee5f5a81412103e4e2a8c17d66e64de57b06c70b1ca74ddb1b",
|
||||
"sibling": "0xd9bf6303d18ed15a93cb82a5e74958d1f55e99a2b1375ca4b969b19e7d0d4f1a"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0xad23a3af3faa69c7bb5215f7a927404429cdeea43d07430790241bebdce9270b",
|
||||
"sibling": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pathPart": "0x1",
|
||||
"root": "0x51c478d08891d146cbfe7aea9acfd08163fed3134e837e7312c1330127bd5813",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x1470651480a52eee5f5a81412103e4e2a8c17d66e64de57b06c70b1ca74ddb1b",
|
||||
"sibling": "0xd9bf6303d18ed15a93cb82a5e74958d1f55e99a2b1375ca4b969b19e7d0d4f1a"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0xad23a3af3faa69c7bb5215f7a927404429cdeea43d07430790241bebdce9270b",
|
||||
"sibling": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29"
|
||||
}
|
||||
}
|
||||
],
|
||||
"accountUpdate": [
|
||||
{
|
||||
"nonce": 0,
|
||||
"balance": "0x3635c9adc5dea00000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
{
|
||||
"nonce": 0,
|
||||
"balance": "0x3635c9adc5dea00000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
}
|
||||
],
|
||||
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"statePath": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"stateUpdate": [
|
||||
null,
|
||||
null
|
||||
]
|
||||
},
|
||||
{
|
||||
"address": "0x4cb1ab63af5d8931ce09673ebd8ae2ce16fd6571",
|
||||
"accountKey": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29",
|
||||
"accountPath": [
|
||||
{
|
||||
"pathPart": "0x1",
|
||||
"root": "0x51c478d08891d146cbfe7aea9acfd08163fed3134e837e7312c1330127bd5813",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x1470651480a52eee5f5a81412103e4e2a8c17d66e64de57b06c70b1ca74ddb1b",
|
||||
"sibling": "0xd9bf6303d18ed15a93cb82a5e74958d1f55e99a2b1375ca4b969b19e7d0d4f1a"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0xad23a3af3faa69c7bb5215f7a927404429cdeea43d07430790241bebdce9270b",
|
||||
"sibling": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pathPart": "0x1",
|
||||
"root": "0x51c478d08891d146cbfe7aea9acfd08163fed3134e837e7312c1330127bd5813",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x1470651480a52eee5f5a81412103e4e2a8c17d66e64de57b06c70b1ca74ddb1b",
|
||||
"sibling": "0xd9bf6303d18ed15a93cb82a5e74958d1f55e99a2b1375ca4b969b19e7d0d4f1a"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0xad23a3af3faa69c7bb5215f7a927404429cdeea43d07430790241bebdce9270b",
|
||||
"sibling": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29"
|
||||
}
|
||||
}
|
||||
],
|
||||
"accountUpdate": [
|
||||
{
|
||||
"nonce": 0,
|
||||
"balance": "0x3635c9adc5dea00000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
{
|
||||
"nonce": 0,
|
||||
"balance": "0x3635c9adc5dea00000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
}
|
||||
],
|
||||
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"statePath": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"stateUpdate": [
|
||||
null,
|
||||
null
|
||||
]
|
||||
},
|
||||
{
|
||||
"address": "0x4cb1ab63af5d8931ce09673ebd8ae2ce16fd6571",
|
||||
"accountKey": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29",
|
||||
"accountPath": [
|
||||
{
|
||||
"pathPart": "0x1",
|
||||
"root": "0x51c478d08891d146cbfe7aea9acfd08163fed3134e837e7312c1330127bd5813",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x1470651480a52eee5f5a81412103e4e2a8c17d66e64de57b06c70b1ca74ddb1b",
|
||||
"sibling": "0xd9bf6303d18ed15a93cb82a5e74958d1f55e99a2b1375ca4b969b19e7d0d4f1a"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0xad23a3af3faa69c7bb5215f7a927404429cdeea43d07430790241bebdce9270b",
|
||||
"sibling": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pathPart": "0x1",
|
||||
"root": "0x51c478d08891d146cbfe7aea9acfd08163fed3134e837e7312c1330127bd5813",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x1470651480a52eee5f5a81412103e4e2a8c17d66e64de57b06c70b1ca74ddb1b",
|
||||
"sibling": "0xd9bf6303d18ed15a93cb82a5e74958d1f55e99a2b1375ca4b969b19e7d0d4f1a"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0xad23a3af3faa69c7bb5215f7a927404429cdeea43d07430790241bebdce9270b",
|
||||
"sibling": "0x7581e431a68d0fa641e14a7d29a6c2b150db6da1d13f59dee6f7f492a0bebd29"
|
||||
}
|
||||
}
|
||||
],
|
||||
"accountUpdate": [
|
||||
{
|
||||
"nonce": 0,
|
||||
"balance": "0x3635c9adc5dea00000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
{
|
||||
"nonce": 0,
|
||||
"balance": "0x3635c9adc5dea00000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
}
|
||||
],
|
||||
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"statePath": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"stateUpdate": [
|
||||
null,
|
||||
null
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large.
roller/assets/traces/196.json (new file, 42482 lines) — diff suppressed because one or more lines are too long.
@@ -3,7 +3,7 @@ module scroll-tech/roller
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/urfave/cli/v2 v2.10.2
|
||||
go.etcd.io/bbolt v1.3.6
|
||||
@@ -21,11 +21,11 @@ require (
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.8.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.3.1 // indirect
|
||||
github.com/scroll-tech/zktrie v0.4.3 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
golang.org/x/crypto v0.4.0 // indirect
|
||||
golang.org/x/sys v0.3.0 // indirect
|
||||
golang.org/x/crypto v0.6.0 // indirect
|
||||
golang.org/x/sys v0.5.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
@@ -323,11 +323,10 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
|
||||
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
|
||||
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
|
||||
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d h1:S4bEgTezJrqYmDfUSkp9Of0/lcglm4CTAWQHSnsn2HE=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230210093343-bb26fa3e391d/go.mod h1:OH4ZTAz6RM1IL0xcQ1zM6+Iy9s2vtcYqqwcEQdfHV7g=
|
||||
github.com/scroll-tech/zktrie v0.4.3 h1:RyhusIu8F8u5ITmzqZjkAwlL6jdC9TK9i6tfuJoZcpk=
|
||||
github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
@@ -398,8 +397,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
|
||||
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
|
||||
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -512,8 +511,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
package prover
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl
|
||||
#cgo gpu LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart
|
||||
#cgo LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
|
||||
#cgo gpu LDFLAGS: ${SRCDIR}/lib/libzkp.a -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -L${SRCDIR}/lib/ -lcudart -Wl,-rpath=${SRCDIR}/lib
|
||||
#include <stdlib.h>
|
||||
#include "./lib/libzkp.h"
|
||||
*/
|
||||
|
||||
@@ -17,17 +17,12 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
paramsPath = "../assets/test_params"
|
||||
seedPath = "../assets/test_seed"
|
||||
tracesPath = "../assets/traces"
|
||||
paramsPath = "../assets/test_params"
|
||||
seedPath = "../assets/test_seed"
|
||||
tracesPath = "../assets/traces"
|
||||
proofDumpPath = "agg_proof"
|
||||
)
|
||||
|
||||
type RPCTrace struct {
|
||||
Jsonrpc string `json:"jsonrpc"`
|
||||
ID int64 `json:"id"`
|
||||
Result *types.BlockTrace `json:"result"`
|
||||
}
|
||||
|
||||
func TestFFI(t *testing.T) {
|
||||
as := assert.New(t)
|
||||
cfg := &config.ProverConfig{
|
||||
@@ -50,11 +45,20 @@ func TestFFI(t *testing.T) {
|
||||
as.NoError(err)
|
||||
byt, err = io.ReadAll(f)
|
||||
as.NoError(err)
|
||||
rpcTrace := &RPCTrace{}
|
||||
as.NoError(json.Unmarshal(byt, rpcTrace))
|
||||
traces = append(traces, rpcTrace.Result)
|
||||
trace := &types.BlockTrace{}
|
||||
as.NoError(json.Unmarshal(byt, trace))
|
||||
traces = append(traces, trace)
|
||||
}
|
||||
_, err = prover.Prove(traces)
|
||||
proof, err := prover.Prove(traces)
|
||||
as.NoError(err)
|
||||
t.Log("prove success")
|
||||
|
||||
// dump the proof
|
||||
os.RemoveAll(proofDumpPath)
|
||||
proofByt, err := json.Marshal(proof)
|
||||
as.NoError(err)
|
||||
proofFile, err := os.Create(proofDumpPath)
|
||||
as.NoError(err)
|
||||
_, err = proofFile.Write(proofByt)
|
||||
as.NoError(err)
|
||||
}
|
||||
|
||||
@@ -5,6 +5,6 @@ DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
PROJ_DIR=$DIR"/.."

mkdir -p $PROJ_DIR/assets/params
-wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/release-0920/test_seed -O $PROJ_DIR/assets/seed
-wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/release-0920/test_params/params18 -O $PROJ_DIR/assets/params/params18
-wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/release-0920/test_params/params25 -O $PROJ_DIR/assets/params/params25
+wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/release-1220/test_seed -O $PROJ_DIR/assets/seed
+wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/release-1220/test_params/params19 -O $PROJ_DIR/assets/params/params19
+wget https://circuit-release.s3.us-west-2.amazonaws.com/circuit-release/release-1220/test_params/params26 -O $PROJ_DIR/assets/params/params26
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
	"time"

	"github.com/scroll-tech/go-ethereum/crypto"
+	"github.com/scroll-tech/go-ethereum/rpc"
	"github.com/stretchr/testify/assert"

	"scroll-tech/database"
@@ -119,10 +120,10 @@ func runSender(t *testing.T, endpoint string) *sender.Sender {
		Endpoint:            endpoint,
		CheckPendingTime:    3,
		EscalateBlocks:      100,
-		Confirmations:       0,
+		Confirmations:       rpc.LatestBlockNumber,
		EscalateMultipleNum: 11,
		EscalateMultipleDen: 10,
-		TxType:              "DynamicFeeTx",
+		TxType:              "LegacyTx",
	}, []*ecdsa.PrivateKey{priv})
	assert.NoError(t, err)
	return newSender
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.