Compare commits


2 Commits

Author    SHA1        Message                                       Date
vincent   7c3e409df0  add tag jenkinsfile (+11 squashed commits)    2023-01-10 14:33:00 +08:00
vincent   eb28409382  add_tag_job                                   2023-01-10 11:40:30 +08:00
61 changed files with 667 additions and 1079 deletions


@@ -1,7 +0,0 @@
1. Purpose or design rationale of this PR
2. Does this PR involve a new deployment, and involve a new git tag & docker image tag? If so, has `tag` in `common/version.go` been updated?
3. Is this PR a breaking change? If so, has a `breaking-change` label been attached?

Jenkinsfile (vendored, 38 lines changed)

@@ -17,6 +17,18 @@ pipeline {
}
stages {
stage('Build') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
changeset "tests/**"
}
}
parallel {
stage('Build Prerequisite') {
steps {
@@ -58,6 +70,18 @@ pipeline {
}
}
stage('Parallel Test') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
changeset "tests/**"
}
}
parallel{
stage('Test bridge package') {
steps {
@@ -102,12 +126,24 @@ pipeline {
}
}
stage('Compare Coverage') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
changeset "tests/**"
}
}
steps {
sh "./build/post-test-report-coverage.sh"
script {
currentBuild.result = 'SUCCESS'
}
step([$class: 'CompareCoverageAction', publishResultAs: 'Comment', scmVars: [GIT_URL: env.GIT_URL]])
step([$class: 'CompareCoverageAction', publishResultAs: 'statusCheck', scmVars: [GIT_URL: env.GIT_URL]])
}
}
}


@@ -9,20 +9,20 @@ import (
)
const (
// SentMessageEventSignature = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
SentMessageEventSignature = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
// SENT_MESSAGE_EVENT_SIGNATURE = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
SENT_MESSAGE_EVENT_SIGNATURE = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
// RelayedMessageEventSignature = keccak256("RelayedMessage(bytes32)")
RelayedMessageEventSignature = "4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"
// RELAYED_MESSAGE_EVENT_SIGNATURE = keccak256("RelayedMessage(bytes32)")
RELAYED_MESSAGE_EVENT_SIGNATURE = "4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"
// FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
FailedRelayedMessageEventSignature = "99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"
// FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE = keccak256("FailedRelayedMessage(bytes32)")
FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE = "99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"
// CommitBatchEventSignature = keccak256("CommitBatch(bytes32,bytes32,uint256,bytes32)")
CommitBatchEventSignature = "a26d4bd91c4c2eff3b1bf542129607d782506fc1950acfab1472a20d28c06596"
// COMMIT_BATCH_EVENT_SIGNATURE = keccak256("CommitBatch(bytes32,bytes32,uint256,bytes32)")
COMMIT_BATCH_EVENT_SIGNATURE = "a26d4bd91c4c2eff3b1bf542129607d782506fc1950acfab1472a20d28c06596"
// FinalizedBatchEventSignature = keccak256("FinalizeBatch(bytes32,bytes32,uint256,bytes32)")
FinalizedBatchEventSignature = "e20f311a96205960de4d2bb351f7729e5136fa36ae64d7f736c67ddc4ca4cd4b"
// FINALIZED_BATCH_EVENT_SIGNATURE = keccak256("FinalizeBatch(bytes32,bytes32,uint256,bytes32)")
FINALIZED_BATCH_EVENT_SIGNATURE = "e20f311a96205960de4d2bb351f7729e5136fa36ae64d7f736c67ddc4ca4cd4b"
)
var (
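Note: each constant above is the keccak256 digest of the Solidity event declaration quoted in its comment, so the pairs can be checked mechanically. A minimal sketch using the go-ethereum crypto helpers (import paths as used elsewhere in this diff):

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	// topic[0] of an emitted event is keccak256 of its canonical declaration
	got := crypto.Keccak256Hash([]byte("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)"))
	want := common.HexToHash("806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4")
	fmt.Println(got == want) // true if the constant matches its declaration
}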


@@ -15,5 +15,5 @@ func TestRunBridge(t *testing.T) {
// wait result
bridge.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("bridge version %s", version.Version))
bridge.RunApp(nil)
bridge.RunApp(false)
}


@@ -5,11 +5,10 @@ go 1.18
require (
github.com/iden3/go-iden3-crypto v0.0.13
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.10.2
golang.org/x/sync v0.1.0
modernc.org/mathutil v1.4.1
)
require (
@@ -28,7 +27,6 @@ require (
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
@@ -38,8 +36,8 @@ require (
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.4.0 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/sys v0.3.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)


@@ -336,8 +336,6 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
@@ -350,8 +348,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
@@ -425,8 +423,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -541,8 +539,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -554,7 +552,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -664,7 +662,5 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=


@@ -138,11 +138,11 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 5)
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
query.Topics[0][3] = common.HexToHash(bridge_abi.CommitBatchEventSignature)
query.Topics[0][4] = common.HexToHash(bridge_abi.FinalizedBatchEventSignature)
query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][3] = common.HexToHash(bridge_abi.COMMIT_BATCH_EVENT_SIGNATURE)
query.Topics[0][4] = common.HexToHash(bridge_abi.FINALIZED_BATCH_EVENT_SIGNATURE)
logs, err := w.client.FilterLogs(w.ctx, query)
if err != nil {
@@ -199,10 +199,10 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
for _, msg := range relayedMessageEvents {
if msg.isSuccessful {
// succeed
err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
} else {
// failed
err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
}
if err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
@@ -229,7 +229,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
var rollupEvents []rollupEvent
for _, vLog := range logs {
switch vLog.Topics[0] {
case common.HexToHash(bridge_abi.SentMessageEventSignature):
case common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE):
event := struct {
Target common.Address
Sender common.Address
@@ -250,7 +250,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
event.Target = common.HexToAddress(vLog.Topics[1].String())
l1Messages = append(l1Messages, &orm.L1Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
MsgHash: utils.ComputeMessageHash(event.Target, event.Sender, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
@@ -261,7 +261,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
Calldata: common.Bytes2Hex(event.Message),
Layer1Hash: vLog.TxHash.Hex(),
})
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
case common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE):
event := struct {
MsgHash common.Hash
}{}
@@ -272,7 +272,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
txHash: vLog.TxHash,
isSuccessful: true,
})
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
case common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE):
event := struct {
MsgHash common.Hash
}{}
@@ -283,7 +283,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
txHash: vLog.TxHash,
isSuccessful: false,
})
case common.HexToHash(bridge_abi.CommitBatchEventSignature):
case common.HexToHash(bridge_abi.COMMIT_BATCH_EVENT_SIGNATURE):
event := struct {
BatchID common.Hash
BatchHash common.Hash
@@ -303,7 +303,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
txHash: vLog.TxHash,
status: orm.RollupCommitted,
})
case common.HexToHash(bridge_abi.FinalizedBatchEventSignature):
case common.HexToHash(bridge_abi.FINALIZED_BATCH_EVENT_SIGNATURE):
event := struct {
BatchID common.Hash
BatchHash common.Hash
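Note: in a FilterQuery, Topics is position-indexed and the inner slice at each position is an OR-list, which is how the single query above matches all five event types at topic position 0. A self-contained sketch of the same call shape (endpoint, address, and block range are illustrative):

package main

import (
	"context"
	"log"
	"math/big"

	geth "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // illustrative endpoint
	if err != nil {
		log.Fatal(err)
	}
	query := geth.FilterQuery{
		FromBlock: big.NewInt(100),
		ToBlock:   big.NewInt(110),
		Addresses: []common.Address{common.HexToAddress("0x0000000000000000000000000000000000000001")}, // messenger placeholder
		Topics: [][]common.Hash{{
			// either signature matches at topic position 0 (OR semantics)
			common.HexToHash("806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"), // SentMessage
			common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"), // RelayedMessage
		}},
	}
	logs, err := client.FilterLogs(context.Background(), query)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("matched", len(logs), "logs")
}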


@@ -40,8 +40,7 @@ func newBatchProposer(cfg *config.BatchProposerConfig, orm database.OrmFactory)
}
}
func (w *batchProposer) tryProposeBatch(wg *sync.WaitGroup) {
defer wg.Done()
func (w *batchProposer) tryProposeBatch() error {
w.mutex.Lock()
defer w.mutex.Unlock()
@@ -50,27 +49,20 @@ func (w *batchProposer) tryProposeBatch(wg *sync.WaitGroup) {
fmt.Sprintf("order by number ASC LIMIT %d", w.batchBlocksLimit),
)
if err != nil {
log.Error("failed to get unbatched blocks", "err", err)
return
return err
}
if len(blocks) == 0 {
return
return nil
}
if blocks[0].GasUsed > w.batchGasThreshold {
log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
return w.createBatchForBlocks(blocks[:1])
}
if blocks[0].TxNum > w.batchTxNumThreshold {
log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum)
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
return w.createBatchForBlocks(blocks[:1])
}
var (
@@ -91,12 +83,10 @@ func (w *batchProposer) tryProposeBatch(wg *sync.WaitGroup) {
// if it's not old enough we will skip proposing the batch,
// otherwise we will still propose a batch
if length == len(blocks) && blocks[0].BlockTimestamp+w.batchTimeSec > uint64(time.Now().Unix()) {
return
return nil
}
if err = w.createBatchForBlocks(blocks); err != nil {
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
}
return w.createBatchForBlocks(blocks)
}
func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) error {
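Note: with tryProposeBatch returning its error instead of logging and swallowing it, the logging decision moves to the caller (the real driver is WatcherClient.Start, later in this diff). A hypothetical driver illustrating that caller-side pattern:

import (
	"context"
	"time"

	"github.com/scroll-tech/go-ethereum/log"
)

// runProposer polls propose and logs failures, mirroring how the watcher
// now consumes tryProposeBatch's error; the interval is illustrative.
func runProposer(ctx context.Context, propose func() error) {
	ticker := time.NewTicker(3 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := propose(); err != nil {
				log.Error("failed to tryProposeBatch", "err", err)
			}
		}
	}
}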


@@ -5,7 +5,6 @@ import (
"fmt"
"math/big"
"os"
"sync"
"testing"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -50,10 +49,7 @@ func testBatchProposer(t *testing.T) {
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, db)
var wg sync.WaitGroup
wg.Add(1)
proposer.tryProposeBatch(&wg)
wg.Wait()
assert.NoError(t, proposer.tryProposeBatch())
infos, err := db.GetUnbatchedBlocks(map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", 100))


@@ -77,7 +77,6 @@ func TestFunction(t *testing.T) {
t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
t.Run("testL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
t.Run("testL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("testL2RelayerSkipBatches", testL2RelayerSkipBatches)
t.Run("testBatchProposer", testBatchProposer)


@@ -3,9 +3,7 @@ package l2
import (
"context"
"errors"
"fmt"
"math/big"
"runtime"
"sync"
"time"
@@ -15,7 +13,6 @@ import (
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"golang.org/x/sync/errgroup"
"modernc.org/mathutil"
"scroll-tech/database"
"scroll-tech/database/orm"
@@ -46,17 +43,14 @@ type Layer2Relayer struct {
rollupCh <-chan *sender.Confirmation
l1RollupABI *abi.ABI
// A list of processing message.
// key(string): confirmation ID, value(string): layer2 hash.
processingMessage sync.Map
// a list of processing message, indexed by layer2 hash
processingMessage map[string]string
// A list of processing batch commitment.
// key(string): confirmation ID, value(string): batch id.
processingCommitment sync.Map
// a list of processing batch commitment, indexed by batch id
processingCommitment map[string]string
// A list of processing batch finalization.
// key(string): confirmation ID, value(string): batch id.
processingFinalization sync.Map
// a list of processing batch finalization, indexed by batch id
processingFinalization map[string]string
stopCh chan struct{}
}
@@ -86,18 +80,15 @@ func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
rollupCh: rollupSender.ConfirmChan(),
l1RollupABI: bridge_abi.RollupMetaABI,
cfg: cfg,
processingMessage: sync.Map{},
processingCommitment: sync.Map{},
processingFinalization: sync.Map{},
processingMessage: map[string]string{},
processingCommitment: map[string]string{},
processingFinalization: map[string]string{},
stopCh: make(chan struct{}),
}, nil
}
const processMsgLimit = 100
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
func (r *Layer2Relayer) ProcessSavedEvents(wg *sync.WaitGroup) {
defer wg.Done()
func (r *Layer2Relayer) ProcessSavedEvents() {
batch, err := r.db.GetLatestFinalizedBatch()
if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err)
@@ -105,10 +96,7 @@ func (r *Layer2Relayer) ProcessSavedEvents(wg *sync.WaitGroup) {
}
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL2Messages(
map[string]interface{}{"status": orm.MsgPending},
fmt.Sprintf("AND height<=%d ORDER BY nonce ASC LIMIT %d", batch.EndBlockNumber, processMsgLimit),
)
msgs, err := r.db.GetL2MessagesByStatusUpToHeight(orm.MsgPending, batch.EndBlockNumber)
if err != nil {
log.Error("Failed to fetch unprocessed L2 messages", "err", err)
@@ -116,18 +104,25 @@ func (r *Layer2Relayer) ProcessSavedEvents(wg *sync.WaitGroup) {
}
// process messages in batches
batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, r.messageSender.NumberOfAccounts())
for size := 0; len(msgs) > 0; msgs = msgs[size:] {
if size = len(msgs); size > batchSize {
size = batchSize
batch_size := r.messageSender.NumberOfAccounts()
for from := 0; from < len(msgs); from += batch_size {
to := from + batch_size
if to > len(msgs) {
to = len(msgs)
}
var g errgroup.Group
for _, msg := range msgs[:size] {
msg := msg
for i := from; i < to; i++ {
msg := msgs[i]
g.Go(func() error {
return r.processSavedEvent(msg, batch.Index)
return r.processSavedEvent(msg, batch)
})
}
if err := g.Wait(); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("failed to process l2 saved event", "err", err)
@@ -137,13 +132,13 @@ func (r *Layer2Relayer) ProcessSavedEvents(wg *sync.WaitGroup) {
}
}
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) error {
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, batch *orm.BlockBatch) error {
// @todo fetch merkle proof from l2geth
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BlockHeight: big.NewInt(int64(msg.Height)),
BatchIndex: big.NewInt(0).SetUint64(index),
BatchIndex: big.NewInt(int64(batch.Index)),
MerkleProof: make([]byte, 0),
}
from := common.HexToAddress(msg.Sender)
@@ -181,15 +176,14 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) erro
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
return err
}
r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
r.processingMessage[msg.MsgHash] = msg.MsgHash
return nil
}
// ProcessPendingBatches submit batch data to layer 1 rollup contract
func (r *Layer2Relayer) ProcessPendingBatches(wg *sync.WaitGroup) {
defer wg.Done()
func (r *Layer2Relayer) ProcessPendingBatches() {
// batches are sorted by batch index in increasing order
batchesInDB, err := r.db.GetPendingBatches(1)
batchesInDB, err := r.db.GetPendingBatches()
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
@@ -269,28 +263,20 @@ func (r *Layer2Relayer) ProcessPendingBatches(wg *sync.WaitGroup) {
}
return
}
log.Info("commitBatch in layer1", "batch_id", id, "index", batch.Index, "hash", hash)
log.Info("commitBatch in layer1", "batchID", id, "index", batch.Index, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
}
r.processingCommitment.Store(txID, id)
r.processingCommitment[txID] = id
}
// ProcessCommittedBatches submit proof to layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches(wg *sync.WaitGroup) {
defer wg.Done()
// set skipped batches in a single db operation
if err := r.db.UpdateSkippedBatches(); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err)
// continue anyway
}
func (r *Layer2Relayer) ProcessCommittedBatches() {
// batches are sorted by batch index in increasing order
batches, err := r.db.GetCommittedBatches(1)
batches, err := r.db.GetCommittedBatches()
if err != nil {
log.Error("Failed to fetch committed L2 batches", "err", err)
return
@@ -318,8 +304,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches(wg *sync.WaitGroup) {
return
case orm.ProvingTaskFailed, orm.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
}
@@ -374,15 +358,15 @@ func (r *Layer2Relayer) ProcessCommittedBatches(wg *sync.WaitGroup) {
}
return
}
log.Info("finalizeBatchWithProof in layer1", "batch_id", id, "hash", hash)
log.Info("finalizeBatchWithProof in layer1", "batchID", id, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", id, "err", err)
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batchID", id, "err", err)
}
success = true
r.processingFinalization.Store(txID, id)
r.processingFinalization[txID] = id
default:
log.Error("encounter unreachable case in ProcessCommittedBatches",
@@ -401,12 +385,26 @@ func (r *Layer2Relayer) Start() {
for {
select {
case <-ticker.C:
var wg = sync.WaitGroup{}
var wg sync.WaitGroup
wg.Add(3)
go r.ProcessSavedEvents(&wg)
go r.ProcessPendingBatches(&wg)
go r.ProcessCommittedBatches(&wg)
go func() {
defer wg.Done()
r.ProcessSavedEvents()
}()
go func() {
defer wg.Done()
r.ProcessPendingBatches()
}()
go func() {
defer wg.Done()
r.ProcessCommittedBatches()
}()
wg.Wait()
case confirmation := <-r.messageCh:
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupCh:
@@ -431,36 +429,36 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
transactionType := "Unknown"
// check whether it is message relay transaction
if msgHash, ok := r.processingMessage.Load(confirmation.ID); ok {
if msgHash, ok := r.processingMessage[confirmation.ID]; ok {
transactionType = "MessageRelay"
// @todo handle db error
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), orm.MsgConfirmed, confirmation.TxHash.String())
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash, orm.MsgConfirmed, confirmation.TxHash.String())
if err != nil {
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash, "err", err)
}
r.processingMessage.Delete(confirmation.ID)
delete(r.processingMessage, confirmation.ID)
}
// check whether it is block commitment transaction
if batchID, ok := r.processingCommitment.Load(confirmation.ID); ok {
if batch_id, ok := r.processingCommitment[confirmation.ID]; ok {
transactionType = "BatchCommitment"
// @todo handle db error
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupCommitted)
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batch_id, confirmation.TxHash.String(), orm.RollupCommitted)
if err != nil {
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batch_id, "err", err)
}
r.processingCommitment.Delete(confirmation.ID)
delete(r.processingCommitment, confirmation.ID)
}
// check whether it is proof finalization transaction
if batchID, ok := r.processingFinalization.Load(confirmation.ID); ok {
if batch_id, ok := r.processingFinalization[confirmation.ID]; ok {
transactionType = "ProofFinalization"
// @todo handle db error
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupFinalized)
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batch_id, confirmation.TxHash.String(), orm.RollupFinalized)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batch_id, "err", err)
}
r.processingFinalization.Delete(confirmation.ID)
delete(r.processingFinalization, confirmation.ID)
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
}
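Note: the rewritten ProcessSavedEvents loop above walks msgs in windows of batch_size and fans each window out through an errgroup, aborting on the first failed window. The same pattern in isolation (generic names, not the relayer's actual types):

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// processInChunks runs handle over items in chunks of size n, one goroutine
// per item, and stops at the first chunk whose errgroup reports an error.
func processInChunks(items []string, n int, handle func(string) error) error {
	for from := 0; from < len(items); from += n {
		to := from + n
		if to > len(items) {
			to = len(items)
		}
		var g errgroup.Group
		for i := from; i < to; i++ {
			item := items[i] // per-iteration copy for the closure (pre-Go 1.22 idiom)
			g.Go(func() error { return handle(item) })
		}
		if err := g.Wait(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := processInChunks([]string{"a", "b", "c", "d", "e"}, 2, func(s string) error {
		fmt.Println("processed", s)
		return nil
	})
	fmt.Println("err:", err)
}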


@@ -5,7 +5,6 @@ import (
"encoding/json"
"math/big"
"os"
"sync"
"testing"
"time"
@@ -95,10 +94,7 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupFinalized)
assert.NoError(t, err)
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessSavedEvents(&wg)
wg.Wait()
relayer.ProcessSavedEvents()
msg, err := db.GetL2MessageByNonce(templateL2Message[0].Nonce)
assert.NoError(t, err)
@@ -154,10 +150,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
// err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupPending)
// assert.NoError(t, err)
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessPendingBatches(&wg)
wg.Wait()
relayer.ProcessPendingBatches()
// Check if Rollup Result is changed successfully
status, err := db.GetRollupStatus(batchID)
@@ -194,80 +187,9 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessCommittedBatches(&wg)
wg.Wait()
relayer.ProcessCommittedBatches()
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
}
func testL2RelayerSkipBatches(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
createBatch := func(rollupStatus orm.RollupStatus, provingStatus orm.ProvingStatus) string {
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx, &orm.BlockInfo{}, &orm.BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchID, rollupStatus)
assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, provingStatus)
assert.NoError(t, err)
return batchID
}
skipped := []string{
createBatch(orm.RollupCommitted, orm.ProvingTaskSkipped),
createBatch(orm.RollupCommitted, orm.ProvingTaskFailed),
}
notSkipped := []string{
createBatch(orm.RollupPending, orm.ProvingTaskSkipped),
createBatch(orm.RollupCommitting, orm.ProvingTaskSkipped),
createBatch(orm.RollupFinalizing, orm.ProvingTaskSkipped),
createBatch(orm.RollupFinalized, orm.ProvingTaskSkipped),
createBatch(orm.RollupPending, orm.ProvingTaskFailed),
createBatch(orm.RollupCommitting, orm.ProvingTaskFailed),
createBatch(orm.RollupFinalizing, orm.ProvingTaskFailed),
createBatch(orm.RollupFinalized, orm.ProvingTaskFailed),
createBatch(orm.RollupCommitted, orm.ProvingTaskVerified),
}
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessCommittedBatches(&wg)
wg.Wait()
for _, id := range skipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizationSkipped, status)
}
for _, id := range notSkipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.NotEqual(t, orm.RollupFinalizationSkipped, status)
}
}


@@ -5,7 +5,6 @@ import (
"fmt"
"math/big"
"reflect"
"sync"
"time"
geth "github.com/scroll-tech/go-ethereum"
@@ -104,12 +103,18 @@ func (w *WatcherClient) Start() {
number = 0
}
var wg sync.WaitGroup
wg.Add(3)
go w.tryFetchRunningMissingBlocks(w.ctx, &wg, number)
go w.FetchContractEvent(&wg, number)
go w.batchProposer.tryProposeBatch(&wg)
wg.Wait()
if err := w.tryFetchRunningMissingBlocks(w.ctx, number); err != nil {
log.Error("failed to fetchRunningMissingBlocks", "err", err)
}
// @todo handle error
if err := w.fetchContractEvent(number); err != nil {
log.Error("failed to fetchContractEvent", "err", err)
}
if err := w.batchProposer.tryProposeBatch(); err != nil {
log.Error("failed to tryProposeBatch", "err", err)
}
}
}
}()
@@ -123,15 +128,13 @@ func (w *WatcherClient) Stop() {
const blockTracesFetchLimit = uint64(10)
// try fetch missing blocks if inconsistent
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, wg *sync.WaitGroup, blockHeight uint64) {
defer wg.Done()
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) error {
// Get newest block in DB. must have blocks at that time.
// Don't use "block_trace" table "trace" column's BlockTrace.Number,
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
heightInDB, err := w.orm.GetBlockTracesLatestHeight()
if err != nil {
log.Error("failed to GetBlockTracesLatestHeight", "err", err)
return
return fmt.Errorf("failed to GetBlockTracesLatestHeight in DB: %v", err)
}
// Can't get trace from genesis block, so the default start number is 1.
@@ -149,10 +152,12 @@ func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, wg *sy
// Get block traces and insert into db.
if err = w.getAndStoreBlockTraces(ctx, from, to); err != nil {
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to)
return err
}
}
return nil
}
func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
@@ -181,8 +186,7 @@ func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uin
const contractEventsBlocksFetchLimit = int64(10)
// FetchContractEvent pull latest event logs from given contract address and save in DB
func (w *WatcherClient) FetchContractEvent(wg *sync.WaitGroup, blockHeight uint64) {
defer wg.Done()
func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
defer func() {
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
@@ -207,14 +211,14 @@ func (w *WatcherClient) FetchContractEvent(wg *sync.WaitGroup, blockHeight uint6
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 3)
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)
logs, err := w.FilterLogs(w.ctx, query)
if err != nil {
log.Error("failed to get event logs", "err", err)
return
return err
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
@@ -225,7 +229,7 @@ func (w *WatcherClient) FetchContractEvent(wg *sync.WaitGroup, blockHeight uint6
sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("failed to parse emitted event log", "err", err)
return
return err
}
// Update relayed message first to make sure we don't forget to update submited message.
@@ -240,17 +244,18 @@ func (w *WatcherClient) FetchContractEvent(wg *sync.WaitGroup, blockHeight uint6
}
if err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return
return err
}
}
if err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
log.Error("failed to save l2 messages", "err", err)
return
return err
}
w.processedMsgHeight = uint64(to)
}
return nil
}
func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message, []relayedMessage, error) {
@@ -261,7 +266,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
var relayedMessages []relayedMessage
for _, vLog := range logs {
switch vLog.Topics[0] {
case common.HexToHash(bridge_abi.SentMessageEventSignature):
case common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE):
event := struct {
Target common.Address
Sender common.Address
@@ -282,7 +287,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
event.Target = common.HexToAddress(vLog.Topics[1].String())
l2Messages = append(l2Messages, &orm.L2Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
MsgHash: utils.ComputeMessageHash(event.Target, event.Sender, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
@@ -293,7 +298,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(),
})
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
case common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE):
event := struct {
MsgHash common.Hash
}{}
@@ -304,7 +309,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
txHash: vLog.TxHash,
isSuccessful: true,
})
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
case common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE):
event := struct {
MsgHash common.Hash
}{}
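Note: both watchers bound each catch-up fetch with a fixed window (blockTracesFetchLimit, contractEventsBlocksFetchLimit) so no single RPC call covers an unbounded range. A sketch of the window arithmetic, with illustrative names:

// fetchWindows splits (processed, head] into inclusive windows of at most
// limit blocks, mirroring how the watcher chunks its catch-up fetches.
func fetchWindows(processed, head, limit uint64) [][2]uint64 {
	var windows [][2]uint64
	for from := processed + 1; from <= head; from += limit {
		to := from + limit - 1
		if to > head {
			to = head
		}
		windows = append(windows, [2]uint64{from, to})
	}
	return windows
}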


@@ -112,7 +112,7 @@ func testMonitorBridgeContract(t *testing.T) {
assert.NoError(t, err)
t.Log("Height in DB is", height)
assert.Greater(t, height, int64(previousHeight))
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
msgs, err := db.GetL2MessagesByStatus(orm.MsgPending)
assert.NoError(t, err)
assert.Equal(t, 2, len(msgs))
}
@@ -184,7 +184,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
assert.NoError(t, err)
t.Log("LatestHeight is", height)
assert.Greater(t, height, int64(previousHeight)) // height must be greater than previousHeight because confirmations is 0
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
msgs, err := db.GetL2MessagesByStatus(orm.MsgPending)
assert.NoError(t, err)
assert.Equal(t, 5, len(msgs))
}
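Note: the generic GetL2Messages(filterMap, ...) calls are replaced with purpose-built accessors (GetL2MessagesByStatus here, GetL2MessagesByStatusUpToHeight in the relayer). A hypothetical shape for one of them; sqlx is suggested by the Beginx/GetDB().DB calls elsewhere in this diff, and the table and column names are inferred from the old fmt.Sprintf filters, not read from the actual orm package:

import "github.com/jmoiron/sqlx"

// L2Message fields here are illustrative; only the columns referenced by the
// old filters ("status", "nonce", "height") are shown.
type L2Message struct {
	Nonce  uint64 `db:"nonce"`
	Status int    `db:"status"`
	Height uint64 `db:"height"`
}

// getL2MessagesByStatus is a hypothetical reconstruction of the dedicated accessor.
func getL2MessagesByStatus(db *sqlx.DB, status int) ([]L2Message, error) {
	var msgs []L2Message
	err := db.Select(&msgs,
		"SELECT nonce, status, height FROM l2_message WHERE status = $1 ORDER BY nonce ASC", status)
	return msgs, err
}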


@@ -16,9 +16,10 @@ import (
type accountPool struct {
client *ethclient.Client
minBalance *big.Int
accounts map[common.Address]*bind.TransactOpts
accsCh chan *bind.TransactOpts
minBalance *big.Int
accounts map[common.Address]*bind.TransactOpts
numAccounts int
accsCh chan *bind.TransactOpts
}
// newAccounts creates an accountPool instance.
@@ -28,10 +29,11 @@ func newAccountPool(ctx context.Context, minBalance *big.Int, client *ethclient.
minBalance.SetString("100000000000000000000", 10)
}
accs := &accountPool{
client: client,
minBalance: minBalance,
accounts: make(map[common.Address]*bind.TransactOpts, len(privs)),
accsCh: make(chan *bind.TransactOpts, len(privs)+2),
client: client,
minBalance: minBalance,
accounts: make(map[common.Address]*bind.TransactOpts, len(privs)),
numAccounts: len(privs),
accsCh: make(chan *bind.TransactOpts, len(privs)+2),
}
// get chainID from client
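Note: numAccounts caches len(privs) at construction. Signers are handed out and returned through the buffered accsCh, so a count derived from live state could change under the caller, while a fixed field cannot (this rationale is an inference; the diff does not state it). The checkout/checkin shape in isolation:

import "github.com/scroll-tech/go-ethereum/accounts/abi/bind"

type pool struct {
	numAccounts int
	accsCh      chan *bind.TransactOpts
}

func newPool(auths []*bind.TransactOpts) *pool {
	p := &pool{
		numAccounts: len(auths),
		accsCh:      make(chan *bind.TransactOpts, len(auths)),
	}
	for _, a := range auths {
		p.accsCh <- a // pre-fill: every signer starts checked in
	}
	return p
}

func (p *pool) checkout() *bind.TransactOpts { return <-p.accsCh }
func (p *pool) checkin(a *bind.TransactOpts) { p.accsCh <- a }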


@@ -151,7 +151,7 @@ func (s *Sender) ConfirmChan() <-chan *Confirmation {
// NumberOfAccounts return the count of accounts.
func (s *Sender) NumberOfAccounts() int {
return len(s.auths.accounts)
return s.auths.numAccounts
}
func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte) (*FeeData, error) {


@@ -22,7 +22,7 @@ import (
"scroll-tech/bridge/sender"
)
const TXBatch = 50
const TX_BATCH = 50
var (
privateKeys []*ecdsa.PrivateKey
@@ -84,7 +84,7 @@ func testBatchSender(t *testing.T, batchSize int) {
for idx := 0; idx < newSender.NumberOfAccounts(); idx++ {
index := idx
eg.Go(func() error {
for i := 0; i < TXBatch; i++ {
for i := 0; i < TX_BATCH; i++ {
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
id := strconv.Itoa(i + index*1000)
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil)
@@ -103,7 +103,7 @@ func testBatchSender(t *testing.T, batchSize int) {
if err := eg.Wait(); err != nil {
t.Error(err)
}
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TXBatch*newSender.NumberOfAccounts())
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TX_BATCH*newSender.NumberOfAccounts())
// avoid 10 mins cause testcase panic
after := time.After(80 * time.Second)


@@ -55,20 +55,16 @@ func setupEnv(t *testing.T) {
var err error
privateKey, err = crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121212"))
assert.NoError(t, err)
messagePrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121213"))
assert.NoError(t, err)
rollupPrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121214"))
assert.NoError(t, err)
// Load config.
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L1Config.Confirmations = 0
cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}
cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}
cfg.L2Config.Confirmations = 0
cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}
cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}
// Create l1geth container.
l1gethImg = docker.NewTestL1Docker(t)
@@ -93,47 +89,6 @@ func setupEnv(t *testing.T) {
// Create l1 and l2 auth
l1Auth = prepareAuth(t, l1Client, privateKey)
l2Auth = prepareAuth(t, l2Client, privateKey)
// send some balance to message and rollup sender
transferEther(t, l1Auth, l1Client, messagePrivateKey)
transferEther(t, l1Auth, l1Client, rollupPrivateKey)
transferEther(t, l2Auth, l2Client, messagePrivateKey)
transferEther(t, l2Auth, l2Client, rollupPrivateKey)
}
func transferEther(t *testing.T, auth *bind.TransactOpts, client *ethclient.Client, privateKey *ecdsa.PrivateKey) {
targetAddress := crypto.PubkeyToAddress(privateKey.PublicKey)
gasPrice, err := client.SuggestGasPrice(context.Background())
assert.NoError(t, err)
gasPrice.Mul(gasPrice, big.NewInt(2))
// Get pending nonce
nonce, err := client.PendingNonceAt(context.Background(), auth.From)
assert.NoError(t, err)
// 200 ether should be enough
value, ok := big.NewInt(0).SetString("0xad78ebc5ac6200000", 0)
assert.Equal(t, ok, true)
tx := types.NewTx(&types.LegacyTx{
Nonce: nonce,
To: &targetAddress,
Value: value,
Gas: 500000,
GasPrice: gasPrice,
})
signedTx, err := auth.Signer(auth.From, tx)
assert.NoError(t, err)
err = client.SendTransaction(context.Background(), signedTx)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), client, signedTx)
assert.NoError(t, err)
if receipt.Status != types.ReceiptStatusSuccessful {
t.Fatalf("Call failed")
}
}
func free(t *testing.T) {
@@ -195,9 +150,6 @@ func TestFunction(t *testing.T) {
// l1 rollup and watch rollup events
t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)
// l2 message
t.Run("testRelayL2MessageSucceed", testRelayL2MessageSucceed)
t.Cleanup(func() {
free(t)
})


@@ -1,175 +0,0 @@
package tests
import (
"context"
"math/big"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"sync"
"testing"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
)
func testRelayL2MessageSucceed(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
var wg sync.WaitGroup
wg.Add(4)
prepareContracts(t)
// Create L2Relayer
l2Cfg := cfg.L2Config
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// Create L2Watcher
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, 0, l2Cfg.BatchProposerConfig, l2Cfg.L2MessengerAddress, db)
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
// send message through l2 messenger contract
nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
assert.NoError(t, err)
sendTx, err := l2MessengerInstance.SendMessage(l2Auth, l1Auth.From, big.NewInt(0), common.Hex2Bytes("00112233"), big.NewInt(0))
assert.NoError(t, err)
sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx)
assert.NoError(t, err)
if sendReceipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// l2 watch process events
l2Watcher.FetchContractEvent(&wg, sendReceipt.BlockNumber.Uint64())
// check db status
msg, err := db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgPending)
assert.Equal(t, msg.Sender, l2Auth.From.String())
assert.Equal(t, msg.Target, l1Auth.From.String())
// add fake blocks
traces := []*types.BlockTrace{
{
Header: &types.Header{
Number: sendReceipt.BlockNumber,
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
StorageTrace: &types.StorageTrace{},
},
}
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
// add fake batch
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
traces[0].Header.ParentHash.String(), 1, 194676)
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
traces[0].Header.Number.Uint64(),
traces[0].Header.Number.Uint64()}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
// add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
// process pending batch and check status
l2Relayer.ProcessPendingBatches(&wg)
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
commitTxHash, err := db.GetCommitTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err)
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch CommitBatch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitted, status)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches(&wg)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err)
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch FinalizeBatch events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalized, status)
// process l2 messages
l2Relayer.ProcessSavedEvents(&wg)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgSubmitted)
relayTxHash, err := db.GetRelayL2MessageTxHash(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, true, relayTxHash.Valid)
relayTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String))
assert.NoError(t, err)
relayTxReceipt, err := bind.WaitMined(context.Background(), l1Client, relayTx)
assert.NoError(t, err)
assert.Equal(t, len(relayTxReceipt.Logs), 1)
// fetch message relayed events
err = l1Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgConfirmed)
}


@@ -6,7 +6,6 @@ import (
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"sync"
"testing"
"scroll-tech/bridge/l1"
@@ -79,12 +78,8 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
err = dbTx.Commit()
assert.NoError(t, err)
var wg = sync.WaitGroup{}
wg.Add(1)
// process pending batch and check status
l2Relayer.ProcessPendingBatches(&wg)
wg.Wait()
l2Relayer.ProcessPendingBatches()
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
@@ -98,6 +93,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch rollup events
assert.NoError(t, err)
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
@@ -112,11 +108,8 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
wg.Add(1)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches(&wg)
wg.Wait()
l2Relayer.ProcessCommittedBatches()
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
@@ -130,6 +123,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch rollup events
assert.NoError(t, err)
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)


@@ -20,8 +20,8 @@ func encodePacked(input ...[]byte) []byte {
// ComputeMessageHash compute the message hash
func ComputeMessageHash(
sender common.Address,
target common.Address,
sender common.Address,
value *big.Int,
fee *big.Int,
deadline *big.Int,
@@ -29,8 +29,8 @@ func ComputeMessageHash(
messageNonce *big.Int,
) common.Hash {
packed := encodePacked(
sender.Bytes(),
target.Bytes(),
sender.Bytes(),
math.U256Bytes(value),
math.U256Bytes(fee),
math.U256Bytes(deadline),


@@ -7,7 +7,6 @@ import (
"scroll-tech/bridge/utils"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
func TestKeccak2(t *testing.T) {
@@ -29,13 +28,15 @@ func TestKeccak2(t *testing.T) {
func TestComputeMessageHash(t *testing.T) {
hash := utils.ComputeMessageHash(
common.HexToAddress("0xd7227113b92e537aeda220d5a2f201b836e5879d"),
common.HexToAddress("0x47c02b023b6787ef4e503df42bbb1a94f451a1c0"),
big.NewInt(5000000000000000),
big.NewInt(0),
big.NewInt(1674204924),
common.Hex2Bytes("8eaac8a30000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000000000000000000000000000000011c37937e0800000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000"),
big.NewInt(30706),
common.HexToAddress("0xdafea492d9c6733ae3d56b7ed1adb60692c98bc5"),
common.HexToAddress("0xeafea492d9c6733ae3d56b7ed1adb60692c98bf7"),
big.NewInt(1),
big.NewInt(2),
big.NewInt(1234567),
common.Hex2Bytes("0011223344"),
big.NewInt(3),
)
assert.Equal(t, hash.String(), "0x920e59f62ca89a0f481d44961c55d299dd20c575693692d61fdf3ca579d8edf3")
if hash != common.HexToHash("0x58c9a5abfd2a558bb6a6fd5192b36fe9325d98763bafd3a51a1ea28a5d0b990b") {
t.Fatalf("Invalid ComputeMessageHash, want %s, got %s", "0x58c9a5abfd2a558bb6a6fd5192b36fe9325d98763bafd3a51a1ea28a5d0b990b", hash.Hex())
}
}
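Note: ComputeMessageHash is keccak256 over an abi.encodePacked-style concatenation of the message fields, and the hunks above swap the two address arguments, which is why the test now pins a different expected digest. A minimal reproduction; the field order after deadline is an assumption, since the diff truncates there:

import (
	"bytes"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/math"
	"github.com/scroll-tech/go-ethereum/crypto"
)

func computeMessageHash(a, b common.Address, value, fee, deadline, nonce *big.Int, message []byte) common.Hash {
	packed := bytes.Join([][]byte{
		a.Bytes(), // the hunk above is precisely about which address is packed first
		b.Bytes(),
		math.U256Bytes(value),
		math.U256Bytes(fee),
		math.U256Bytes(deadline),
		math.U256Bytes(nonce), // assumed position
		message,               // assumed position
	}, nil)
	return crypto.Keccak256Hash(packed)
}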


@@ -179,13 +179,6 @@ linters:
- depguard
- gocyclo
- unparam
- exportloopref
- sqlclosecheck
- rowserrcheck
- durationcheck
- bidichk
- typecheck
- unused
enable-all: false
disable:
@@ -207,7 +200,16 @@ issues:
# Exclude some linters from running on tests files.
- path: _test\.go
linters:
- gocyclo
- errcheck
- dupl
- gosec
# Exclude known linters from partially hard-vendored code,
# which is impossible to exclude via "nolint" comments.
- path: internal/hmac/
text: "weak cryptographic primitive"
linters:
- gosec
# Exclude some staticcheck messages
@@ -215,6 +217,18 @@ issues:
- staticcheck
text: "SA9003:"
- linters:
- golint
text: "package comment should be of the form"
- linters:
- golint
text: "don't use ALL_CAPS in Go names;"
- linters:
- golint
text: "don't use underscores in Go names;"
# Exclude lll issues for long lines with go:generate
- linters:
- lll


@@ -1,13 +1,17 @@
package cmd
import (
"fmt"
"os"
"os/exec"
"strings"
"sync"
"testing"
"time"
"github.com/docker/docker/pkg/reexec"
cmap "github.com/orcaman/concurrent-map"
"github.com/stretchr/testify/assert"
)
var verbose bool
@@ -47,6 +51,48 @@ func NewCmd(t *testing.T, name string, args ...string) *Cmd {
}
}
// RunApp exec's the current binary using name as argv[0] which will trigger the
// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go)
func (t *Cmd) RunApp(parallel bool) {
t.Log("cmd: ", append([]string{t.name}, t.args...))
cmd := &exec.Cmd{
Path: reexec.Self(),
Args: append([]string{t.name}, t.args...),
Stderr: t,
Stdout: t,
}
if parallel {
go func() {
_ = cmd.Run()
}()
} else {
_ = cmd.Run()
}
t.mu.Lock()
t.cmd = cmd
t.mu.Unlock()
}
// WaitExit wait util process exit.
func (t *Cmd) WaitExit() {
// Wait all the check funcs are finished or test status is failed.
for !(t.Failed() || t.checkFuncs.IsEmpty()) {
<-time.After(time.Millisecond * 500)
}
// Send interrupt signal.
t.mu.Lock()
_ = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// Interrupt send interrupt signal.
func (t *Cmd) Interrupt() {
t.mu.Lock()
t.Err = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// RegistFunc register check func
func (t *Cmd) RegistFunc(key string, check checkFunc) {
t.checkFuncs.Set(key, check)
@@ -57,6 +103,39 @@ func (t *Cmd) UnRegistFunc(key string) {
t.checkFuncs.Pop(key)
}
// ExpectWithTimeout wait result during timeout time.
func (t *Cmd) ExpectWithTimeout(parallel bool, timeout time.Duration, keyword string) {
if keyword == "" {
return
}
okCh := make(chan struct{}, 1)
t.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
waitResult := func() {
defer t.UnRegistFunc(keyword)
select {
case <-okCh:
return
case <-time.After(timeout):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
}
}
if parallel {
go waitResult()
} else {
waitResult()
}
}
func (t *Cmd) runCmd() {
cmd := exec.Command(t.args[0], t.args[1:]...) //nolint:gosec
cmd.Stdout = t
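Note: RunApp now takes a bool instead of a callback: true launches the reexec'd binary in a background goroutine, false blocks until it exits. Registered beforehand, ExpectWithTimeout(parallel=true, ...) watches the output concurrently, which is exactly how the TestRunBridge hunk near the top of this diff uses it:

// from the TestRunBridge hunk earlier in this diff
bridge.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("bridge version %s", version.Version))
bridge.RunApp(false) // blocks; the registered check fires on matching output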


@@ -1,114 +0,0 @@
package cmd
import (
"fmt"
"os"
"os/exec"
"strings"
"time"
"github.com/docker/docker/pkg/reexec"
"github.com/stretchr/testify/assert"
)
// RunApp exec's the current binary using name as argv[0] which will trigger the
// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go)
func (t *Cmd) RunApp(waitResult func() bool) {
t.Log("cmd: ", append([]string{t.name}, t.args...))
cmd := &exec.Cmd{
Path: reexec.Self(),
Args: append([]string{t.name}, t.args...),
Stderr: t,
Stdout: t,
}
if waitResult != nil {
go func() {
_ = cmd.Run()
}()
waitResult()
} else {
_ = cmd.Run()
}
t.mu.Lock()
t.cmd = cmd
t.mu.Unlock()
}
// WaitExit waits until the process exits.
func (t *Cmd) WaitExit() {
// Wait until all the check funcs have finished or the test has failed.
for !(t.Failed() || t.checkFuncs.IsEmpty()) {
<-time.After(time.Millisecond * 500)
}
// Send interrupt signal.
t.mu.Lock()
_ = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// Interrupt sends an interrupt signal.
func (t *Cmd) Interrupt() {
t.mu.Lock()
t.Err = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// WaitResult returns true if the keyword is seen before the timeout.
func (t *Cmd) WaitResult(timeout time.Duration, keyword string) bool {
if keyword == "" {
return false
}
okCh := make(chan struct{}, 1)
t.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer t.UnRegistFunc(keyword)
select {
case <-okCh:
return true
case <-time.After(timeout):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
}
return false
}
// ExpectWithTimeout waits for the expected keyword within the timeout.
func (t *Cmd) ExpectWithTimeout(parallel bool, timeout time.Duration, keyword string) {
if keyword == "" {
return
}
okCh := make(chan struct{}, 1)
t.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
waitResult := func() {
defer t.UnRegistFunc(keyword)
select {
case <-okCh:
return
case <-time.After(timeout):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
}
}
if parallel {
go waitResult()
} else {
waitResult()
}
}

View File

@@ -9,9 +9,10 @@ require (
github.com/mattn/go-colorable v0.1.8
github.com/mattn/go-isatty v0.0.14
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.10.2
golang.org/x/sync v0.1.0
)
require (
@@ -77,12 +78,11 @@ require (
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/net v0.5.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.4.0 // indirect
golang.org/x/text v0.6.0 // indirect
golang.org/x/net v0.3.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/text v0.5.0 // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
golang.org/x/tools v0.3.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect

View File

@@ -404,8 +404,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
@@ -482,8 +482,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -541,8 +541,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.3.0 h1:VWL6FNY2bEEmsGVKabSlHu5Irp34xmMRoqb/9lF9lxk=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -606,8 +606,8 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -619,8 +619,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

View File

@@ -1380,7 +1380,7 @@ dependencies = [
[[package]]
name = "halo2-mpt-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=scroll-dev-0920-fix#d6bd0f291d41c4585e783d2d94a77fd80e1ba47e"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=scroll-dev-0902#b0ffab97316f9cf4b9e65aba398a047a8d6424a1"
dependencies = [
"bitvec 0.22.3",
"ff 0.11.1",
@@ -1451,7 +1451,7 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/halo2.git?branch=scroll-dev-0902#6b8c8a07da9fbf5d5878831e35360b8c5e6d89a3"
source = "git+https://github.com/scroll-tech/halo2.git?branch=scroll-dev-0902#6f18f38e82d302cd8b6ce8809b59c32350b019a3"
dependencies = [
"blake2b_simd",
"cfg-if 0.1.10",
@@ -2397,7 +2397,7 @@ dependencies = [
[[package]]
name = "poseidon"
version = "0.2.0"
source = "git+https://github.com/appliedzkp/poseidon.git#0b9965fbcd9e03559088b8f68489592286bc55e0"
source = "git+https://github.com/appliedzkp/poseidon.git#5d29df01a95e3df6334080d28e983407f56b5da3"
dependencies = [
"group",
"halo2curves 0.2.1 (git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.3.0)",
@@ -2407,7 +2407,7 @@ dependencies = [
[[package]]
name = "poseidon"
version = "0.2.0"
source = "git+https://github.com/privacy-scaling-explorations/poseidon.git#0b9965fbcd9e03559088b8f68489592286bc55e0"
source = "git+https://github.com/privacy-scaling-explorations/poseidon.git#5d29df01a95e3df6334080d28e983407f56b5da3"
dependencies = [
"group",
"halo2curves 0.2.1 (git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.3.0)",
@@ -3389,7 +3389,7 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=fix/mpt_limit#cfe8b4e959d6e09b3a45b58e45589ae62988a729"
source = "git+https://github.com/scroll-tech/scroll-zkevm#51b9b022fcdb45c505ea351f9dca8f2cfbec4d86"
dependencies = [
"base64 0.13.0",
"blake2",
@@ -3816,7 +3816,7 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f"
[[package]]
name = "zkevm"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=fix/mpt_limit#cfe8b4e959d6e09b3a45b58e45589ae62988a729"
source = "git+https://github.com/scroll-tech/scroll-zkevm#51b9b022fcdb45c505ea351f9dca8f2cfbec4d86"
dependencies = [
"anyhow",
"blake2",

View File

@@ -8,8 +8,8 @@ edition = "2021"
crate-type = ["staticlib"]
[dependencies]
zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
types = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm" }
types = { git = "https://github.com/scroll-tech/scroll-zkevm" }
log = "0.4"
env_logger = "0.9.0"

View File

@@ -4,12 +4,12 @@ import (
"crypto/ecdsa"
"crypto/rand"
"encoding/hex"
"encoding/json"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// RespStatus represents status code from roller to scroll
@@ -36,11 +36,11 @@ type AuthMsg struct {
type Identity struct {
// Roller name
Name string `json:"name"`
// Unverified Unix timestamp of message creation
Timestamp uint32 `json:"timestamp"`
// Time of message creation
Timestamp int64 `json:"timestamp"`
// Roller public key
PublicKey string `json:"publicKey"`
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion.
// Version is common.Version+ZK_VERSION. Use the following to check the latest ZK_VERSION.
// curl -sL https://api.github.com/repos/scroll-tech/scroll-zkevm/commits | jq -r ".[0].sha"
Version string `json:"version"`
// Random unique token generated by manager
@@ -115,11 +115,12 @@ func (a *AuthMsg) PublicKey() (string, error) {
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
bs, err := json.Marshal(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
hash := crypto.Keccak256Hash(bs)
return hash[:], nil
}
@@ -203,12 +204,12 @@ type ProofDetail struct {
// Hash returns the proofMsg content hash.
func (z *ProofDetail) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(z)
bs, err := json.Marshal(z)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
hash := crypto.Keccak256Hash(bs)
return hash[:], nil
}
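
Both Hash implementations now keccak-hash a json.Marshal encoding instead of an RLP one. A self-contained sketch of the shared pattern, assuming only the go-ethereum crypto package (the payload type here is a toy stand-in, not the real Identity):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
)

type payload struct {
	Name      string `json:"name"`
	Timestamp int64  `json:"timestamp"`
}

// hash mirrors Identity.Hash/ProofDetail.Hash: JSON-encode, then Keccak256.
// Note: this is deterministic only while both sides share the exact struct
// definition, since json.Marshal emits fields in declaration order.
func hash(p *payload) ([]byte, error) {
	bs, err := json.Marshal(p)
	if err != nil {
		return nil, err
	}
	h := crypto.Keccak256Hash(bs)
	return h[:], nil
}

func main() {
	h, _ := hash(&payload{Name: "testRoller", Timestamp: 1673318400})
	fmt.Printf("%x\n", h)
}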

View File

@@ -16,7 +16,7 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
authMsg := &AuthMsg{
Identity: &Identity{
Name: "testRoller",
Timestamp: uint32(time.Now().Unix()),
Timestamp: time.Now().UnixNano(),
},
}
assert.NoError(t, authMsg.Sign(privkey))

View File

@@ -1,10 +1,30 @@
package utils
import (
"sync/atomic"
"github.com/scroll-tech/go-ethereum/core/types"
"golang.org/x/sync/errgroup"
)
// ComputeTraceGasCost computes the gas cost based on ExecutionResults.StructLogs.GasCost
func ComputeTraceGasCost(trace *types.BlockTrace) uint64 {
return trace.Header.GasUsed
var (
gasCost uint64
eg errgroup.Group
)
for idx := range trace.ExecutionResults {
i := idx
eg.Go(func() error {
var sum uint64
for _, log := range trace.ExecutionResults[i].StructLogs {
sum += log.GasCost
}
atomic.AddUint64(&gasCost, sum)
return nil
})
}
_ = eg.Wait()
return gasCost
}
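
The rewrite fans out one goroutine per execution result and folds the partial sums into a shared counter. errgroup is used purely as a join point here — the closures never return an error — while atomic.AddUint64 keeps the merge race-free at one atomic operation per block rather than per struct log. The same pattern in isolation:

package main

import (
	"fmt"
	"sync/atomic"

	"golang.org/x/sync/errgroup"
)

// parallelSum adds up every value across chunks, one goroutine per chunk.
func parallelSum(chunks [][]uint64) uint64 {
	var (
		total uint64
		eg    errgroup.Group
	)
	for idx := range chunks {
		i := idx // capture the loop variable, as the diff does (pre-Go 1.22 idiom)
		eg.Go(func() error {
			var sum uint64
			for _, v := range chunks[i] {
				sum += v
			}
			atomic.AddUint64(&total, sum)
			return nil
		})
	}
	_ = eg.Wait() // no goroutine returns an error; Wait only joins them
	return total
}

func main() {
	fmt.Println(parallelSum([][]uint64{{1, 2}, {3, 4, 5}})) // 15
}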

View File

@@ -19,7 +19,13 @@ func TestComputeTraceCost(t *testing.T) {
blockTrace := &types.BlockTrace{}
err = json.Unmarshal(templateBlockTrace, blockTrace)
assert.NoError(t, err)
var expected = blockTrace.Header.GasUsed
got := utils.ComputeTraceGasCost(blockTrace)
assert.Equal(t, expected, got)
var sum uint64
for _, v := range blockTrace.ExecutionResults {
for _, sv := range v.StructLogs {
sum += sv.GasCost
}
}
res := utils.ComputeTraceGasCost(blockTrace)
assert.Equal(t, sum, res)
}

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "prealpha-v11.5"
var tag = "prealpha-v9.2"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -22,8 +22,8 @@ var commit = func() string {
return ""
}()
// ZkVersion is the commit-id of common/libzkp/impl/cargo.lock/scroll-zkevm
var ZkVersion string
// ZK_VERSION is the commit-id of common/libzkp/impl/cargo.lock/scroll-zkevm
var ZK_VERSION string
// Version denotes the version of the scroll protocol, including l2geth, relayer, coordinator, roller, contracts, etc.
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZkVersion)
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZK_VERSION)

View File

@@ -386,7 +386,7 @@ library RollupVerifier {
t1
)
);
update_hash_scalar(18620528901694425296072105892920066495478887717015933899493919566746585676047, absorbing, 0);
update_hash_scalar(7326291674247555594112707886804937707847188185923070866278273345303869756280, absorbing, 0);
update_hash_point(m[0], m[1], absorbing, 2);
for (t0 = 0; t0 <= 4; t0++) {
update_hash_point(proof[0 + t0 * 2], proof[1 + t0 * 2], absorbing, 5 + t0 * 3);
@@ -724,8 +724,8 @@ library RollupVerifier {
(t0, t1) = (ecc_mul_add_pm(m, proof, 1461486238301980199876269201563775120819706402602, t0, t1));
(t0, t1) = (
ecc_mul_add(
5335172776193682293002595672140655300498265857728161236987288793112411362256,
9153855726472286104461915396077745889260526805920405949557461469033032628222,
18701609130775737229348071043080155034023979562517390395403433088802478899758,
15966955543930185772599298905781740007968379271659670990460125132276790404701,
m[78],
t0,
t1
@@ -733,8 +733,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
9026202793013131831701482540600751978141377016764300618037152689098701087208,
19644677619301694001087044142922327551116787792977369058786364247421954485859,
10391672869328159104536012527288890078475214572275421477472198141744100604180,
16383182967525077486800851500412772270268328143041811261940514978333847876450,
m[77],
t0,
t1
@@ -742,8 +742,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
10826234859452509771814283128042282248838241144105945602706734900173561719624,
5628243352113405764051108388315822074832358861640908064883601198703833923438,
1694121668121560366967381814358868176695875056710903754887787227675156636991,
6288755472313871386012926867179622380057563139110460659328016508371672965822,
m[76],
t0,
t1
@@ -751,8 +751,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
9833916648960859819503777242562918959056952519298917148524233826817297321072,
837915750759756490172805968793319594111899487492554675680829218939384285955,
8449090587209846475328734419746789925412190193479844231777165308243174237722,
19620423218491500875965944829407986067794157844846402182805878618955604592848,
m[75],
t0,
t1
@@ -760,8 +760,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
10257805671474982710489846158410183388099935223468876792311814484878286190506,
6925081619093494730602614238209964215162532591387952125009817011864359314464,
5053208336959682582031156680199539869251745263409434673229644546747696847142,
2515271708296970065769200367712058290268116287798438948140802173656220671206,
m[74],
t0,
t1
@@ -769,8 +769,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
4475887208248126488081900175351981014160135345959097965081514547035591501401,
17809934801579097157548855239127693133451078551727048660674021788322026074440,
14044565934581841113280816557133159251170886931106151374890478449607604267942,
4516676687937794780030405510740994119381246893674971835541700695978704585552,
m[73],
t0,
t1
@@ -823,8 +823,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
18539453526841971932568089122596968064597086391356856358866942118522457107863,
3647865108410881496134024808028560930237661296032155096209994441023206530212,
4919836553908828082540426444868776555669883964231731088484431671272015675682,
2534996469663628472218664436969797350677809756735321673130157881813913441609,
m[67],
t0,
t1
@@ -841,8 +841,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
19267653195273486172950176174654469275684545789180737280515385961619717720594,
8975180971271331994178632284567744253406636398050840906044549681238954521839,
7298741378311576950839968993357330108079245118485170808123459961337830256312,
10327561179499117619949936626306234488421661318541529469701192193684736307992,
m[65],
t0,
t1
@@ -859,8 +859,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
14883199025754033315476980677331963148201112555947054150371482532558947065890,
19913319410736467436640597337700981504577668548125107926660028143291852201132,
21344975294019301064497004820288763682448968861642019035490416932201272957274,
10527619823264344893410550194287064640208153251186939130321425213582959780489,
m[63],
t0,
t1
@@ -868,8 +868,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
18290509522533712126835038141804610778392690327965261406132668236833728306838,
3710298066677974093924183129147170087104393961634393354172472701713090868425,
8972742415650205333409282370033440562593431348747288268814492203356823531160,
8116706321112691122771049432546166822575953322170688547310064134261753771143,
m[62],
t0,
t1
@@ -877,8 +877,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
19363467492491917195052183458025198134822377737629876295496853723068679518308,
5854330679906778271391785925618350923591828884998994352880284635518306250788,
2245383788954722547301665173770198299224442299145553661157120655982065376923,
21429627532145565836455474503387893562363999035988060101286707048187310790834,
m[61],
t0,
t1
@@ -886,8 +886,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
16181109780595136982201896766265118193466959602989950846464420951358063185297,
8811570609098296287610981932552574275858846837699446990256241563674576678567,
6631831869726773976361406817204839637256208337970281843457872807848960103655,
9564029493986604546558813596663080644256762699468834511701525072767927949801,
m[60],
t0,
t1
@@ -895,8 +895,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
10062549363619405779400496848029040530770586215674909583260224093983878118724,
1989582851705118987083736605676322092152792129388805756040224163519806904905,
11480433023546787855799302686493624232665854025790899812568432142639901048711,
19408335616099148180409133533838326787843523379558500985213116784449716389602,
m[59],
t0,
t1
@@ -904,8 +904,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
19016883348721334939078393300672242978266248861945205052660538073783955572863,
1976040209279107904310062622264754919366006151976304093568644070161390236037,
17119009547436104907589161251911916154539209413889810725547125453954285498068,
16196009614025712805558792610177918739658373559330006740051047693948800191562,
m[58],
t0,
t1
@@ -922,8 +922,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
10610438624239085062445835373411523076517149007370367578847561825933262473434,
14538778619212692682166219259545768162136692909816914624880992580957990166795,
18650010323993268535055713787599480879302828622769515272251129462854128226895,
11244246887388549559894193327128701737108444364011850111062992666532968469107,
m[56],
t0,
t1

View File

@@ -15,10 +15,10 @@ test:
libzkp:
cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
rm -rf ./verifier/lib && cp -r ../common/libzkp/interface ./verifier/lib
cp -r ../common/libzkp/interface ./verifier/lib
coordinator: libzkp ## Builds the Coordinator instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
go build -ldflags "-X scroll-tech/common/version.ZK_VERSION=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
test-verifier: libzkp
go test -tags ffi -timeout 0 -v ./verifier
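
The only functional change to the coordinator target is the variable name passed to the linker's -X flag, which must match the Go symbol exactly (full package path plus variable name). As a generic illustration of the mechanism — the package path below is hypothetical:

// example/version/version.go
package version

// BuildVersion is deliberately empty in source. The linker injects it:
//
//	go build -ldflags "-X example/version.BuildVersion=v1.2.3" ./...
//
// -X only works on package-level string variables, and a mismatched symbol
// name is typically ignored without an error, leaving the variable empty —
// which is why renaming ZkVersion to ZK_VERSION had to be mirrored here
// in the Makefile.
var BuildVersion string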

File diff suppressed because one or more lines are too long

Binary file not shown.

View File

@@ -15,5 +15,5 @@ func TestRunCoordinator(t *testing.T) {
// wait result
coordinator.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("coordinator version %s", version.Version))
coordinator.RunApp(nil)
coordinator.RunApp(false)
}

View File

@@ -5,7 +5,7 @@ go 1.18
require (
github.com/orcaman/concurrent-map v1.0.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.10.2
golang.org/x/sync v0.1.0
@@ -35,8 +35,8 @@ require (
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.4.0 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/sys v0.3.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -349,8 +349,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
@@ -424,8 +424,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -540,8 +540,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -553,7 +553,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

View File

@@ -65,6 +65,7 @@ type Manager struct {
// A map containing sessions whose proofs failed or whose verification failed.
rollerPool cmap.ConcurrentMap
// TODO: once this is put into use, add it to graceful restart handling.
failedSessionInfos map[string]*SessionInfo
// A direct connection to the Halo2 verifier, used to verify
@@ -197,12 +198,7 @@ func (m *Manager) restorePrevSessions() {
log.Info("Coordinator restart reload sessions", "session start time", time.Unix(sess.info.StartTimestamp, 0))
for _, roller := range sess.info.Rollers {
log.Info(
"restore roller info for session",
"session id", sess.info.ID,
"roller name", roller.Name,
"public key", roller.PublicKey,
"proof status", roller.Status)
log.Info("restore roller info for session", "session id", sess.info.ID, "roller name", roller.Name, "public key", roller.PublicKey, "proof status", roller.Status)
}
go m.CollectProofs(sess)
@@ -278,6 +274,11 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
"roller pk", roller.PublicKey,
"error", msg.Error,
)
if dbErr = m.orm.UpdateProvingStatus(msg.ID, orm.ProvingTaskFailed); dbErr != nil {
log.Error("failed to update task status as failed", "error", dbErr)
}
// record the failed session.
m.addFailedSession(sess, msg.Error)
return nil
}
@@ -302,6 +303,8 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
success, err = m.verifier.VerifyProof(msg.Proof)
if err != nil {
// record failed session.
m.addFailedSession(sess, err.Error())
// TODO: this is only a temp workaround for testnet, we should return err in real cases
success = false
log.Error("Failed to verify zk proof", "proof id", msg.ID, "error", err)
@@ -310,83 +313,78 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
log.Info("Verify zk proof successfully", "verification result", success, "proof id", msg.ID)
}
var status orm.ProvingStatus
if success {
if dbErr = m.orm.UpdateProvingStatus(msg.ID, orm.ProvingTaskVerified); dbErr != nil {
log.Error(
"failed to update proving_status",
"msg.ID", msg.ID,
"status", orm.ProvingTaskVerified,
"error", dbErr)
}
return dbErr
status = orm.ProvingTaskVerified
} else {
// Set status as skipped if verification fails.
// Note that this is only a workaround for testnet here.
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
// so as to re-distribute the task in the future
status = orm.ProvingTaskFailed
}
return nil
if dbErr = m.orm.UpdateProvingStatus(msg.ID, status); dbErr != nil {
log.Error("failed to update proving_status", "msg.ID", msg.ID, "status", status, "error", dbErr)
}
return dbErr
}
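
The verified/failed branches now converge on a single status variable and one UpdateProvingStatus call, so the error handling and logging exist in one place. The shape of that refactor, with a toy updater standing in for the ORM:

// finishVerification sketches the tail of handleZkProof after the refactor.
func finishVerification(success bool, update func(id, status string) error, id string) error {
	status := "verified"
	if !success {
		// Testnet workaround noted in the diff: mark the task failed
		// instead of resetting it to unassigned for redistribution.
		status = "failed"
	}
	// One shared DB write and one shared error path for both outcomes.
	return update(id, status)
}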
// CollectProofs collects proofs corresponding to a proof generation session.
func (m *Manager) CollectProofs(sess *session) {
select {
case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
m.mu.Lock()
defer func() {
delete(m.sessions, sess.info.ID)
m.mu.Unlock()
}()
timer := time.NewTimer(time.Duration(m.cfg.CollectionTime) * time.Minute)
// Pick a random winner.
// First, round up the keys that actually sent in a valid proof.
var participatingRollers []string
for pk, roller := range sess.info.Rollers {
if roller.Status == orm.RollerProofValid {
participatingRollers = append(participatingRollers, pk)
for {
select {
case <-timer.C:
m.mu.Lock()
// Ensure proper clean-up of resources.
defer func() {
delete(m.sessions, sess.info.ID)
m.mu.Unlock()
}()
// Pick a random winner.
// First, round up the keys that actually sent in a valid proof.
var participatingRollers []string
for pk, roller := range sess.info.Rollers {
if roller.Status == orm.RollerProofValid {
participatingRollers = append(participatingRollers, pk)
}
}
}
// Ensure we got at least one proof before selecting a winner.
if len(participatingRollers) == 0 {
// record failed session.
errMsg := "proof generation session ended without receiving any valid proofs"
m.addFailedSession(sess, errMsg)
log.Warn(errMsg, "session id", sess.info.ID)
// Set status as skipped.
// Note that this is only a workaround for testnet here.
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
// so as to re-distribute the task in the future
if err := m.orm.UpdateProvingStatus(sess.info.ID, orm.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", sess.info.ID, "err", err)
// Ensure we got at least one proof before selecting a winner.
if len(participatingRollers) == 0 {
// record failed session.
errMsg := "proof generation session ended without receiving any valid proofs"
m.addFailedSession(sess, errMsg)
log.Warn(errMsg, "session id", sess.info.ID)
// Set status as skipped.
// Note that this is only a workaround for testnet here.
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
// so as to re-distribute the task in the future
if err := m.orm.UpdateProvingStatus(sess.info.ID, orm.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", sess.info.ID, "err", err)
}
return
}
// Now, select a random index for this slice.
randIndex := mathrand.Intn(len(participatingRollers))
_ = participatingRollers[randIndex]
// TODO: reward winner
return
}
// Now, select a random index for this slice.
randIndex := mathrand.Intn(len(participatingRollers))
_ = participatingRollers[randIndex]
// TODO: reward winner
return
case ret := <-sess.finishChan:
m.mu.Lock()
sess.info.Rollers[ret.pk].Status = ret.status
m.mu.Unlock()
if m.isSessionFailed(sess.info) {
if err := m.orm.UpdateProvingStatus(ret.id, orm.ProvingTaskFailed); err != nil {
log.Error("failed to update proving_status as failed", "msg.ID", ret.id, "error", err)
case ret := <-sess.finishChan:
m.mu.Lock()
sess.info.Rollers[ret.pk].Status = ret.status
m.mu.Unlock()
if err := m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "pk", ret.pk, "error", err)
}
}
if err := m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "pk", ret.pk, "error", err)
}
}
}
func (m *Manager) isSessionFailed(info *orm.SessionInfo) bool {
for _, roller := range info.Rollers {
if roller.Status != orm.RollerProofInvalid {
return false
}
}
return true
}
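
Structurally, CollectProofs becomes a for/select loop over a fixed timer and the session's finish channel, so roller status updates can be absorbed repeatedly until the collection window closes, where the old code handled each channel case at most once. The skeleton of that loop, with the ORM and winner-selection details elided:

package main

import "time"

// collect sketches the loop shape; record and pick are toy stand-ins.
func collect(window time.Duration, finishCh <-chan string, record func(string), pick func()) {
	timer := time.NewTimer(window)
	for {
		select {
		case <-timer.C:
			pick() // window closed: choose a winner and tear the session down
			return
		case pk := <-finishCh:
			record(pk) // persist this roller's status and keep waiting
		}
	}
}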
// APIs collect API services.
func (m *Manager) APIs() []rpc.API {
return []rpc.API{
@@ -405,12 +403,12 @@ func (m *Manager) APIs() []rpc.API {
// StartProofGenerationSession starts a proof generation session
func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) (success bool) {
if m.GetNumberOfIdleRollers() == 0 {
log.Warn("no idle roller when starting proof generation session", "id", task.ID)
roller := m.selectRoller()
if roller == nil {
return false
}
log.Info("start proof generation session", "id", task.ID)
defer func() {
if !success {
if err := m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskUnassigned); err != nil {
@@ -418,8 +416,11 @@ func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) (success boo
}
}
}()
if err := m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskAssigned); err != nil {
log.Error("failed to update task status", "id", task.ID, "err", err)
return false
}
// Get block traces.
blockInfos, err := m.orm.GetBlockInfos(map[string]interface{}{"batch_id": task.ID})
if err != nil {
log.Error(
@@ -429,6 +430,7 @@ func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) (success boo
)
return false
}
traces := make([]*types.BlockTrace, len(blockInfos))
for i, blockInfo := range blockInfos {
traces[i], err = m.Client.GetBlockTraceByHash(m.ctx, common.HexToHash(blockInfo.Hash))
@@ -443,62 +445,41 @@ func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) (success boo
}
}
// Dispatch task to rollers.
rollers := make(map[string]*orm.RollerStatus)
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller()
if roller == nil {
log.Info("selectRoller returns nil")
break
}
log.Info("roller is picked", "session id", task.ID, "name", roller.Name, "public key", roller.PublicKey)
// send trace to roller
if !roller.sendTask(task.ID, traces) {
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", task.ID)
continue
}
rollers[roller.PublicKey] = &orm.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: orm.RollerAssigned}
}
// No roller assigned.
if len(rollers) == 0 {
log.Error("no roller assigned", "id", task.ID, "number of idle rollers", m.GetNumberOfIdleRollers())
return false
}
// Update session proving status as assigned.
if err = m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskAssigned); err != nil {
log.Error("failed to update task status", "id", task.ID, "err", err)
log.Info("roller is picked", "session id", task.ID, "name", roller.Name, "public_key", roller.PublicKey)
// send trace to roller
if !roller.sendTask(task.ID, traces) {
log.Error("send task failed", "roller name", roller.Name, "public_key", roller.PublicKey, "id", task.ID)
return false
}
pk := roller.PublicKey
// Create a proof generation session.
sess := &session{
s := &session{
info: &orm.SessionInfo{
ID: task.ID,
Rollers: rollers,
ID: task.ID,
Rollers: map[string]*orm.RollerStatus{
pk: {
PublicKey: pk,
Name: roller.Name,
Status: orm.RollerAssigned,
},
},
StartTimestamp: time.Now().Unix(),
},
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
// Store session info.
if err = m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "error", err)
for _, roller := range sess.info.Rollers {
log.Error(
"restore roller info for session",
"session id", sess.info.ID,
"roller name", roller.Name,
"public key", roller.PublicKey,
"proof status", roller.Status)
}
if err = m.orm.SetSessionInfo(s.info); err != nil {
log.Error("db set session info fail", "roller name", roller.Name, "public_key", pk, "error", err)
return false
}
m.mu.Lock()
m.sessions[task.ID] = sess
m.sessions[task.ID] = s
m.mu.Unlock()
go m.CollectProofs(sess)
go m.CollectProofs(s)
return true
}
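
The dispatch path thus shrinks from a loop over RollersPerSession candidates to exactly one selectRoller/sendTask attempt, with the proving status flipped to assigned up front. A reduced sketch of the new control flow, using toy types in place of the ORM structs:

type pickedRoller struct{ pk, name string }

// startSession returns false (so the caller's deferred reset can run)
// whenever no idle roller exists or the task cannot be delivered.
func startSession(taskID string, pick func() *pickedRoller, send func(*pickedRoller, string) bool) (map[string]string, bool) {
	r := pick()
	if r == nil {
		return nil, false // bail out before touching any task status
	}
	if !send(r, taskID) {
		return nil, false
	}
	// Exactly one roller per session now.
	return map[string]string{r.pk: "assigned"}, true
}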

View File

@@ -64,7 +64,6 @@ func TestApis(t *testing.T) {
t.Run("TestHandshake", testHandshake)
t.Run("TestFailedHandshake", testFailedHandshake)
t.Run("TestSeveralConnections", testSeveralConnections)
t.Run("TestInvalidProof", testInvalidProof)
t.Run("TestIdleRollerSelection", testIdleRollerSelection)
// TODO: restart the roller alone after it receives a task; this test case can be added in the integration tests.
//t.Run("TestRollerReconnect", testRollerReconnect)
@@ -128,7 +127,7 @@ func testFailedHandshake(t *testing.T) {
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: name,
Timestamp: uint32(time.Now().Unix()),
Timestamp: time.Now().UnixNano(),
},
}
assert.NoError(t, authMsg.Sign(privkey))
@@ -146,7 +145,7 @@ func testFailedHandshake(t *testing.T) {
authMsg = &message.AuthMsg{
Identity: &message.Identity{
Name: name,
Timestamp: uint32(time.Now().Unix()),
Timestamp: time.Now().UnixNano(),
},
}
assert.NoError(t, authMsg.Sign(privkey))
@@ -217,58 +216,6 @@ func testSeveralConnections(t *testing.T) {
}
}
func testInvalidProof(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
// create mock rollers.
roller := newMockRoller(t, "roller_test", wsURL)
roller.waitTaskAndSendProof(t, time.Second, false, false)
defer roller.close()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers())
var ids = make([]string, 1)
dbTx, err := l2db.Beginx()
assert.NoError(t, err)
for i := range ids {
ID, err := l2db.NewBatchInDBTx(dbTx, &orm.BlockInfo{Number: uint64(i)}, &orm.BlockInfo{Number: uint64(i)}, "0f", 1, 194676)
assert.NoError(t, err)
ids[i] = ID
}
assert.NoError(t, dbTx.Commit())
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
)
for len(ids) > 0 {
select {
case <-tick:
status, err := l2db.GetProvingStatusByID(ids[0])
assert.NoError(t, err)
if status == orm.ProvingTaskFailed {
ids = ids[1:]
}
case <-tickStop:
t.Error("failed to check proof status")
return
}
}
}
func testIdleRollerSelection(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
@@ -288,7 +235,7 @@ func testIdleRollerSelection(t *testing.T) {
rollers := make([]*mockRoller, 20)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
rollers[i].waitTaskAndSendProof(t, time.Second, false, true)
rollers[i].waitTaskAndSendProof(t, time.Second, false)
}
defer func() {
// close connection
@@ -352,7 +299,7 @@ func testGracefulRestart(t *testing.T) {
// create mock roller
roller := newMockRoller(t, "roller_test", wsURL)
// wait 10 seconds, coordinator restarts before roller submits proof
roller.waitTaskAndSendProof(t, 10*time.Second, false, true)
roller.waitTaskAndSendProof(t, 10*time.Second, false)
// wait for coordinator to dispatch task
<-time.After(5 * time.Second)
@@ -382,7 +329,7 @@ func testGracefulRestart(t *testing.T) {
}
// will overwrite the roller client for `SubmitProof`
roller.waitTaskAndSendProof(t, time.Millisecond*500, true, true)
roller.waitTaskAndSendProof(t, time.Millisecond*500, true)
defer roller.close()
// verify proof status
@@ -472,7 +419,7 @@ func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscript
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: r.rollerName,
Timestamp: uint32(time.Now().Unix()),
Timestamp: time.Now().UnixNano(),
},
}
_ = authMsg.Sign(r.privKey)
@@ -501,7 +448,7 @@ func (r *mockRoller) releaseTasks() {
}
// Wait for the proof task; after receiving it, the roller submits a proof after proofTime.
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool, validProof bool) {
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool) {
// simulating the case that the roller first disconnects and then reconnects to the coordinator
// the Subscription and its `Err()` channel will be closed, and the coordinator will `freeRoller()`
if reconnect {
@@ -517,10 +464,10 @@ func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration,
r.releaseTasks()
r.stopCh = make(chan struct{})
go r.loop(t, r.client, proofTime, validProof, r.stopCh)
go r.loop(t, r.client, proofTime, r.stopCh)
}
func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, validProof bool, stopCh chan struct{}) {
func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, stopCh chan struct{}) {
for {
select {
case task := <-r.taskCh:
@@ -538,9 +485,6 @@ func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.D
Proof: &message.AggProof{},
},
}
if !validProof {
proof.Status = message.StatusProofError
}
assert.NoError(t, proof.Sign(r.privKey))
ok, err := client.SubmitProof(context.Background(), proof)
assert.NoError(t, err)

View File

@@ -41,7 +41,7 @@ func (r *rollerNode) sendTask(id string, traces []*types.BlockTrace) bool {
}:
r.TaskIDs.Set(id, struct{}{})
default:
log.Warn("roller channel is full", "roller name", r.Name, "public key", r.PublicKey)
log.Warn("roller channel is full", "roller name", r.Name, "public_key", r.PublicKey)
return false
}
return true
@@ -77,7 +77,7 @@ func (m *Manager) register(pubkey string, identity *message.Identity) (<-chan *m
roller := node.(*rollerNode)
// avoid reconnecting too frequently.
if time.Since(roller.registerTime) < 60 {
log.Warn("roller reconnect too frequently", "roller_name", identity.Name, "public key", pubkey)
log.Warn("roller reconnect too frequently", "roller_name", identity.Name, "public_key", pubkey)
return nil, fmt.Errorf("roller reconnect too frequently")
}
// update register time and status
@@ -106,18 +106,17 @@ func (m *Manager) freeTaskIDForRoller(pk string, id string) {
}
// GetNumberOfIdleRollers returns the count of idle rollers.
func (m *Manager) GetNumberOfIdleRollers() (count int) {
for i, pk := range m.rollerPool.Keys() {
if val, ok := m.rollerPool.Get(pk); ok {
func (m *Manager) GetNumberOfIdleRollers() int {
pubkeys := m.rollerPool.Keys()
for i := 0; i < len(pubkeys); i++ {
if val, ok := m.rollerPool.Get(pubkeys[i]); ok {
r := val.(*rollerNode)
if r.TaskIDs.Count() == 0 {
count++
if r.TaskIDs.Count() > 0 {
pubkeys[i], pubkeys = pubkeys[len(pubkeys)-1], pubkeys[:len(pubkeys)-1]
}
} else {
log.Error("rollerPool Get fail", "pk", pk, "idx", i, "pk len", pk)
}
}
return count
return len(pubkeys)
}
func (m *Manager) selectRoller() *rollerNode {
@@ -129,8 +128,6 @@ func (m *Manager) selectRoller() *rollerNode {
if r.TaskIDs.Count() == 0 {
return r
}
} else {
log.Error("rollerPool Get fail", "pk", pubkeys[idx.Int64()], "idx", idx.Int64(), "pk len", len(pubkeys))
}
pubkeys[idx.Int64()], pubkeys = pubkeys[0], pubkeys[1:]
}
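
GetNumberOfIdleRollers now filters the key slice in place with a swap-and-truncate instead of counting, and selectRoller pops candidates from the front. One subtlety of swap-remove inside an incrementing loop is that the element swapped into position i is skipped unless the index is revisited; a standalone sketch of the careful form:

// keepIdle drops every key for which busy(key) is true, reusing the
// backing array via swap-remove instead of building a new slice.
func keepIdle(keys []string, busy func(string) bool) []string {
	for i := 0; i < len(keys); i++ {
		if busy(keys[i]) {
			keys[i], keys = keys[len(keys)-1], keys[:len(keys)-1]
			i-- // re-check the element that was just swapped into slot i
		}
	}
	return keys
}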

View File

@@ -15,5 +15,5 @@ func TestRunDatabase(t *testing.T) {
// wait result
bridge.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("db_cli version %s", version.Version))
bridge.RunApp(nil)
bridge.RunApp(false)
}

View File

@@ -7,7 +7,7 @@ require (
github.com/lib/pq v1.10.6
github.com/mattn/go-sqlite3 v1.14.14
github.com/pressly/goose/v3 v3.7.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.10.2
)
@@ -28,8 +28,8 @@ require (
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.4.0 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/tools v0.3.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -339,8 +339,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
@@ -412,8 +412,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -526,8 +526,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=

View File

@@ -159,11 +159,11 @@ func (o *blockBatchOrm) GetVerifiedProofAndInstanceByID(id string) ([]byte, []by
return proof, instance, nil
}
func (o *blockBatchOrm) UpdateProofByID(ctx context.Context, id string, proof, instanceCommitments []byte, proofTimeSec uint64) error {
func (o *blockBatchOrm) UpdateProofByID(ctx context.Context, id string, proof, instance_commitments []byte, proofTimeSec uint64) error {
db := o.db
if _, err := db.ExecContext(ctx,
db.Rebind(`UPDATE block_batch set proof = ?, instance_commitments = ?, proof_time_sec = ? where id = ?;`),
proof, instanceCommitments, proofTimeSec, id,
proof, instance_commitments, proofTimeSec, id,
); err != nil {
log.Error("failed to update proof", "err", err)
}
@@ -236,8 +236,8 @@ func (o *blockBatchOrm) BatchRecordExist(id string) (bool, error) {
return true, nil
}
func (o *blockBatchOrm) GetPendingBatches(limit uint64) ([]string, error) {
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, RollupPending, limit)
func (o *blockBatchOrm) GetPendingBatches() ([]string, error) {
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC`, RollupPending)
if err != nil {
return nil, err
}
@@ -268,8 +268,8 @@ func (o *blockBatchOrm) GetLatestFinalizedBatch() (*BlockBatch, error) {
return batch, nil
}
func (o *blockBatchOrm) GetCommittedBatches(limit uint64) ([]string, error) {
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, RollupCommitted, limit)
func (o *blockBatchOrm) GetCommittedBatches() ([]string, error) {
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC`, RollupCommitted)
if err != nil {
return nil, err
}
@@ -305,7 +305,7 @@ func (o *blockBatchOrm) GetRollupStatusByIDList(ids []string) ([]RollupStatus, e
return make([]RollupStatus, 0), nil
}
query, args, err := sqlx.In("SELECT id, rollup_status FROM block_batch WHERE id IN (?);", ids)
query, args, err := sqlx.In("SELECT rollup_status FROM block_batch WHERE id IN (?);", ids)
if err != nil {
return make([]RollupStatus, 0), err
}
@@ -314,24 +314,17 @@ func (o *blockBatchOrm) GetRollupStatusByIDList(ids []string) ([]RollupStatus, e
rows, err := o.db.Query(query, args...)
statusMap := make(map[string]RollupStatus)
var statuses []RollupStatus
for rows.Next() {
var id string
var status RollupStatus
if err = rows.Scan(&id, &status); err != nil {
if err = rows.Scan(&status); err != nil {
break
}
statusMap[id] = status
statuses = append(statuses, status)
}
var statuses []RollupStatus
if err != nil {
return statuses, err
}
for _, id := range ids {
statuses = append(statuses, statusMap[id])
}
return statuses, nil
}
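
The two variants of GetRollupStatusByIDList differ in more than scanning one column: SQL's IN clause gives no ordering guarantee, so only the version that also selects id can hand results back in the caller's order. A minimal sketch of that re-ordering step, detached from the database (Go 1.18 generics, matching the go.mod shown above):

// reorder maps fetched rows back onto the input id order; ids missing
// from the result simply yield the zero value.
func reorder[S any](ids []string, byID map[string]S) []S {
	out := make([]S, 0, len(ids))
	for _, id := range ids {
		out = append(out, byID[id])
	}
	return out
}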
@@ -367,24 +360,24 @@ func (o *blockBatchOrm) UpdateRollupStatus(ctx context.Context, id string, statu
}
}
func (o *blockBatchOrm) UpdateCommitTxHashAndRollupStatus(ctx context.Context, id string, commitTxHash string, status RollupStatus) error {
func (o *blockBatchOrm) UpdateCommitTxHashAndRollupStatus(ctx context.Context, id string, commit_tx_hash string, status RollupStatus) error {
switch status {
case RollupCommitted:
_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ?, committed_at = ? where id = ?;"), commitTxHash, status, time.Now(), id)
_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ?, committed_at = ? where id = ?;"), commit_tx_hash, status, time.Now(), id)
return err
default:
_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ? where id = ?;"), commitTxHash, status, id)
_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ? where id = ?;"), commit_tx_hash, status, id)
return err
}
}
func (o *blockBatchOrm) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, id string, finalizeTxHash string, status RollupStatus) error {
func (o *blockBatchOrm) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, id string, finalize_tx_hash string, status RollupStatus) error {
switch status {
case RollupFinalized:
_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ?, finalized_at = ? where id = ?;"), finalizeTxHash, status, time.Now(), id)
_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ?, finalized_at = ? where id = ?;"), finalize_tx_hash, status, time.Now(), id)
return err
default:
_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ? where id = ?;"), finalizeTxHash, status, id)
_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ? where id = ?;"), finalize_tx_hash, status, id)
return err
}
}
@@ -406,8 +399,3 @@ func (o *blockBatchOrm) GetAssignedBatchIDs() ([]string, error) {
return ids, rows.Close()
}
func (o *blockBatchOrm) UpdateSkippedBatches() error {
_, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ? where (proving_status = ? or proving_status = ?) and rollup_status = ?;"), RollupFinalizationSkipped, ProvingTaskSkipped, ProvingTaskFailed, RollupCommitted)
return err
}

View File

@@ -154,7 +154,7 @@ func (o *blockTraceOrm) GetHashByNumber(number uint64) (*common.Hash, error) {
func (o *blockTraceOrm) InsertBlockTraces(blockTraces []*types.BlockTrace) error {
traceMaps := make([]map[string]interface{}, len(blockTraces))
for i, trace := range blockTraces {
number, hash, txNum, mtime := trace.Header.Number.Int64(),
number, hash, tx_num, mtime := trace.Header.Number.Int64(),
trace.Header.Hash().String(),
len(trace.Transactions),
trace.Header.Time
@@ -174,7 +174,7 @@ func (o *blockTraceOrm) InsertBlockTraces(blockTraces []*types.BlockTrace) error
"hash": hash,
"parent_hash": trace.Header.ParentHash.String(),
"trace": string(data),
"tx_num": txNum,
"tx_num": tx_num,
"gas_used": gasCost,
"block_timestamp": mtime,
}
@@ -186,8 +186,8 @@ func (o *blockTraceOrm) InsertBlockTraces(blockTraces []*types.BlockTrace) error
return err
}
func (o *blockTraceOrm) DeleteTracesByBatchID(batchID string) error {
if _, err := o.db.Exec(o.db.Rebind("update block_trace set trace = ? where batch_id = ?;"), "{}", batchID); err != nil {
func (o *blockTraceOrm) DeleteTracesByBatchID(batch_id string) error {
if _, err := o.db.Exec(o.db.Rebind("update block_trace set trace = ? where batch_id = ?;"), "{}", batch_id); err != nil {
return err
}
return nil

View File

@@ -118,7 +118,7 @@ type BlockTraceOrm interface {
GetBlockTracesLatestHeight() (int64, error)
GetBlockTraces(fields map[string]interface{}, args ...string) ([]*types.BlockTrace, error)
GetBlockInfos(fields map[string]interface{}, args ...string) ([]*BlockInfo, error)
// GetUnbatchedBlocks add `GetUnbatchedBlocks` because `GetBlockInfos` cannot support query "batch_id is NULL"
// add `GetUnbatchedBlocks` because `GetBlockInfos` cannot support query "batch_id is NULL"
GetUnbatchedBlocks(fields map[string]interface{}, args ...string) ([]*BlockInfo, error)
GetHashByNumber(number uint64) (*common.Hash, error)
DeleteTracesByBatchID(batchID string) error
@@ -137,24 +137,22 @@ type BlockBatchOrm interface {
GetBlockBatches(fields map[string]interface{}, args ...string) ([]*BlockBatch, error)
GetProvingStatusByID(id string) (ProvingStatus, error)
GetVerifiedProofAndInstanceByID(id string) ([]byte, []byte, error)
UpdateProofByID(ctx context.Context, id string, proof, instanceCommitments []byte, proofTimeSec uint64) error
UpdateProofByID(ctx context.Context, id string, proof, instance_commitments []byte, proofTimeSec uint64) error
UpdateProvingStatus(id string, status ProvingStatus) error
ResetProvingStatusFor(before ProvingStatus) error
NewBatchInDBTx(dbTx *sqlx.Tx, startBlock *BlockInfo, endBlock *BlockInfo, parentHash string, totalTxNum uint64, gasUsed uint64) (string, error)
BatchRecordExist(id string) (bool, error)
GetPendingBatches(limit uint64) ([]string, error)
GetCommittedBatches(limit uint64) ([]string, error)
GetPendingBatches() ([]string, error)
GetCommittedBatches() ([]string, error)
GetRollupStatus(id string) (RollupStatus, error)
GetRollupStatusByIDList(ids []string) ([]RollupStatus, error)
GetCommitTxHash(id string) (sql.NullString, error)
GetFinalizeTxHash(id string) (sql.NullString, error)
GetLatestFinalizedBatch() (*BlockBatch, error)
UpdateRollupStatus(ctx context.Context, id string, status RollupStatus) error
UpdateCommitTxHashAndRollupStatus(ctx context.Context, id string, commitTxHash string, status RollupStatus) error
UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, id string, finalizeTxHash string, status RollupStatus) error
UpdateCommitTxHashAndRollupStatus(ctx context.Context, id string, commit_tx_hash string, status RollupStatus) error
UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, id string, finalize_tx_hash string, status RollupStatus) error
GetAssignedBatchIDs() ([]string, error)
UpdateSkippedBatches() error
GetCommitTxHash(id string) (sql.NullString, error) // for unit tests only
GetFinalizeTxHash(id string) (sql.NullString, error) // for unit tests only
}
// L1MessageOrm is layer1 message db interface
@@ -168,8 +166,6 @@ type L1MessageOrm interface {
UpdateLayer1Status(ctx context.Context, msgHash string, status MsgStatus) error
UpdateLayer1StatusAndLayer2Hash(ctx context.Context, msgHash string, status MsgStatus, layer2Hash string) error
GetLayer1LatestWatchedHeight() (int64, error)
GetRelayL1MessageTxHash(nonce uint64) (sql.NullString, error) // for unit tests only
}
// L2MessageOrm is layer2 message db interface
@@ -178,7 +174,8 @@ type L2MessageOrm interface {
GetL2MessageByMsgHash(msgHash string) (*L2Message, error)
MessageProofExist(nonce uint64) (bool, error)
GetMessageProofByNonce(nonce uint64) (string, error)
GetL2Messages(fields map[string]interface{}, args ...string) ([]*L2Message, error)
GetL2MessagesByStatus(status MsgStatus) ([]*L2Message, error)
GetL2MessagesByStatusUpToHeight(status MsgStatus, height uint64) ([]*L2Message, error)
GetL2ProcessedNonce() (int64, error)
SaveL2Messages(ctx context.Context, messages []*L2Message) error
UpdateLayer1Hash(ctx context.Context, msgHash string, layer1Hash string) error
@@ -186,6 +183,4 @@ type L2MessageOrm interface {
UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status MsgStatus, layer1Hash string) error
UpdateMessageProof(ctx context.Context, nonce uint64, proof string) error
GetLayer2LatestWatchedHeight() (int64, error)
GetRelayL2MessageTxHash(nonce uint64) (sql.NullString, error) // for unit tests only
}

View File

@@ -167,12 +167,3 @@ func (m *l1MessageOrm) GetLayer1LatestWatchedHeight() (int64, error) {
}
return -1, nil
}
func (m *l1MessageOrm) GetRelayL1MessageTxHash(nonce uint64) (sql.NullString, error) {
row := m.db.QueryRow(`SELECT layer2_hash FROM l1_message WHERE nonce = $1`, nonce)
var hash sql.NullString
if err := row.Scan(&hash); err != nil {
return sql.NullString{}, err
}
return hash, nil
}
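
The deleted helper scans a nullable `layer2_hash` column into sql.NullString rather than a plain string. A tiny sketch of those semantics, with hypothetical values:

package main

import (
	"database/sql"
	"fmt"
)

func main() {
	var hash sql.NullString // as scanned from a nullable column
	fmt.Println(hash.Valid) // false: no relay tx recorded yet

	hash = sql.NullString{String: "0xabc", Valid: true}
	if hash.Valid {
		fmt.Println(hash.String) // safe to read only when Valid is true
	}
}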

View File

@@ -5,7 +5,6 @@ import (
"database/sql"
"errors"
"fmt"
"strings"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/log"
@@ -89,15 +88,32 @@ func (m *layer2MessageOrm) GetL2ProcessedNonce() (int64, error) {
}
// GetL2MessagesByStatus fetch list of messages given msg status
func (m *layer2MessageOrm) GetL2Messages(fields map[string]interface{}, args ...string) ([]*L2Message, error) {
query := "SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash FROM l2_message WHERE 1 = 1 "
for key := range fields {
query += fmt.Sprintf("AND %s=:%s ", key, key)
func (m *layer2MessageOrm) GetL2MessagesByStatus(status MsgStatus) ([]*L2Message, error) {
rows, err := m.db.Queryx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash FROM l2_message WHERE status = $1 ORDER BY nonce ASC;`, status)
if err != nil {
return nil, err
}
query = strings.Join(append([]string{query}, args...), " ")
db := m.db
rows, err := db.NamedQuery(db.Rebind(query), fields)
var msgs []*L2Message
for rows.Next() {
msg := &L2Message{}
if err = rows.StructScan(&msg); err != nil {
break
}
msgs = append(msgs, msg)
}
if len(msgs) == 0 || errors.Is(err, sql.ErrNoRows) {
// log.Warn("no unprocessed layer2 messages in db", "err", err)
} else if err != nil {
return nil, err
}
return msgs, rows.Close()
}
// GetL2MessagesByStatusUpToHeight fetch list of messages given msg status and an upper limit on height
func (m *layer2MessageOrm) GetL2MessagesByStatusUpToHeight(status MsgStatus, height uint64) ([]*L2Message, error) {
rows, err := m.db.Queryx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash FROM l2_message WHERE status = $1 AND height <= $2 ORDER BY nonce ASC;`, status, height)
if err != nil {
return nil, err
}
@@ -206,12 +222,3 @@ func (m *layer2MessageOrm) GetLayer2LatestWatchedHeight() (int64, error) {
}
return height, nil
}
func (m *layer2MessageOrm) GetRelayL2MessageTxHash(nonce uint64) (sql.NullString, error) {
row := m.db.QueryRow(`SELECT layer1_hash FROM l2_message WHERE nonce = $1`, nonce)
var hash sql.NullString
if err := row.Scan(&hash); err != nil {
return sql.NullString{}, err
}
return hash, nil
}
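
The typed getters above follow sqlx's Queryx/StructScan idiom. A self-contained sketch of that pattern under assumed names (a `messages` table, an int status column, and the lib/pq driver; none of these are confirmed by this compare):

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // hypothetical driver choice
)

// Message mirrors the row shape via `db` tags, as StructScan requires.
type Message struct {
	Nonce  uint64 `db:"nonce"`
	Status int    `db:"status"`
}

func main() {
	db, err := sqlx.Connect("postgres", "dbname=test sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	rows, err := db.Queryx(`SELECT nonce, status FROM messages WHERE status = $1 ORDER BY nonce ASC;`, 1)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	var msgs []*Message
	for rows.Next() {
		m := &Message{} // pass a pointer to struct, not a pointer to pointer
		if err = rows.StructScan(m); err != nil {
			log.Fatal(err)
		}
		msgs = append(msgs, m)
	}
	log.Printf("fetched %d messages", len(msgs))
}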

View File

@@ -281,7 +281,7 @@ func testOrmBlockBatch(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, int(2), len(batches))
batcheIDs, err := ormBatch.GetPendingBatches(10)
batcheIDs, err := ormBatch.GetPendingBatches()
assert.NoError(t, err)
assert.Equal(t, int(2), len(batcheIDs))
assert.Equal(t, batchID1, batcheIDs[0])
@@ -290,51 +290,33 @@ func testOrmBlockBatch(t *testing.T) {
err = ormBatch.UpdateCommitTxHashAndRollupStatus(context.Background(), batchID1, "commit_tx_1", orm.RollupCommitted)
assert.NoError(t, err)
batcheIDs, err = ormBatch.GetPendingBatches(10)
batcheIDs, err = ormBatch.GetPendingBatches()
assert.NoError(t, err)
assert.Equal(t, int(1), len(batcheIDs))
assert.Equal(t, batchID2, batcheIDs[0])
provingStatus, err := ormBatch.GetProvingStatusByID(batchID1)
proving_status, err := ormBatch.GetProvingStatusByID(batchID1)
assert.NoError(t, err)
assert.Equal(t, orm.ProvingTaskUnassigned, provingStatus)
assert.Equal(t, orm.ProvingTaskUnassigned, proving_status)
err = ormBatch.UpdateProofByID(context.Background(), batchID1, []byte{1}, []byte{2}, 1200)
assert.NoError(t, err)
err = ormBatch.UpdateProvingStatus(batchID1, orm.ProvingTaskVerified)
assert.NoError(t, err)
provingStatus, err = ormBatch.GetProvingStatusByID(batchID1)
proving_status, err = ormBatch.GetProvingStatusByID(batchID1)
assert.NoError(t, err)
assert.Equal(t, orm.ProvingTaskVerified, provingStatus)
assert.Equal(t, orm.ProvingTaskVerified, proving_status)
rollupStatus, err := ormBatch.GetRollupStatus(batchID1)
rollup_status, err := ormBatch.GetRollupStatus(batchID1)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitted, rollupStatus)
assert.Equal(t, orm.RollupCommitted, rollup_status)
err = ormBatch.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchID1, "finalize_tx_1", orm.RollupFinalized)
assert.NoError(t, err)
rollupStatus, err = ormBatch.GetRollupStatus(batchID1)
rollup_status, err = ormBatch.GetRollupStatus(batchID1)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalized, rollupStatus)
assert.Equal(t, orm.RollupFinalized, rollup_status)
result, err := ormBatch.GetLatestFinalizedBatch()
assert.NoError(t, err)
assert.Equal(t, batchID1, result.ID)
status1, err := ormBatch.GetRollupStatus(batchID1)
assert.NoError(t, err)
status2, err := ormBatch.GetRollupStatus(batchID2)
assert.NoError(t, err)
assert.NotEqual(t, status1, status2)
statues, err := ormBatch.GetRollupStatusByIDList([]string{batchID1, batchID2, batchID1, batchID2})
assert.NoError(t, err)
assert.Equal(t, statues[0], status1)
assert.Equal(t, statues[1], status2)
assert.Equal(t, statues[2], status1)
assert.Equal(t, statues[3], status2)
statues, err = ormBatch.GetRollupStatusByIDList([]string{batchID2, batchID1, batchID2, batchID1})
assert.NoError(t, err)
assert.Equal(t, statues[0], status2)
assert.Equal(t, statues[1], status1)
assert.Equal(t, statues[2], status2)
assert.Equal(t, statues[3], status1)
}
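
The assertions above only hold if GetRollupStatusByIDList returns statuses positionally aligned with the input IDs, duplicates included. One way to satisfy that contract, sketched with simplified types (the real method returns []RollupStatus, not []int):

package orm

import "github.com/jmoiron/sqlx"

// getRollupStatusByIDList queries the IDs once, then maps results back
// onto the caller's input order so duplicates keep their positions.
func getRollupStatusByIDList(db *sqlx.DB, ids []string) ([]int, error) {
	query, args, err := sqlx.In(`SELECT id, rollup_status FROM block_batch WHERE id IN (?);`, ids)
	if err != nil {
		return nil, err
	}
	rows, err := db.Queryx(db.Rebind(query), args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	byID := make(map[string]int, len(ids))
	for rows.Next() {
		var (
			id     string
			status int
		)
		if err = rows.Scan(&id, &status); err != nil {
			return nil, err
		}
		byID[id] = status
	}

	statuses := make([]int, 0, len(ids))
	for _, id := range ids {
		statuses = append(statuses, byID[id])
	}
	return statuses, nil
}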
// testOrmSessionInfo test rollup result table functions
@@ -360,9 +342,9 @@ func testOrmSessionInfo(t *testing.T) {
ids, err := ormBatch.GetAssignedBatchIDs()
assert.NoError(t, err)
assert.Equal(t, 1, len(ids))
sessionInfos, err := ormSession.GetSessionInfosByIDs(ids)
session_infos, err := ormSession.GetSessionInfosByIDs(ids)
assert.NoError(t, err)
assert.Equal(t, 0, len(sessionInfos))
assert.Equal(t, 0, len(session_infos))
sessionInfo := orm.SessionInfo{
ID: batchID,
@@ -377,25 +359,25 @@ func testOrmSessionInfo(t *testing.T) {
// insert
assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
sessionInfos, err = ormSession.GetSessionInfosByIDs(ids)
session_infos, err = ormSession.GetSessionInfosByIDs(ids)
assert.NoError(t, err)
assert.Equal(t, 1, len(sessionInfos))
assert.Equal(t, sessionInfo, *sessionInfos[0])
assert.Equal(t, 1, len(session_infos))
assert.Equal(t, sessionInfo, *session_infos[0])
// update
sessionInfo.Rollers["0"].Status = orm.RollerProofValid
assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
sessionInfos, err = ormSession.GetSessionInfosByIDs(ids)
session_infos, err = ormSession.GetSessionInfosByIDs(ids)
assert.NoError(t, err)
assert.Equal(t, 1, len(sessionInfos))
assert.Equal(t, sessionInfo, *sessionInfos[0])
assert.Equal(t, 1, len(session_infos))
assert.Equal(t, sessionInfo, *session_infos[0])
// delete
assert.NoError(t, ormBatch.UpdateProvingStatus(batchID, orm.ProvingTaskVerified))
ids, err = ormBatch.GetAssignedBatchIDs()
assert.NoError(t, err)
assert.Equal(t, 0, len(ids))
sessionInfos, err = ormSession.GetSessionInfosByIDs(ids)
session_infos, err = ormSession.GetSessionInfosByIDs(ids)
assert.NoError(t, err)
assert.Equal(t, 0, len(sessionInfos))
assert.Equal(t, 0, len(session_infos))
}

View File

@@ -223,7 +223,6 @@ github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNG
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221202061207-804e7edc23ba/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
@@ -271,7 +270,6 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BG
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=

View File

@@ -11,13 +11,13 @@ endif
libzkp:
cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
rm -rf ./prover/lib && cp -r ../common/libzkp/interface ./prover/lib
cp -r ../common/libzkp/interface ./prover/lib
roller: libzkp ## Build the Roller instance.
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/roller ./cmd
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZK_VERSION=${ZK_VERSION}" -o $(PWD)/build/bin/roller ./cmd
gpu-roller: libzkp ## Build the GPU Roller instance.
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -tags gpu -o $(PWD)/build/bin/roller ./cmd
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZK_VERSION=${ZK_VERSION}" -tags gpu -o $(PWD)/build/bin/roller ./cmd
mock_roller:
GOBIN=$(PWD)/build/bin go build -tags mock_prover -o $(PWD)/build/bin/roller $(PWD)/cmd
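
The ldflags rename above only takes effect if common/version declares a matching package-level string variable, since Go's -X linker flag can only set package-level string vars. A hypothetical sketch of the variable side:

package version

// ZK_VERSION is injected at link time, e.g.:
//   go build -ldflags "-X scroll-tech/common/version.ZK_VERSION=${ZK_VERSION}"
// It must stay a plain string var; a const cannot be overridden this way.
var ZK_VERSION = "unknown"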

View File

@@ -15,5 +15,5 @@ func TestRunRoller(t *testing.T) {
// wait result
roller.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("roller version %s", version.Version))
roller.RunApp(nil)
roller.RunApp(false)
}

View File

@@ -3,7 +3,7 @@ module scroll-tech/roller
go 1.18
require (
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.10.2
go.etcd.io/bbolt v1.3.6
@@ -25,7 +25,7 @@ require (
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.4.0 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/sys v0.3.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -323,8 +323,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
@@ -398,8 +398,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -512,8 +512,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=

View File

@@ -5,8 +5,8 @@ import (
"crypto/ecdsa"
"errors"
"fmt"
"math"
"sort"
"strings"
"sync/atomic"
"time"
@@ -29,6 +29,8 @@ import (
var (
// retry connecting to coordinator
retryWait = time.Second * 10
// net normal close
errNormalClose = errors.New("use of closed network connection")
)
// Roller contains websocket conn to coordinator, Stack, unix-socket to ipc-prover.
@@ -105,16 +107,10 @@ func (r *Roller) Start() {
// Register registers Roller to the coordinator through Websocket.
func (r *Roller) Register() error {
timestamp := time.Now().Unix()
if timestamp < 0 || timestamp > math.MaxUint32 {
panic("Expected current time to be between the years 1970 and 2106")
}
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: r.cfg.RollerName,
Timestamp: uint32(timestamp),
Timestamp: time.Now().UnixMilli(),
PublicKey: r.PublicKey(),
Version: version.Version,
},
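
The removed range check reflects the representation change: a uint32 of Unix seconds overflows in February 2106, while UnixMilli returns an int64 and needs no such guard. A short sketch of both forms:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	now := time.Now().Unix()
	// The old guard: uint32 seconds are only valid between 1970 and 2106.
	fmt.Println(now >= 0 && now <= math.MaxUint32) // true until 2106-02-07
	// The new form: int64 milliseconds, no truncation required.
	fmt.Println(time.Now().UnixMilli())
}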
@@ -192,6 +188,9 @@ func (r *Roller) ProveLoop() {
time.Sleep(time.Second * 3)
continue
}
if strings.Contains(err.Error(), errNormalClose.Error()) {
return
}
log.Error("prove failed", "error", err)
}
}
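
Matching the close reason by substring works, but since Go 1.16 the same condition can be detected structurally with net.ErrClosed; the sketch below shows that alternative (not what the diff itself does):

package main

import (
	"errors"
	"fmt"
	"net"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	ln.Close()
	_, err = ln.Accept() // fails: listener already closed
	fmt.Println(errors.Is(err, net.ErrClosed)) // true
}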
@@ -257,34 +256,25 @@ func (r *Roller) prove() error {
}
}()
r.signAndSubmitProof(proofMsg)
return nil
ok, serr := r.signAndSubmitProof(proofMsg)
if !ok {
log.Error("submit proof to coordinator failed", "task ID", proofMsg.ID)
}
return serr
}
func (r *Roller) signAndSubmitProof(msg *message.ProofDetail) {
func (r *Roller) signAndSubmitProof(msg *message.ProofDetail) (bool, error) {
authZkProof := &message.ProofMsg{ProofDetail: msg}
if err := authZkProof.Sign(r.priv); err != nil {
log.Error("sign proof error", "err", err)
return
return false, err
}
// Retry SubmitProof several times.
for i := 0; i < 3; i++ {
// When the roller is disconnected from the coordinator,
// wait until the roller reconnects to the coordinator.
for atomic.LoadInt64(&r.isDisconnected) == 1 {
time.Sleep(retryWait)
}
ok, serr := r.client.SubmitProof(context.Background(), authZkProof)
if !ok {
log.Error("submit proof to coordinator failed", "task ID", msg.ID)
return
}
if serr == nil {
return
}
log.Error("submit proof to coordinator error", "task ID", msg.ID, "error", serr)
// When the roller is disconnected from the coordinator,
// wait until the roller reconnects to the coordinator.
for atomic.LoadInt64(&r.isDisconnected) == 1 {
time.Sleep(retryWait)
}
return r.client.SubmitProof(context.Background(), authZkProof)
}
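
The rewritten submit path gates once on the disconnect flag before a single SubmitProof attempt, instead of retrying three times. A minimal sketch of that atomic gate pattern, with hypothetical names:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

var isDisconnected int64 // 1 while the websocket is down

func onDisconnect() { atomic.StoreInt64(&isDisconnected, 1) }
func onReconnect()  { atomic.StoreInt64(&isDisconnected, 0) }

// waitUntilConnected spin-waits, polling the flag at retryWait intervals.
func waitUntilConnected(retryWait time.Duration) {
	for atomic.LoadInt64(&isDisconnected) == 1 {
		time.Sleep(retryWait)
	}
}

func main() {
	onDisconnect()
	go func() {
		time.Sleep(50 * time.Millisecond)
		onReconnect()
	}()
	waitUntilConnected(10 * time.Millisecond)
	fmt.Println("connected; safe to submit the proof")
}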
// Stop closes the websocket connection.

View File

@@ -80,8 +80,7 @@ func free(t *testing.T) {
}
type appAPI interface {
WaitResult(timeout time.Duration, keyword string) bool
RunApp(waitResult func() bool)
RunApp(parallel bool)
WaitExit()
ExpectWithTimeout(parallel bool, timeout time.Duration, keyword string)
}
@@ -104,7 +103,7 @@ func runDBCliApp(t *testing.T, option, keyword string) {
// Wait expect result.
app.ExpectWithTimeout(true, time.Second*3, keyword)
app.RunApp(nil)
app.RunApp(false)
}
func runRollerApp(t *testing.T, args ...string) appAPI {

View File

@@ -28,16 +28,19 @@ func testStartProcess(t *testing.T) {
// Start bridge process.
bridgeCmd := runBridgeApp(t)
bridgeCmd.RunApp(func() bool { return bridgeCmd.WaitResult(time.Second*20, "Start bridge successfully") })
bridgeCmd.ExpectWithTimeout(true, time.Second*20, "Start bridge successfully")
bridgeCmd.RunApp(true)
// Start coordinator process.
coordinatorCmd := runCoordinatorApp(t, "--ws", "--ws.port", "8391")
coordinatorCmd.RunApp(func() bool { return coordinatorCmd.WaitResult(time.Second*20, "Start coordinator successfully") })
coordinatorCmd.ExpectWithTimeout(true, time.Second*20, "Start coordinator successfully")
coordinatorCmd.RunApp(true)
// Start roller process.
rollerCmd := runRollerApp(t)
rollerCmd.ExpectWithTimeout(true, time.Second*40, "roller start successfully")
rollerCmd.ExpectWithTimeout(true, time.Second*60, "register to coordinator successfully!")
rollerCmd.RunApp(func() bool { return rollerCmd.WaitResult(time.Second*40, "roller start successfully") })
rollerCmd.RunApp(true)
rollerCmd.WaitExit()
bridgeCmd.WaitExit()