Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-12 07:28:08 -05:00

Compare commits (6 commits)

| Author | SHA1 | Date |
|---|---|---|
| | b977e5a62f | |
| | 1b77f9044a | |
| | 46adbc7c0c | |
| | 9ee65119d8 | |
| | fb1c800532 | |
| | 2baad2ecad | |
File diff suppressed because one or more lines are too long
@@ -53,11 +53,11 @@ func (m *MsgProofUpdater) Start() {
continue
}
latestBatchIndexWithProof, err := m.l2SentMsgOrm.GetLatestL2SentMsgBatchIndex(m.ctx)
log.Info("latest batc with proof", "batch_index", latestBatchIndexWithProof)
if err != nil {
log.Error("MsgProofUpdater: Can not get latest L2SentMsgBatchIndex: ", "err", err)
continue
}
log.Info("latest batch with proof", "batch_index", latestBatchIndexWithProof)
var start uint64
if latestBatchIndexWithProof < 0 {
start = 1
@@ -2,6 +2,7 @@ package orm

import (
"context"
"errors"
"fmt"
"time"

@@ -102,6 +103,9 @@ func (l *L2SentMsg) GetLatestL2SentMsgBatchIndex(ctx context.Context) (int64, er
Select("batch_index").
First(&result).
Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return -1, nil
}
if err != nil {
return -1, fmt.Errorf("L2SentMsg.GetLatestL2SentMsgBatchIndex error: %w", err)
}
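The hunk above changes GetLatestL2SentMsgBatchIndex so that an empty table is reported as a normal "-1, nil" result instead of an error. A minimal sketch of the same gorm pattern, assuming a simplified model and an in-memory SQLite database (the names here are illustrative, not the repo's actual schema):

```go
package main

import (
	"errors"
	"fmt"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

// L2SentMsg is a simplified stand-in for the real ORM model.
type L2SentMsg struct {
	ID         uint64 `gorm:"primaryKey"`
	BatchIndex uint64 `gorm:"column:batch_index"`
}

// latestBatchIndex returns -1 with a nil error when no rows exist,
// mirroring the gorm.ErrRecordNotFound handling added in the diff.
func latestBatchIndex(db *gorm.DB) (int64, error) {
	var row L2SentMsg
	err := db.Model(&L2SentMsg{}).
		Select("batch_index").
		Order("batch_index DESC").
		First(&row).
		Error
	if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
		return -1, nil // an empty table is not an error
	}
	if err != nil {
		return -1, fmt.Errorf("latestBatchIndex error: %w", err)
	}
	return int64(row.BatchIndex), nil
}

func main() {
	db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	_ = db.AutoMigrate(&L2SentMsg{})

	idx, err := latestBatchIndex(db)
	fmt.Println(idx, err) // -1 <nil> while the table is empty
}
```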
@@ -1,7 +1,6 @@
package utils

import (
"bytes"
"context"
"encoding/binary"
"errors"

@@ -76,6 +75,14 @@ func GetBatchRangeFromCalldataV2(calldata []byte) (uint64, uint64, uint64, error
method := backendabi.ScrollChainV2ABI.Methods["commitBatch"]
values, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
// special case: import genesis batch
method = backendabi.ScrollChainV2ABI.Methods["importGenesisBatch"]
_, err2 := method.Inputs.Unpack(calldata[4:])
if err2 == nil {
// genesis batch
return 0, 0, 0, nil
}
// none of "commitBatch" and "importGenesisBatch" match, give up
return 0, 0, 0, err
}
args := commitBatchArgs{}

@@ -110,48 +117,3 @@ func GetBatchRangeFromCalldataV2(calldata []byte) (uint64, uint64, uint64, error
return batchIndex, startBlock, finishBlock, err
}

// GetBatchRangeFromCalldataV1 find the block range from calldata, both inclusive.
func GetBatchRangeFromCalldataV1(calldata []byte) ([]uint64, []uint64, []uint64, error) {
var batchIndices []uint64
var startBlocks []uint64
var finishBlocks []uint64
if bytes.Equal(calldata[0:4], common.Hex2Bytes("cb905499")) {
// commitBatches
method := backendabi.ScrollChainABI.Methods["commitBatches"]
values, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
return batchIndices, startBlocks, finishBlocks, err
}
args := make([]backendabi.IScrollChainBatch, len(values))
err = method.Inputs.Copy(&args, values)
if err != nil {
return batchIndices, startBlocks, finishBlocks, err
}

for i := 0; i < len(args); i++ {
batchIndices = append(batchIndices, args[i].BatchIndex)
startBlocks = append(startBlocks, args[i].Blocks[0].BlockNumber)
finishBlocks = append(finishBlocks, args[i].Blocks[len(args[i].Blocks)-1].BlockNumber)
}
} else if bytes.Equal(calldata[0:4], common.Hex2Bytes("8c73235d")) {
// commitBatch
method := backendabi.ScrollChainABI.Methods["commitBatch"]
values, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
return batchIndices, startBlocks, finishBlocks, err
}

args := backendabi.IScrollChainBatch{}
err = method.Inputs.Copy(&args, values)
if err != nil {
return batchIndices, startBlocks, finishBlocks, err
}
batchIndices = append(batchIndices, args.BatchIndex)
startBlocks = append(startBlocks, args.Blocks[0].BlockNumber)
finishBlocks = append(finishBlocks, args.Blocks[len(args.Blocks)-1].BlockNumber)
} else {
return batchIndices, startBlocks, finishBlocks, errors.New("invalid selector")
}
return batchIndices, startBlocks, finishBlocks, nil
}
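GetBatchRangeFromCalldataV2 now falls back to decoding importGenesisBatch calldata when commitBatch does not unpack, while the removed V1 helper dispatched on the raw 4-byte function selector (cb905499, 8c73235d). A small sketch of where such selectors come from, using go-ethereum's keccak helper; the function signatures below are assumptions for illustration, not taken from the ScrollChain ABI in this repo:

```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

// selector returns the first 4 bytes of keccak256(signature), which is
// what Solidity places at the start of calldata to identify the function.
func selector(signature string) []byte {
	return crypto.Keccak256([]byte(signature))[:4]
}

func main() {
	// Illustrative signatures only; the real contract ABI may differ.
	for _, sig := range []string{
		"commitBatch(uint8,bytes,bytes[],bytes)",
		"importGenesisBatch(bytes,bytes32)",
	} {
		fmt.Printf("%-40s 0x%s\n", sig, hex.EncodeToString(selector(sig)))
	}
}
```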
@@ -1,7 +1,6 @@
package utils_test

import (
"os"
"testing"

"github.com/ethereum/go-ethereum/common"

@@ -34,31 +33,11 @@ func TestGetBatchRangeFromCalldataV2(t *testing.T) {
assert.Equal(t, start, uint64(10))
assert.Equal(t, finish, uint64(20))
assert.Equal(t, batchIndex, uint64(2))
}

func TestGetBatchRangeFromCalldataV1(t *testing.T) {
calldata, err := os.ReadFile("../testdata/commit-batches-0x3095e91db7ba4a6fbf4654d607db322e58ff5579c502219c8024acaea74cf311.txt")
// genesis batch
batchIndex, start, finish, err = utils.GetBatchRangeFromCalldataV2(common.Hex2Bytes("3fdeecb200000000000000000000000000000000000000000000000000000000000000402dcb5308098d24a37fc1487a229fcedb09fa4343ede39cbad365bc925535bb09000000000000000000000000000000000000000000000000000000000000005900000000000000000000000000000000000000000000000000c252bc9780c4d83cf11f14b8cd03c92c4d18ce07710ba836d31d12da216c8330000000000000000000000000000000000000000000000000000000000000000000000000000000"))
assert.NoError(t, err)

// multiple batches
batchIndices, startBlocks, finishBlocks, err := utils.GetBatchRangeFromCalldataV1(common.Hex2Bytes(string(calldata[:])))
assert.NoError(t, err)
assert.Equal(t, len(batchIndices), 5)
assert.Equal(t, len(startBlocks), 5)
assert.Equal(t, len(finishBlocks), 5)
assert.Equal(t, batchIndices[0], uint64(1))
assert.Equal(t, batchIndices[1], uint64(2))
assert.Equal(t, batchIndices[2], uint64(3))
assert.Equal(t, batchIndices[3], uint64(4))
assert.Equal(t, batchIndices[4], uint64(5))
assert.Equal(t, startBlocks[0], uint64(1))
assert.Equal(t, startBlocks[1], uint64(6))
assert.Equal(t, startBlocks[2], uint64(7))
assert.Equal(t, startBlocks[3], uint64(19))
assert.Equal(t, startBlocks[4], uint64(20))
assert.Equal(t, finishBlocks[0], uint64(5))
assert.Equal(t, finishBlocks[1], uint64(6))
assert.Equal(t, finishBlocks[2], uint64(18))
assert.Equal(t, finishBlocks[3], uint64(19))
assert.Equal(t, finishBlocks[4], uint64(20))
assert.Equal(t, start, uint64(0))
assert.Equal(t, finish, uint64(0))
assert.Equal(t, batchIndex, uint64(0))
}
File diff suppressed because one or more lines are too long
@@ -15,8 +15,8 @@ func TestEventSignature(t *testing.T) {
assert.Equal(L1RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(L1FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))

assert.Equal(L1CommitBatchEventSignature, common.HexToHash("2cdc615c74452778c0fb6184735e014c13aad2b62774fe0b09bd1dcc2cc14a62"))
assert.Equal(L1FinalizeBatchEventSignature, common.HexToHash("9d3058a3cb9739a2527f22dd9a4138065844037d3004254952e2458d808cc364"))
assert.Equal(L1CommitBatchEventSignature, common.HexToHash("2c32d4ae151744d0bf0b9464a3e897a1d17ed2f1af71f7c9a75f12ce0d28238f"))
assert.Equal(L1FinalizeBatchEventSignature, common.HexToHash("26ba82f907317eedc97d0cbef23de76a43dd6edb563bdb6e9407645b950a7a2d"))

assert.Equal(L1QueueTransactionEventSignature, common.HexToHash("69cfcb8e6d4192b8aba9902243912587f37e550d75c1fa801491fce26717f37e"))
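The expected CommitBatch and FinalizeBatch hashes change because the corresponding events gain a batchIndex parameter (see the MockBridgeL1 diff below), and an event's topic0 is the keccak256 of its canonical signature. A short sketch of computing such topics with go-ethereum:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// topic0 is keccak256 of the canonical signature: no spaces, no "indexed".
	for _, sig := range []string{
		"CommitBatch(bytes32)",                           // old event shape
		"CommitBatch(uint256,bytes32)",                   // new event shape
		"FinalizeBatch(uint256,bytes32,bytes32,bytes32)", // new event shape
	} {
		fmt.Printf("%-48s %s\n", sig, crypto.Keccak256Hash([]byte(sig)).Hex())
	}
}
```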
@@ -56,13 +56,13 @@ contract MockBridgeL1 {

/// @notice Emitted when a new batch is committed.
/// @param batchHash The hash of the batch.
event CommitBatch(bytes32 indexed batchHash);
event CommitBatch(uint256 indexed batchIndex, bytes32 indexed batchHash);

/// @notice Emitted when a batch is finalized.
/// @param batchHash The hash of the batch
/// @param stateRoot The state root on layer 2 after this batch.
/// @param withdrawRoot The merkle root on layer2 after this batch.
event FinalizeBatch(bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);

/***********
* Structs *

@@ -130,7 +130,7 @@ contract MockBridgeL1 {

function commitBatch(
uint8 /*version*/,
bytes calldata /*parentBatchHeader*/,
bytes calldata _parentBatchHeader,
bytes[] memory chunks,
bytes calldata /*skippedL1MessageBitmap*/
) external {

@@ -138,6 +138,17 @@ contract MockBridgeL1 {
uint256 _chunksLength = chunks.length;
require(_chunksLength > 0, "batch is empty");

// decode batch index
uint256 headerLength = _parentBatchHeader.length;
uint256 parentBatchPtr;
uint256 parentBatchIndex;
assembly {
parentBatchPtr := mload(0x40)
calldatacopy(parentBatchPtr, _parentBatchHeader.offset, headerLength)
mstore(0x40, add(parentBatchPtr, headerLength))
parentBatchIndex := shr(192, mload(add(parentBatchPtr, 1)))
}

uint256 dataPtr;
assembly {
dataPtr := mload(0x40)

@@ -169,18 +180,29 @@ contract MockBridgeL1 {
}
bytes32 _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, 89);
committedBatches[0] = _batchHash;
emit CommitBatch(_batchHash);
emit CommitBatch(parentBatchIndex + 1, _batchHash);
}

function finalizeBatchWithProof(
bytes calldata /*batchHeader*/,
bytes calldata batchHeader,
bytes32 /*prevStateRoot*/,
bytes32 postStateRoot,
bytes32 withdrawRoot,
bytes calldata /*aggrProof*/
) external {
// decode batch index
uint256 headerLength = batchHeader.length;
uint256 batchPtr;
uint256 batchIndex;
assembly {
batchPtr := mload(0x40)
calldatacopy(batchPtr, batchHeader.offset, headerLength)
mstore(0x40, add(batchPtr, headerLength))
batchIndex := shr(192, mload(add(batchPtr, 1)))
}

bytes32 _batchHash = committedBatches[0];
emit FinalizeBatch(_batchHash, postStateRoot, withdrawRoot);
emit FinalizeBatch(batchIndex, _batchHash, postStateRoot, withdrawRoot);
}

/**********************
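Both new assembly blocks copy the batch header into memory and compute shr(192, mload(add(ptr, 1))): mload at offset 1 reads 32 bytes, and shifting right by 192 bits keeps only the top 8 bytes, that is, a big-endian uint64 stored in header bytes 1 through 8 (byte 0 presumably holding the codec version). A hedged Go equivalent of that decoding, assuming the same header layout:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// batchIndexFromHeader mirrors shr(192, mload(add(ptr, 1))) in the Solidity above:
// it reads the 8 bytes at offsets 1..8 of the batch header as a big-endian uint64.
func batchIndexFromHeader(header []byte) (uint64, error) {
	if len(header) < 9 {
		return 0, fmt.Errorf("header too short: %d bytes", len(header))
	}
	return binary.BigEndian.Uint64(header[1:9]), nil
}

func main() {
	// Hypothetical 89-byte header (the length MockBridgeL1 hashes) carrying batch index 7.
	header := make([]byte, 89)
	binary.BigEndian.PutUint64(header[1:9], 7)

	idx, err := batchIndexFromHeader(header)
	fmt.Println(idx, err) // 7 <nil>
}
```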
common/libzkp/impl/Cargo.lock (generated, 24 lines changed)
@@ -32,7 +32,7 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.2#225db80d26b6a2ed4aa5ad2462c887a58acdfd00"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"ark-std",
"env_logger 0.10.0",

@@ -432,7 +432,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.2#225db80d26b6a2ed4aa5ad2462c887a58acdfd00"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"eth-types",
"ethers-core",

@@ -1045,7 +1045,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.2#225db80d26b6a2ed4aa5ad2462c887a58acdfd00"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"ethers-core",
"ethers-signers",

@@ -1223,7 +1223,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.2#225db80d26b6a2ed4aa5ad2462c887a58acdfd00"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"eth-types",
"geth-utils",

@@ -1436,7 +1436,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.2#225db80d26b6a2ed4aa5ad2462c887a58acdfd00"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"digest 0.7.6",
"eth-types",

@@ -1476,7 +1476,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.2#225db80d26b6a2ed4aa5ad2462c887a58acdfd00"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"env_logger 0.9.3",
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",

@@ -2074,7 +2074,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.2#225db80d26b6a2ed4aa5ad2462c887a58acdfd00"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"env_logger 0.9.3",
"eth-types",

@@ -2261,7 +2261,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.2#225db80d26b6a2ed4aa5ad2462c887a58acdfd00"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"eth-types",
"ethers-core",

@@ -2276,7 +2276,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.2#225db80d26b6a2ed4aa5ad2462c887a58acdfd00"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"bus-mapping",
"eth-types",

@@ -2752,7 +2752,7 @@ dependencies = [
[[package]]
name = "prover"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.2#cf95001417faa6dcf80a1aea4def2ecfb39846df"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.3#337089ac40bac756d88b9ae30a3be1f82538b216"
dependencies = [
"aggregator",
"anyhow",

@@ -4037,7 +4037,7 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "types"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.2#cf95001417faa6dcf80a1aea4def2ecfb39846df"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.3#337089ac40bac756d88b9ae30a3be1f82538b216"
dependencies = [
"base64 0.13.1",
"blake2",

@@ -4482,7 +4482,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.2#225db80d26b6a2ed4aa5ad2462c887a58acdfd00"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"array-init",
"bus-mapping",
@@ -18,8 +18,8 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }

[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.2" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.2" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.3" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.3" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }

log = "0.4"
@@ -33,7 +33,7 @@ func (r ProofType) String() string {
case ProofTypeBatch:
return "proof type batch"
default:
return "illegal proof type"
return fmt.Sprintf("illegal proof type: %d", r)
}
}
@@ -118,7 +118,7 @@ func TestProveTypeString(t *testing.T) {
assert.Equal(t, "proof type batch", proofTypeBatch.String())

illegalProof := ProofType(3)
assert.Equal(t, "illegal proof type", illegalProof.String())
assert.Equal(t, "illegal proof type: 3", illegalProof.String())
}

func TestProofMsgPublicKey(t *testing.T) {
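The String() change folds the offending value into the message, which is what the updated test asserts. A compact sketch of the pattern; the constant values and the chunk-case string are assumptions, taken only loosely from the prover config comments elsewhere in this diff:

```go
package main

import "fmt"

// ProofType is a stand-in for the coordinator's enum; values assumed as 1 = chunk, 2 = batch.
type ProofType int

const (
	ProofTypeChunk ProofType = iota + 1
	ProofTypeBatch
)

func (r ProofType) String() string {
	switch r {
	case ProofTypeChunk:
		return "proof type chunk"
	case ProofTypeBatch:
		return "proof type batch"
	default:
		// Embedding the raw value makes unexpected types much easier to debug.
		return fmt.Sprintf("illegal proof type: %d", r)
	}
}

func main() {
	fmt.Println(ProofTypeBatch) // proof type batch
	fmt.Println(ProofType(3))   // illegal proof type: 3
}
```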
@@ -5,7 +5,7 @@ import (
"runtime/debug"
)

var tag = "v4.1.3"
var tag = "v4.1.9"

var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
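The commit variable above is initialized from runtime/debug.ReadBuildInfo. A minimal sketch of that mechanism; the exact lookup done in the repo's version file is not shown in this diff, so the vcs.revision handling here is an assumption:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

// commitFromBuildInfo returns the VCS revision the Go toolchain embedded at build time.
func commitFromBuildInfo() string {
	if info, ok := debug.ReadBuildInfo(); ok {
		for _, s := range info.Settings {
			if s.Key == "vcs.revision" {
				return s.Value
			}
		}
	}
	return "unknown"
}

func main() {
	tag := "v4.1.9"
	fmt.Println("version:", tag, "commit:", commitFromBuildInfo())
}
```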
@@ -87,7 +87,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
if errors.Is(err, ErrValidatorFailureProofMsgStatusNotOk) {
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
}
return nil
return err
}

proofTime := time.Since(proverTask.CreatedAt)

@@ -134,23 +134,21 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
}

if verifyErr != nil || !success {
if verifyErr != nil {
// TODO: this is only a temp workaround for testnet, we should return err in real cases
log.Error("failed to verify zk proof", "proof id", proofMsg.ID, "prover pk", pk, "prove type",
proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
}
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)

// TODO: Prover needs to be slashed if proof is invalid.
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)

log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
return nil

if verifyErr == nil {
verifyErr = fmt.Errorf("verification succeeded and it's an invalid proof")
}
return verifyErr
}

if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg); err != nil {
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg.Type)
return err
}

coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
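The net effect of this hunk is that a failed or invalid verification now propagates an error to the HTTP layer (which the integration tests below map to ErrCoordinatorHandleZkProofFailure) instead of being swallowed. A tiny sketch of folding the verifier's (success, err) pair into a single error; the message string is copied from the diff, everything else is illustrative:

```go
package main

import (
	"errors"
	"fmt"
)

// verifyOutcome converts a verifier's (success, err) pair into one error:
// a clean "false" result is still treated as a failure.
func verifyOutcome(success bool, verifyErr error) error {
	if verifyErr != nil {
		return fmt.Errorf("failed to verify zk proof: %w", verifyErr)
	}
	if !success {
		return errors.New("verification succeeded and it's an invalid proof")
	}
	return nil
}

func main() {
	fmt.Println(verifyOutcome(true, nil))                      // <nil>
	fmt.Println(verifyOutcome(false, nil))                     // invalid-proof case
	fmt.Println(verifyOutcome(false, errors.New("bad proof"))) // verifier-error case
}
```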
@@ -343,8 +343,11 @@ func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, e

// UpdateUnassignedChunkReturning update the unassigned batch which end_block_number <= height and return the update record
func (o *Chunk) UpdateUnassignedChunkReturning(ctx context.Context, height, limit int) ([]*Chunk, error) {
if height <= 0 {
return nil, errors.New("Chunk.UpdateUnassignedBatchReturning error: height must be larger than zero")
}
if limit < 0 {
return nil, errors.New("limit must not be smaller than zero")
return nil, errors.New("Chunk.UpdateUnassignedBatchReturning error: limit must not be smaller than zero")
}
if limit == 0 {
return nil, nil
@@ -17,6 +17,8 @@ func Route(router *gin.Engine, cfg *config.Config) {
func v1(router *gin.RouterGroup, conf *config.Config) {
r := router.Group("/v1")

r.GET("/health", api.HealthCheck.HealthCheck)

challengeMiddleware := middleware.ChallengeMiddleware(conf)
r.GET("/challenge", challengeMiddleware.LoginHandler)

@@ -26,7 +28,6 @@ func v1(router *gin.RouterGroup, conf *config.Config) {
// need jwt token api
r.Use(loginMiddleware.MiddlewareFunc())
{
r.GET("/healthz", api.HealthCheck.HealthCheck)
r.POST("/get_task", api.GetTask.GetTasks)
r.POST("/submit_proof", api.SubmitProof.SubmitProof)
}
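The routing change registers /health before the JWT middleware (and drops the token-protected /healthz), so provers can probe liveness without logging in, which is what the new healthCheckSuccess test helper relies on. A hedged gin sketch of that layout; the inline auth check is a stand-in for the real JWT middleware, and the port is taken from the sample prover config later in this diff:

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	router := gin.Default()
	v1 := router.Group("/v1")

	// Public health endpoint: reachable without any token.
	v1.GET("/health", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"errcode": 0, "errmsg": "ok"})
	})

	// Routes registered after this middleware require an Authorization header
	// (a stand-in for the coordinator's JWT middleware).
	v1.Use(func(c *gin.Context) {
		if c.GetHeader("Authorization") == "" {
			c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"errmsg": "missing token"})
			return
		}
		c.Next()
	})
	v1.POST("/get_task", func(c *gin.Context) { c.JSON(http.StatusOK, gin.H{"errcode": 0}) })
	v1.POST("/submit_proof", func(c *gin.Context) { c.JSON(http.StatusOK, gin.H{"errcode": 0}) })

	_ = router.Run(":8555")
}
```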
@@ -2,7 +2,7 @@ package types

// GetTaskParameter for ProverTasks request parameter
type GetTaskParameter struct {
ProverHeight int `form:"prover_height" json:"prover_height" binding:"required"`
ProverHeight int `form:"prover_height" json:"prover_height"`
TaskType int `form:"task_type" json:"task_type"`
}
@@ -1,21 +0,0 @@
package types

import (
"scroll-tech/common/types"
"scroll-tech/common/types/message"
)

// ProversInfo is assigned provers info of a task (session)
type ProversInfo struct {
ID string `json:"id"`
ProverStatusList []*ProverStatus `json:"provers"`
StartTimestamp int64 `json:"start_timestamp"`
ProveType message.ProofType `json:"prove_type,omitempty"`
}

// ProverStatus is the prover name and prover prove status
type ProverStatus struct {
PublicKey string `json:"public_key"`
Name string `json:"name"`
Status types.ProverProveStatus `json:"status"`
}
@@ -170,9 +170,7 @@ func testHandshake(t *testing.T) {
}()

chunkProver := newMockProver(t, "prover_chunk_test", coordinatorURL, message.ProofTypeChunk)
token := chunkProver.connectToCoordinator(t)
assert.NotEmpty(t, token)
assert.True(t, chunkProver.healthCheck(t, token, types.Success))
assert.True(t, chunkProver.healthCheckSuccess(t))
}

func testFailedHandshake(t *testing.T) {

@@ -181,21 +179,17 @@
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
proofCollector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()

// Try to perform handshake without token
chunkProver := newMockProver(t, "prover_chunk_test", coordinatorURL, message.ProofTypeChunk)
token := chunkProver.connectToCoordinator(t)
assert.NotEmpty(t, token)
assert.True(t, chunkProver.healthCheck(t, token, types.Success))
assert.True(t, chunkProver.healthCheckSuccess(t))

// Try to perform handshake with timeouted token
// Try to perform handshake with server shutdown
assert.NoError(t, httpHandler.Shutdown(context.Background()))
time.Sleep(time.Second)
batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch)
token = chunkProver.connectToCoordinator(t)
assert.NotEmpty(t, token)
<-time.After(time.Duration(tokenTimeout+1) * time.Second)
assert.True(t, batchProver.healthCheck(t, token, types.ErrJWTTokenExpired))
assert.True(t, batchProver.healthCheckFailure(t))
}

func testValidProof(t *testing.T) {

@@ -235,7 +229,7 @@
}
proverTask := provers[i].getProverTask(t, proofType)
assert.NotNil(t, proverTask)
provers[i].submitProof(t, proverTask, proofStatus)
provers[i].submitProof(t, proverTask, proofStatus, types.Success)
}

// verify proof status

@@ -296,7 +290,7 @@ func testInvalidProof(t *testing.T) {
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType)
proverTask := provers[i].getProverTask(t, proofType)
assert.NotNil(t, proverTask)
provers[i].submitProof(t, proverTask, verifiedFailed)
provers[i].submitProof(t, proverTask, verifiedFailed, types.ErrCoordinatorHandleZkProofFailure)
}

// verify proof status

@@ -357,7 +351,7 @@ func testProofGeneratedFailed(t *testing.T) {
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType)
proverTask := provers[i].getProverTask(t, proofType)
assert.NotNil(t, proverTask)
provers[i].submitProof(t, proverTask, generatedFailed)
provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure)
}

// verify proof status

@@ -431,12 +425,12 @@ func testTimeoutProof(t *testing.T) {
chunkProver2 := newMockProver(t, "prover_test"+strconv.Itoa(2), coordinatorURL, message.ProofTypeChunk)
proverChunkTask2 := chunkProver2.getProverTask(t, message.ProofTypeChunk)
assert.NotNil(t, proverChunkTask2)
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess)
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success)

batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch)
proverBatchTask2 := batchProver2.getProverTask(t, message.ProofTypeBatch)
assert.NotNil(t, proverBatchTask2)
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess)
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success)

// verify proof status, it should be verified now, because second prover sent valid proof
chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
@@ -108,17 +108,27 @@ func (r *mockProver) login(t *testing.T, challengeString string) string {
return loginData.Token
}

func (r *mockProver) healthCheck(t *testing.T, token string, errCode int) bool {
func (r *mockProver) healthCheckSuccess(t *testing.T) bool {
var result types.Response
client := resty.New()
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetResult(&result).
Get("http://" + r.coordinatorURL + "/coordinator/v1/healthz")
Get("http://" + r.coordinatorURL + "/coordinator/v1/health")
assert.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode())
assert.Equal(t, errCode, result.ErrCode)
assert.Equal(t, ctypes.Success, result.ErrCode)
return true
}

func (r *mockProver) healthCheckFailure(t *testing.T) bool {
var result types.Response
client := resty.New()
resp, err := client.R().
SetResult(&result).
Get("http://" + r.coordinatorURL + "/coordinator/v1/health")
assert.Error(t, err)
assert.Equal(t, 0, resp.StatusCode())
assert.Equal(t, 0, result.ErrCode)
return true
}

@@ -151,7 +161,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) *t
return &result.Data
}

func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus) {
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) {
proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: proverTaskSchema.TaskID,

@@ -206,5 +216,5 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
Post("http://" + r.coordinatorURL + "/coordinator/v1/submit_proof")
assert.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode())
assert.Equal(t, ctypes.Success, result.ErrCode)
assert.Equal(t, errCode, result.ErrCode)
}
@@ -36,8 +36,8 @@ type LoginResponse struct {

// GetTaskRequest defines the request structure for GetTask API
type GetTaskRequest struct {
ProverHeight uint64 `json:"prover_height"`
TaskType message.ProofType `json:"task_type"`
ProverHeight uint64 `json:"prover_height,omitempty"`
}

// GetTaskResponse defines the response structure for GetTask API
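With the omitempty tag, batch provers (which never set ProverHeight, as the prover.go change below shows) simply omit the field from the request body. A brief encoding/json sketch of that behavior, using a simplified stand-in struct:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// GetTaskRequest is a simplified stand-in for the coordinator client's request type.
type GetTaskRequest struct {
	TaskType     int    `json:"task_type"`
	ProverHeight uint64 `json:"prover_height,omitempty"`
}

func main() {
	chunkReq, _ := json.Marshal(GetTaskRequest{TaskType: 1, ProverHeight: 1234})
	batchReq, _ := json.Marshal(GetTaskRequest{TaskType: 2}) // height left unset

	fmt.Println(string(chunkReq)) // {"task_type":1,"prover_height":1234}
	fmt.Println(string(batchReq)) // {"task_type":2}
}
```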
@@ -1,19 +1,20 @@
{
"prover_name": "my_prover",
"prover_name": "prover-1",
"keystore_path": "keystore.json",
"keystore_password": "prover-pwd",
"db_path": "bbolt_db",
"db_path": "unique-db-path-for-prover-1",
"core": {
"params_path": "params"
"params_path": "params",
"proof_type": 2
},
"coordinator": {
"base_url": "https://coordinator/v1",
"base_url": "http://localhost:8555",
"retry_count": 10,
"retry_wait_time_sec": 10,
"connection_timeout_sec": 30
},
"l2geth": {
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
"endpoint": "http://localhost:9999",
"confirmations": "0x1"
}
}
@@ -19,13 +19,13 @@ type Config struct {
Core *ProverCoreConfig `json:"core"`
DBPath string `json:"db_path"`
Coordinator *CoordinatorConfig `json:"coordinator"`
L2Geth *L2GethConfig `json:"l2geth"`
L2Geth *L2GethConfig `json:"l2geth,omitempty"` // only for chunk_prover
}

// ProverCoreConfig load zk prover config.
type ProverCoreConfig struct {
ParamsPath string `json:"params_path"`
ProofType message.ProofType `json:"prove_type,omitempty"` // 0: chunk prover (default type), 1: batch prover
ProofType message.ProofType `json:"proof_type,omitempty"` // 1: chunk prover (default type), 2: batch prover
DumpDir string `json:"dump_dir,omitempty"`
}
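Note that the JSON key is renamed from prove_type to proof_type, matching the sample config above, and the enum comment now counts from 1. A hedged sketch of loading such a config with encoding/json, using simplified struct definitions rather than the repo's real types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-ins for the prover's config types.
type ProverCoreConfig struct {
	ParamsPath string `json:"params_path"`
	ProofType  int    `json:"proof_type,omitempty"` // 1: chunk prover, 2: batch prover (assumed values)
}

type L2GethConfig struct {
	Endpoint      string `json:"endpoint"`
	Confirmations string `json:"confirmations"`
}

type Config struct {
	ProverName string            `json:"prover_name"`
	Core       *ProverCoreConfig `json:"core"`
	L2Geth     *L2GethConfig     `json:"l2geth,omitempty"` // only needed by chunk provers
}

func main() {
	raw := []byte(`{"prover_name":"prover-1","core":{"params_path":"params","proof_type":2}}`)

	var cfg Config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// A batch prover config can omit the l2geth section entirely.
	fmt.Println(cfg.ProverName, cfg.Core.ProofType, cfg.L2Geth == nil) // prover-1 2 true
}
```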
@@ -36,8 +36,8 @@ type Prover struct {
ctx context.Context
cfg *config.Config
coordinatorClient *client.CoordinatorClient
l2GethClient *ethclient.Client
stack *store.Stack
l2GethClient *ethclient.Client // only applicable for a chunk_prover
proverCore *core.ProverCore

isClosed int64

@@ -60,10 +60,16 @@ func NewProver(ctx context.Context, cfg *config.Config) (*Prover, error) {
return nil, err
}

// Collect geth node.
l2GethClient, err := ethclient.DialContext(ctx, cfg.L2Geth.Endpoint)
if err != nil {
return nil, err
var l2GethClient *ethclient.Client
if cfg.Core.ProofType == message.ProofTypeChunk {
if cfg.L2Geth == nil || cfg.L2Geth.Endpoint == "" {
return nil, errors.New("Missing l2geth config for chunk prover")
}
// Connect l2geth node. Only applicable for a chunk_prover.
l2GethClient, err = ethclient.DialContext(ctx, cfg.L2Geth.Endpoint)
if err != nil {
return nil, err
}
}

// Create prover_core instance

@@ -145,6 +151,13 @@ func (r *Prover) proveAndSubmit() error {
}
}

defer func() {
err = r.stack.Delete(task.Task.ID)
if err != nil {
log.Error("prover stack pop failed!", "err", err)
}
}()

var proofMsg *message.ProofDetail
if task.Times <= 2 {
// If panic times <= 2, try to proof the task.

@@ -153,44 +166,37 @@ func (r *Prover) proveAndSubmit() error {
}

log.Info("start to prove task", "task-type", task.Task.Type, "task-id", task.Task.ID)
proofMsg = r.prove(task)
} else {
// when the prover has more than 3 times panic,
// it will omit to prove the task, submit StatusProofError and then Delete the task.
proofMsg = &message.ProofDetail{
Status: message.StatusProofError,
Error: "zk proving panic",
ID: task.Task.ID,
Type: task.Task.Type,
proofMsg, err = r.prove(task)
if err != nil { // handling error from prove
return fmt.Errorf("failed to prove task, task-type: %v, err: %v", task.Task.Type, err)
}

return r.submitProof(proofMsg)
}

defer func() {
err = r.stack.Delete(task.Task.ID)
if err != nil {
log.Error("prover stack pop failed!", "err", err)
}
}()

return r.submitProof(proofMsg)
// when the prover has more than 3 times panic,
// it will omit to prove the task, submit StatusProofError and then Delete the task.
return fmt.Errorf("zk proving panic for task, task-type: %v, task-id: %v", task.Task.Type, task.Task.ID)
}

// fetchTaskFromCoordinator fetches a new task from the server
func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) {
// get the latest confirmed block number
latestBlockNumber, err := putils.GetLatestConfirmedBlockNumber(r.ctx, r.l2GethClient, r.cfg.L2Geth.Confirmations)
if err != nil {
return nil, fmt.Errorf("failed to fetch latest confirmed block number: %v", err)
}

if latestBlockNumber == 0 {
return nil, fmt.Errorf("omit to prove task of the genesis block, latestBlockNumber: %v", latestBlockNumber)
}

// prepare the request
req := &client.GetTaskRequest{
ProverHeight: latestBlockNumber,
TaskType: r.Type(),
TaskType: r.Type(),
}

if req.TaskType == message.ProofTypeChunk {
// get the latest confirmed block number
latestBlockNumber, err := putils.GetLatestConfirmedBlockNumber(r.ctx, r.l2GethClient, r.cfg.L2Geth.Confirmations)
if err != nil {
return nil, fmt.Errorf("failed to fetch latest confirmed block number: %v", err)
}

if latestBlockNumber == 0 {
return nil, fmt.Errorf("omit to prove task of the genesis block, latestBlockNumber: %v", latestBlockNumber)
}
req.ProverHeight = latestBlockNumber
}

// send the request

@@ -238,8 +244,9 @@ func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) {
return provingTask, nil
}

func (r *Prover) prove(task *store.ProvingTask) (detail *message.ProofDetail) {
detail = &message.ProofDetail{
// prove function tries to prove a task. It returns an error if the proof fails.
func (r *Prover) prove(task *store.ProvingTask) (*message.ProofDetail, error) {
detail := &message.ProofDetail{
ID: task.Task.ID,
Type: task.Task.Type,
Status: message.StatusOk,

@@ -249,30 +256,28 @@ func (r *Prover) prove(task *store.ProvingTask) (detail *message.ProofDetail) {
case message.ProofTypeChunk:
proof, err := r.proveChunk(task)
if err != nil {
log.Error("prove chunk failed!", "task-id", task.Task.ID, "err", err)
detail.Status = message.StatusProofError
detail.Error = err.Error()
return
return detail, err
}
detail.ChunkProof = proof
log.Info("prove chunk successfully!", "task-id", task.Task.ID)
return
log.Info("prove chunk success", "task-id", task.Task.ID)
return detail, nil

case message.ProofTypeBatch:
proof, err := r.proveBatch(task)
if err != nil {
log.Error("prove batch failed!", "task-id", task.Task.ID, "err", err)
detail.Status = message.StatusProofError
detail.Error = err.Error()
return
return detail, err
}
detail.BatchProof = proof
log.Info("prove batch successfully!", "task-id", task.Task.ID)
return
log.Info("prove batch success", "task-id", task.Task.ID)
return detail, nil

default:
log.Error("invalid task type", "task-id", task.Task.ID, "task-type", task.Task.Type)
return
err := fmt.Errorf("invalid task type: %v", task.Task.Type)
return detail, err
}
}

@@ -282,7 +287,7 @@ func (r *Prover) proveChunk(task *store.ProvingTask) (*message.ChunkProof, error
}
traces, err := r.getSortedTracesByHashes(task.Task.ChunkTaskDetail.BlockHashes)
if err != nil {
return nil, fmt.Errorf("get traces from eth node failed, block hashes: %v", task.Task.ChunkTaskDetail.BlockHashes)
return nil, fmt.Errorf("get traces from eth node failed, block hashes: %v, err: %v", task.Task.ChunkTaskDetail.BlockHashes, err)
}
return r.proverCore.ProveChunk(task.Task.ID, traces)
}