Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 23:18:07 -05:00)

Compare commits: v4.3.84-en...codecv1-sc (39 commits)
Commits (SHA1):

e929d53b0b, e90aa04e8c, 32f0011d74, f0fc344303, 5b827c3c18, 6b2eb80aa5,
71f88b04f5, bcd9764bcd, b4f8377a08, b52d43caa8, 201bf401cd, 898ac1d25c,
1336b89fb8, 73045df037, b3093e9eb6, 3d5250e52d, b7324c76bc, be5c58a427,
97c85209c5, 48534f8698, 378ec79d14, 1054023dd5, 65481212d5, ba289fc651,
64ed273a1d, c1059a3f51, 70aa557968, 94a3c0a571, 4c6016f852, e53e150341,
5b6b170db1, 44c77bcc87, 364173a8d6, 2afba3e394, 53b24f75f8, de60ea8f55,
9ade976ce5, 4bb9cf89aa, efe0d7d2fe
@@ -23,6 +23,7 @@ type FetcherConfig struct {
 	DAIGatewayAddr     string `json:"DAIGatewayAddr"`
 	USDCGatewayAddr    string `json:"USDCGatewayAddr"`
 	LIDOGatewayAddr    string `json:"LIDOGatewayAddr"`
+	PufferGatewayAddr  string `json:"PufferGatewayAddr"`
 	ERC721GatewayAddr  string `json:"ERC721GatewayAddr"`
 	ERC1155GatewayAddr string `json:"ERC1155GatewayAddr"`
 	ScrollChainAddr    string `json:"ScrollChainAddr"`
@@ -93,6 +93,11 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
 		gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
 	}
 
+	if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
+	}
+
 	log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
 
 	f := &L1FetcherLogic{
@@ -85,7 +85,12 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
 
 	if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
 		addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
-		gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
 	}
 
+	if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
+	}
+
 	log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
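Note that the L2 hunk, besides registering the Puffer gateway, fixes a copy-paste bug: the LIDO branch previously appended the USDC gateway address to gatewayList. Both fetcher constructors rely on the same zero-address guard, so optional gateways can simply be left unset in the config file. A minimal standalone sketch of that idiom (appendIfConfigured and the addresses below are illustrative, not repository code):

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
)

// appendIfConfigured mirrors the guard used in both fetcher constructors:
// an address is only added to the watch list when it decodes to something
// other than the zero address.
func appendIfConfigured(list []common.Address, hexAddr string) []common.Address {
	if addr := common.HexToAddress(hexAddr); addr != (common.Address{}) {
		list = append(list, addr)
	}
	return list
}

func main() {
	var gateways []common.Address
	gateways = appendIfConfigured(gateways, "") // unset in config: skipped
	gateways = appendIfConfigured(gateways, "0x0000000000000000000000000000000000000001") // placeholder address
	fmt.Println(len(gateways)) // prints 1
}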
common/libzkp/impl/Cargo.lock (generated, 48 lines changed)
@@ -31,7 +31,7 @@ dependencies = [
 [[package]]
 name = "aggregator"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "ark-std 0.3.0",
  "c-kzg",

@@ -521,7 +521,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
 [[package]]
 name = "bus-mapping"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "eth-types",
  "ethers-core",

@@ -535,7 +535,7 @@ dependencies = [
  "mock",
  "mpt-zktrie",
  "num",
- "poseidon-circuit",
+ "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
  "rand",
  "revm-precompile",
  "serde",

@@ -1139,7 +1139,7 @@ dependencies = [
 [[package]]
 name = "eth-types"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "base64 0.13.1",
  "ethers-core",

@@ -1150,7 +1150,7 @@ dependencies = [
  "itertools 0.11.0",
  "num",
  "num-bigint",
- "poseidon-circuit",
+ "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
  "regex",
  "serde",
  "serde_json",

@@ -1293,7 +1293,7 @@ dependencies = [
 [[package]]
 name = "external-tracer"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "eth-types",
  "geth-utils",

@@ -1485,7 +1485,7 @@ dependencies = [
 [[package]]
 name = "gadgets"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "eth-types",
  "halo2_proofs",

@@ -1507,7 +1507,7 @@ dependencies = [
 [[package]]
 name = "geth-utils"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "env_logger 0.10.0",
  "gobuild",

@@ -1684,7 +1684,7 @@ dependencies = [
  "log",
  "num-bigint",
  "num-traits",
- "poseidon-circuit",
+ "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=scroll-dev-1201)",
  "rand",
  "rand_chacha",
  "serde",

@@ -2142,7 +2142,7 @@ dependencies = [
 [[package]]
 name = "keccak256"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "env_logger 0.10.0",
  "eth-types",

@@ -2292,7 +2292,7 @@ dependencies = [
 [[package]]
 name = "mock"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "eth-types",
  "ethers-core",

@@ -2307,7 +2307,7 @@ dependencies = [
 [[package]]
 name = "mpt-zktrie"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "eth-types",
  "halo2-mpt-circuits",

@@ -2315,7 +2315,7 @@ dependencies = [
  "hex",
  "log",
  "num-bigint",
- "poseidon-circuit",
+ "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
  "zktrie",
 ]

@@ -2671,6 +2671,21 @@ dependencies = [
  "subtle",
 ]
 
+[[package]]
+name = "poseidon-circuit"
+version = "0.1.0"
+source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main#babf5f6a69bec40b2e6523df317c073dcd0b1f97"
+dependencies = [
+ "bitvec",
+ "ff 0.13.0",
+ "halo2_proofs",
+ "lazy_static",
+ "log",
+ "rand",
+ "rand_xorshift",
+ "thiserror",
+]
+
 [[package]]
 name = "poseidon-circuit"
 version = "0.1.0"

@@ -2754,7 +2769,7 @@ dependencies = [
 [[package]]
 name = "prover"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "aggregator",
  "anyhow",

@@ -4441,7 +4456,7 @@ dependencies = [
 [[package]]
 name = "zkevm-circuits"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "array-init",
  "bus-mapping",

@@ -4465,7 +4480,7 @@ dependencies = [
  "mpt-zktrie",
  "num",
  "num-bigint",
- "poseidon-circuit",
+ "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
  "rand",
  "rand_chacha",
  "rand_xorshift",

@@ -4494,6 +4509,7 @@ dependencies = [
  "serde",
  "serde_derive",
  "serde_json",
+ "snark-verifier-sdk",
 ]
 
 [[package]]
@@ -24,7 +24,8 @@ bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/i
 
 [dependencies]
 halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
-prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.0rc3", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
+snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
+prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.3", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
 
 base64 = "0.13.0"
 env_logger = "0.9.0"
@@ -12,6 +12,7 @@ use prover::{
     utils::{chunk_trace_to_witness_block, init_env_and_log},
     BatchProof, BlockTrace, ChunkHash, ChunkProof,
 };
+use snark_verifier_sdk::verify_evm_calldata;
 use std::{cell::OnceCell, env, ptr::null};
 
 static mut PROVER: OnceCell<Prover> = OnceCell::new();

@@ -148,11 +149,33 @@ pub unsafe extern "C" fn gen_batch_proof(
 
 /// # Safety
 #[no_mangle]
-pub unsafe extern "C" fn verify_batch_proof(proof: *const c_char) -> c_char {
+pub unsafe extern "C" fn verify_batch_proof(
+    proof: *const c_char,
+    fork_name: *const c_char,
+) -> c_char {
     let proof = c_char_to_vec(proof);
     let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
-
-    let verified = panic_catch(|| VERIFIER.get().unwrap().verify_agg_evm_proof(proof));
+    let fork_name_str = c_char_to_str(fork_name);
+    let fork_id = match fork_name_str {
+        "" => 0,
+        "shanghai" => 0,
+        "bernoulli" => 1,
+        _ => {
+            log::warn!("unexpected fork_name {fork_name_str}, treated as bernoulli");
+            1
+        }
+    };
+    let verified = panic_catch(|| {
+        if fork_id == 0 {
+            // before upgrade#2(EIP4844)
+            verify_evm_calldata(
+                include_bytes!("evm_verifier_fork_1.bin").to_vec(),
+                proof.calldata(),
+            )
+        } else {
+            VERIFIER.get().unwrap().verify_agg_evm_proof(proof)
+        }
+    });
     verified.unwrap_or(false) as c_char
 }
common/libzkp/impl/src/evm_verifier_fork_1.bin (new file, binary; not shown)
@@ -3,7 +3,7 @@ void init_batch_verifier(char* params_dir, char* assets_dir);
 char* get_batch_vk();
 char* check_chunk_proofs(char* chunk_proofs);
 char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
-char verify_batch_proof(char* proof);
+char verify_batch_proof(char* proof, char* fork_name);
 
 void init_chunk_prover(char* params_dir, char* assets_dir);
 void init_chunk_verifier(char* params_dir, char* assets_dir);
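On the Go side this C entry point would be reached through cgo. A minimal hypothetical binding is sketched below: the function name and two-argument signature come from the header above, but the package name, linker flags, and helper shape are assumptions, not the repository's actual wrapper.

package libzkp

/*
#cgo LDFLAGS: -lzkp
#include <stdlib.h>
#include "libzkp.h"
*/
import "C"

import "unsafe"

// VerifyBatchProof drives the updated two-argument C entry point. An empty
// or "shanghai" fork name selects the pre-EIP-4844 EVM-calldata verifier
// embedded as evm_verifier_fork_1.bin; anything else uses the current one.
func VerifyBatchProof(proofJSON []byte, forkName string) bool {
	cProof := C.CString(string(proofJSON))
	defer C.free(unsafe.Pointer(cProof))
	cFork := C.CString(forkName)
	defer C.free(unsafe.Pointer(cFork))
	return C.verify_batch_proof(cProof, cFork) != 0
}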
@@ -0,0 +1,133 @@
package main

import (
	"context"
	"fmt"
	"log"
	"math/big"
	"time"

	"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rlp"
)

const targetTxSize = 126914

func main() {
	privateKeyHex := "0000000000000000000000000000000000000000000000000000000000000042"
	privateKey, err := crypto.HexToECDSA(privateKeyHex)
	if err != nil {
		log.Fatalf("Invalid private key: %v", err)
	}

	client, err := ethclient.Dial("http://localhost:9999")
	if err != nil {
		log.Fatalf("Failed to connect to the Ethereum client: %v", err)
	}

	auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(222222))
	if err != nil {
		log.Fatalf("Failed to create transactor with chain ID 222222: %v", err)
	}

	nonce, err := client.PendingNonceAt(context.Background(), auth.From)
	if err != nil {
		log.Fatalf("Failed to retrieve account nonce: %v", err)
	}

	totalTxNum := []uint64{2, 3, 4, 5, 6}
	for _, num := range totalTxNum {
		prepareAndSendTransactions(client, auth, nonce, num)
		nonce += num
	}
}

func prepareAndSendTransactions(client *ethclient.Client, auth *bind.TransactOpts, initialNonce uint64, totalTxNum uint64) error {
	gasLimit := uint64(5000000)
	gasPrice := big.NewInt(1000000000)

	var signedTxs []*types.Transaction
	payloadSum := 0

	// Fill the first totalTxNum-1 transactions with equal-sized 0xff payloads.
	dataPayload := make([]byte, targetTxSize/totalTxNum)
	for i := range dataPayload {
		dataPayload[i] = 0xff
	}

	for i := uint64(0); i < totalTxNum-1; i++ {
		txData := &types.LegacyTx{
			Nonce:    initialNonce + i,
			GasPrice: gasPrice,
			Gas:      gasLimit,
			To:       &auth.From,
			Data:     dataPayload,
		}

		signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
		if err != nil {
			log.Fatalf("Failed to sign tx: %v", err)
		}

		rlpTxData, err := rlp.EncodeToBytes(signedTx)
		if err != nil {
			log.Fatalf("Failed to RLP encode the tx: %v", err)
		}

		payloadSum += len(rlpTxData)
		signedTxs = append(signedTxs, signedTx)
	}

	fmt.Println("payload sum", payloadSum)

	// Binary-search the last transaction's payload length so that the
	// combined RLP-encoded size of all transactions hits targetTxSize exactly.
	lowerBound := 0
	upperBound := targetTxSize
	for lowerBound <= upperBound {
		mid := (lowerBound + upperBound) / 2
		data := make([]byte, mid)
		for i := range data {
			data[i] = 0xff
		}

		txData := &types.LegacyTx{
			Nonce:    initialNonce + totalTxNum - 1,
			GasPrice: gasPrice,
			Gas:      gasLimit,
			To:       &auth.From,
			Data:     data,
		}

		signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
		if err != nil {
			log.Fatalf("Failed to sign tx: %v", err)
		}

		rlpTxData, err := rlp.EncodeToBytes(signedTx)
		if err != nil {
			log.Fatalf("Failed to RLP encode the tx: %v", err)
		}
		txSize := len(rlpTxData)

		if payloadSum+txSize < targetTxSize {
			lowerBound = mid + 1
		} else if payloadSum+txSize > targetTxSize {
			upperBound = mid - 1
		} else {
			fmt.Println("payloadSum+txSize", payloadSum+txSize)
			signedTxs = append(signedTxs, signedTx)
			break
		}
	}

	for _, signedTx := range signedTxs {
		if err := client.SendTransaction(context.Background(), signedTx); err != nil {
			return fmt.Errorf("failed to send transaction: %v", err)
		}
		fmt.Printf("Transaction with nonce %d sent\n", signedTx.Nonce())
		time.Sleep(10 * time.Second)
	}

	return nil
}
common/types/constructing-target-payload-tx/main.go (new file, 131 lines)
@@ -0,0 +1,131 @@
package main

import (
	"context"
	"fmt"
	"log"
	"math/big"

	"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rlp"
)

const targetTxSize = 120568

func main() {
	privateKeyHex := "0000000000000000000000000000000000000000000000000000000000000042"
	privateKey, err := crypto.HexToECDSA(privateKeyHex)
	if err != nil {
		log.Fatalf("Invalid private key: %v", err)
	}

	client, err := ethclient.Dial("http://localhost:9999")
	if err != nil {
		log.Fatalf("Failed to connect to the Ethereum client: %v", err)
	}

	auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(222222))
	if err != nil {
		log.Fatalf("Failed to create transactor with chain ID 222222: %v", err)
	}

	nonce, err := client.PendingNonceAt(context.Background(), auth.From)
	if err != nil {
		log.Fatalf("Failed to retrieve account nonce: %v", err)
	}
	prepareAndSendTransactions(client, auth, nonce, 1)
	prepareAndSendTransactions(client, auth, nonce+1, 2)
	prepareAndSendTransactions(client, auth, nonce+1+2, 3)
	prepareAndSendTransactions(client, auth, nonce+1+2+3, 4)
	prepareAndSendTransactions(client, auth, nonce+1+2+3+4, 5)
	prepareAndSendTransactions(client, auth, nonce+1+2+3+4+5, 6)
}

func prepareAndSendTransactions(client *ethclient.Client, auth *bind.TransactOpts, initialNonce uint64, totalTxNum uint64) error {
	gasLimit := uint64(5000000)
	gasPrice := big.NewInt(1000000000)

	var signedTxs []*types.Transaction
	payloadSum := 0

	dataPayload := make([]byte, targetTxSize/totalTxNum)
	for i := range dataPayload {
		dataPayload[i] = 0xff
	}

	for i := uint64(0); i < totalTxNum-1; i++ {
		txData := &types.LegacyTx{
			Nonce:    initialNonce + i,
			GasPrice: gasPrice,
			Gas:      gasLimit,
			To:       &auth.From,
			Data:     dataPayload,
		}

		signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
		if err != nil {
			log.Fatalf("Failed to sign tx: %v", err)
		}

		rlpTxData, err := rlp.EncodeToBytes(signedTx)
		if err != nil {
			log.Fatalf("Failed to RLP encode the tx: %v", err)
		}

		payloadSum += len(rlpTxData)
		signedTxs = append(signedTxs, signedTx)
	}

	fmt.Println("payload sum", payloadSum)

	lowerBound := 0
	upperBound := targetTxSize
	for lowerBound <= upperBound {
		mid := (lowerBound + upperBound) / 2
		data := make([]byte, mid)
		for i := range data {
			data[i] = 0xff
		}

		txData := &types.LegacyTx{
			Nonce:    initialNonce + totalTxNum - 1,
			GasPrice: gasPrice,
			Gas:      gasLimit,
			To:       &auth.From,
			Data:     data,
		}

		signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
		if err != nil {
			log.Fatalf("Failed to sign tx: %v", err)
		}

		rlpTxData, err := rlp.EncodeToBytes(signedTx)
		if err != nil {
			log.Fatalf("Failed to RLP encode the tx: %v", err)
		}
		txSize := len(rlpTxData)

		if payloadSum+txSize < targetTxSize {
			lowerBound = mid + 1
		} else if payloadSum+txSize > targetTxSize {
			upperBound = mid - 1
		} else {
			fmt.Println("payloadSum+txSize", payloadSum+txSize)
			signedTxs = append(signedTxs, signedTx)
			break
		}
	}

	for i := len(signedTxs) - 1; i >= 0; i-- {
		if err := client.SendTransaction(context.Background(), signedTxs[i]); err != nil {
			return fmt.Errorf("failed to send transaction: %v", err)
		}
		fmt.Printf("Transaction with nonce %d sent\n", signedTxs[i].Nonce())
	}

	return nil
}
@@ -227,18 +227,11 @@ func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
 	}
 
 	// blob payload
-	blob, z, err := constructBlobPayload(batch.Chunks)
+	blob, blobVersionedHash, z, err := constructBlobPayload(batch.Chunks)
 	if err != nil {
 		return nil, err
 	}
 
-	// blob versioned hash
-	c, err := kzg4844.BlobToCommitment(*blob)
-	if err != nil {
-		return nil, fmt.Errorf("failed to create blob commitment")
-	}
-	blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
-
 	daBatch := DABatch{
 		Version:    CodecV1Version,
 		BatchIndex: batch.Index,

@@ -281,7 +274,7 @@ func computeBatchDataHash(chunks []*encoding.Chunk, totalL1MessagePoppedBefore u
 }
 
 // constructBlobPayload constructs the 4844 blob payload.
-func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Point, error) {
+func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
 	// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
 	metadataLength := 2 + MaxNumChunks*4
 

@@ -289,8 +282,8 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
 	blobBytes := make([]byte, metadataLength)
 
 	// challenge digest preimage
-	// 1 hash for metadata and 1 for each chunk
-	challengePreimage := make([]byte, (1+MaxNumChunks)*32)
+	// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
+	challengePreimage := make([]byte, (1+MaxNumChunks+1)*32)
 
 	// the chunk data hash used for calculating the challenge preimage
 	var chunkDataHash common.Hash

@@ -309,7 +302,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
 			// encode L2 txs into blob payload
 			rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
 			if err != nil {
-				return nil, nil, err
+				return nil, common.Hash{}, nil, err
 			}
 			blobBytes = append(blobBytes, rlpTxData...)
 		}

@@ -341,9 +334,19 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
 	// convert raw data to BLSFieldElements
 	blob, err := makeBlobCanonical(blobBytes)
 	if err != nil {
-		return nil, nil, err
+		return nil, common.Hash{}, nil, err
 	}
 
+	// compute blob versioned hash
+	c, err := kzg4844.BlobToCommitment(*blob)
+	if err != nil {
+		return nil, common.Hash{}, nil, fmt.Errorf("failed to create blob commitment")
+	}
+	blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
+
+	// challenge: append blob versioned hash
+	copy(challengePreimage[(1+MaxNumChunks)*32:], blobVersionedHash[:])
+
 	// compute z = challenge_digest % BLS_MODULUS
 	challengeDigest := crypto.Keccak256Hash(challengePreimage)
 	pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus)

@@ -354,7 +357,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
 	start := 32 - len(pointBytes)
 	copy(z[start:], pointBytes)
 
-	return blob, &z, nil
+	return blob, blobVersionedHash, &z, nil
 }
 
 // makeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
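The refactor moves the versioned-hash computation inside constructBlobPayload because the hash now feeds the challenge: the preimage gains a third section, so z becomes keccak256(metadata hash, chunk data hashes, blob versioned hash) reduced modulo the BLS scalar field, which is why every expected z and blob-data proof in the tests below changes. A minimal sketch of the tail of that computation, reusing the kzg4844 calls shown in the diff (the helper name, the inlined blsModulus constant, and the simplified preimage layout are mine, not repository code):

package main

import (
	"crypto/sha256"
	"fmt"
	"log"
	"math/big"

	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

// blsModulus is the BLS12-381 scalar field modulus (the real code keeps
// this in a package-level BLSModulus variable).
var blsModulus, _ = new(big.Int).SetString(
	"52435875175126190479447740508185965837690552500527637822603658699938581184513", 10)

// challengePoint reproduces the tail of constructBlobPayload: commit to the
// blob, derive its EIP-4844 versioned hash, write it into the last 32 bytes
// of the challenge preimage, and reduce the keccak digest into the scalar field.
func challengePoint(blob *kzg4844.Blob, preimage []byte) (kzg4844.Point, error) {
	var z kzg4844.Point
	c, err := kzg4844.BlobToCommitment(*blob)
	if err != nil {
		return z, fmt.Errorf("failed to create blob commitment")
	}
	versionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
	copy(preimage[len(preimage)-32:], versionedHash[:])

	digest := crypto.Keccak256Hash(preimage)
	point := new(big.Int).Mod(new(big.Int).SetBytes(digest[:]), blsModulus)

	// left-pad the scalar to 32 bytes
	b := point.Bytes()
	copy(z[32-len(b):], b)
	return z, nil
}

func main() {
	var blob kzg4844.Blob                 // zero blob: 4096 zero field elements
	preimage := make([]byte, (1+15+1)*32) // metadata hash + 15 chunk hashes + versioned hash
	z, err := challengePoint(&blob, preimage)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("z = %x\n", z)
}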
@@ -479,55 +479,55 @@ func TestCodecV1BatchChallenge(t *testing.T) {
 	originalBatch := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
 	batch, err := NewDABatch(originalBatch)
 	assert.NoError(t, err)
-	assert.Equal(t, "06138a688f328d13cb9caf0e2046d65bbcf766eab00196fb05e43806c7b26b36", hex.EncodeToString(batch.z[:]))
+	assert.Equal(t, "0d8e67f882c61159aa99b04ec4f6f3d90cb95cbfba6efd56cefc55ca15b290ef", hex.EncodeToString(batch.z[:]))
 
 	trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
 	chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
 	originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
 	batch, err = NewDABatch(originalBatch)
 	assert.NoError(t, err)
-	assert.Equal(t, "1e3f41f46941b3d30bbc482942026b09224636ed63a160738d7ae57a00c99294", hex.EncodeToString(batch.z[:]))
+	assert.Equal(t, "32da228f4945de828954675f9396debb169bbf336ba93f849a8fc7fee1bc9e58", hex.EncodeToString(batch.z[:]))
 
 	trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
 	chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
 	originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
 	batch, err = NewDABatch(originalBatch)
 	assert.NoError(t, err)
-	assert.Equal(t, "37c3ab6ad48e99fc0ce8e9de5f9b2c9be832699b293e4243b85d4e42bad0db7a", hex.EncodeToString(batch.z[:]))
+	assert.Equal(t, "09a37ab43d41bcae3000c090a341e4661a8dc705b3c93d01b9eda3a0b3f8d4a8", hex.EncodeToString(batch.z[:]))
 
 	trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
 	chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
 	originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk5}}
 	batch, err = NewDABatch(originalBatch)
 	assert.NoError(t, err)
-	assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
+	assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
 
 	trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
 	chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}}
 	originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk6}}
 	batch, err = NewDABatch(originalBatch)
 	assert.NoError(t, err)
-	assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
+	assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
 
 	trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
 	chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}}
 	originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk7}}
 	batch, err = NewDABatch(originalBatch)
 	assert.NoError(t, err)
-	assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
+	assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
 
 	// 15 chunks
 	originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}}
 	batch, err = NewDABatch(originalBatch)
 	assert.NoError(t, err)
-	assert.Equal(t, "0244c987922db21694e8eb0184c4a5e6f3785fb688224822f1f826874ed5aae2", hex.EncodeToString(batch.z[:]))
+	assert.Equal(t, "55dac3baa818133cfdce0f97ddbb950e341399756d7b49bc34107dd65ecd3a4b", hex.EncodeToString(batch.z[:]))
 
 	chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}}
 	chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
 	originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk8, chunk9}}
 	batch, err = NewDABatch(originalBatch)
 	assert.NoError(t, err)
-	assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c43", hex.EncodeToString(batch.z[:]))
+	assert.Equal(t, "0b14dce4abfdeb3a69a341f7db6b1e16162c20826e6d964a829e20f671030cab", hex.EncodeToString(batch.z[:]))
 }
 
 func repeat(element byte, count int) string {

@@ -547,31 +547,31 @@ func TestCodecV1BatchChallengeWithStandardTestCases(t *testing.T) {
 		expectedy string
 	}{
 		// single empty chunk
-		{chunks: [][]string{{}}, expectedz: "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", expectedy: "28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df51"},
+		{chunks: [][]string{{}}, expectedz: "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", expectedy: "304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd08"},
 		// single non-empty chunk
-		{chunks: [][]string{{"0x010203"}}, expectedz: "30a9d6cfc2b87fb00d80e7fea28ebb9eff0bd526dbf1da32acfe8c5fd49632ff", expectedy: "723515444cb320fe437b9cea3b51293f5fbcb5913739ad35eab28b1863f7c312"},
+		{chunks: [][]string{{"0x010203"}}, expectedz: "1c1d4bd5153f877d799853080aba243f2c186dd6d6064eaefacfe715c92b6354", expectedy: "24e80ed99526b0d15ba46f7ec682f517576ddae68d5131e5d351f8bae06ea7d3"},
 		// multiple empty chunks
-		{chunks: [][]string{{}, {}}, expectedz: "17772348f946a4e4adfcaf5c1690d078933b6b090ca9a52fab6c7e545b1007ae", expectedy: "05ba9abbc81a1c97f4cdaa683a7e0c731d9dfd88feef8f7b2fcfd79e593662b5"},
+		{chunks: [][]string{{}, {}}, expectedz: "152c9ccfcc2884f9891f7adce2de110cf9f85bfd0e21f0933ae0636390a84d41", expectedy: "5f6f532676e25b49e2eae77513fbeca173a300b434c0a5e24fa554b68e27d582"},
 		// multiple non-empty chunks
-		{chunks: [][]string{{"0x010203"}, {"0x070809"}}, expectedz: "60376321eea0886c29bd97d95851c7b5fbdb064c8adfdadd7678617b32b3ebf2", expectedy: "50cfbcece01cadb4eade40649e17b140b31f96088097e38f020e31dfe6551604"},
+		{chunks: [][]string{{"0x010203"}, {"0x070809"}}, expectedz: "62100f5381179ea7db7aa8fdedb0f7fc7b82730b75432d50ab41f80aeebe45a3", expectedy: "5b1f6e7a54907ddc06871853cf1f5d53bf2de0df7b61d0df84bc2c3fb80320cd"},
 		// empty chunk followed by non-empty chunk
-		{chunks: [][]string{{}, {"0x010203"}}, expectedz: "054539f03564eda9462d582703cde0788e4e27c311582ddfb19835358273a7ca", expectedy: "1fba03580b5908c4c66b48e79c10e7a34e4b27ed37a1a049b3e17e017cad5245"},
+		{chunks: [][]string{{}, {"0x010203"}}, expectedz: "2d94d241c4a2a8d8f02845ca40cfba344f3b42384af2045a75c82e725a184232", expectedy: "302416c177e9e7fe40c3bc4315066c117e27d246b0a33ef68cdda6dd333c485c"},
 		// non-empty chunk followed by empty chunk
-		{chunks: [][]string{{"0x070809"}, {}}, expectedz: "0b82dceaa6ca4b5d704590c921accfd991b56b5ad0212e6a4e63e54915a2053b", expectedy: "2362f3a0c87f0ea11eb898ed608c7f09a42926a058d4c5d111a0f54cad10ebbd"},
+		{chunks: [][]string{{"0x070809"}, {}}, expectedz: "7227567e3b1dbacb48a32bb85e4e99f73e4bd5620ea8cd4f5ac00a364c86af9c", expectedy: "2eb3dfd28362f35f562f779e749a555d2f1f87ddc716e95f04133d25189a391c"},
 		// max number of chunks all empty
-		{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}, expectedz: "174cd3ba9b2ae8ab789ec0b5b8e0b27ee122256ec1756c383dbf2b5b96903f1b", expectedy: "225cab9658904181671eb7abc342ffc36a6836048b64a67f0fb758439da2567b"},
+		{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}, expectedz: "1128ac3e22ced6af85be4335e0d03a266946a7cade8047e7fc59d6c8be642321", expectedy: "2d9b16422ce17f328fd00c99349768f0cb0c8648115eb3bd9b7864617ba88059"},
 		// max number of chunks all non-empty
-		{chunks: [][]string{{"0x0a"}, {"0x0a0b"}, {"0x0a0b0c"}, {"0x0a0b0c0d"}, {"0x0a0b0c0d0e"}, {"0x0a0b0c0d0e0f"}, {"0x0a0b0c0d0e0f10"}, {"0x0a0b0c0d0e0f1011"}, {"0x0a0b0c0d0e0f101112"}, {"0x0a0b0c0d0e0f10111213"}, {"0x0a0b0c0d0e0f1011121314"}, {"0x0a0b0c0d0e0f101112131415"}, {"0x0a0b0c0d0e0f10111213141516"}, {"0x0a0b0c0d0e0f1011121314151617"}, {"0x0a0b0c0d0e0f101112131415161718"}}, expectedz: "1e93e961cdfb4bd26a5be48f23af4f1aa8c6bebe57a089d3250f8afb1e988bf8", expectedy: "24ed4791a70b28a6bad21c22d58f82a5ea5f9f9d2bcfc07428b494e9ae93de6e"},
+		{chunks: [][]string{{"0x0a"}, {"0x0a0b"}, {"0x0a0b0c"}, {"0x0a0b0c0d"}, {"0x0a0b0c0d0e"}, {"0x0a0b0c0d0e0f"}, {"0x0a0b0c0d0e0f10"}, {"0x0a0b0c0d0e0f1011"}, {"0x0a0b0c0d0e0f101112"}, {"0x0a0b0c0d0e0f10111213"}, {"0x0a0b0c0d0e0f1011121314"}, {"0x0a0b0c0d0e0f101112131415"}, {"0x0a0b0c0d0e0f10111213141516"}, {"0x0a0b0c0d0e0f1011121314151617"}, {"0x0a0b0c0d0e0f101112131415161718"}}, expectedz: "1a4025a3d74e70b511007dd55a2e252478c48054c6383285e8a176f33d99853b", expectedy: "12071ac2571c11220432a27b8be549392892e9baf4c654748ca206def3843940"},
 		// single chunk blob full
-		{chunks: [][]string{{repeat(123, nRowsData)}}, expectedz: "61405cb0b114dfb4d611be84bedba0fcd2e55615e193e424f1cc7b1af0df3d31", expectedy: "58609bbca10e50489b630ecb5b9347378579ed784d6a10749fd505055d35c3c0"},
+		{chunks: [][]string{{repeat(123, nRowsData)}}, expectedz: "72714cc4a0ca75cee2d543b1f958e3d3dd59ac7df0d9d5617d8117b65295a5f2", expectedy: "4ebb690362bcbc42321309c210c99f2ebdb53b3fcf7cf3b17b78f6cfd1203ed3"},
 		// multiple chunks blob full
-		{chunks: [][]string{{repeat(123, 1111)}, {repeat(231, nRowsData-1111)}}, expectedz: "22533c3ea99536b4b83a89835aa91e6f0d2fc3866c201e18d7ca4b3af92fad61", expectedy: "40d4b71492e1a06ee3c273ef9003c7cb05aed021208871e13fa33302fa0f4dcc"},
+		{chunks: [][]string{{repeat(123, 1111)}, {repeat(231, nRowsData-1111)}}, expectedz: "70eb5b4db503e59413238eef451871c5d12f2bb96c8b96ceca012f4ca0114727", expectedy: "568d0aaf280ec83f9c81ed2d80ecbdf199bd72dafb8a350007d37ea82997e455"},
 		// max number of chunks only last one non-empty not full blob
-		{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData-1111)}}, expectedz: "0e6525c0dd261e8f62342b1139062bb23bc2b8b460163364598fb29e82a4eed5", expectedy: "1db984d6deb5e84bc67d0755aa2da8fe687233147603b4ecba94d0c8463c3836"},
+		{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData-1111)}}, expectedz: "03db68ae16ee88489d52db19e6111b25630c5f23ad7cd14530aacf0cd231d476", expectedy: "24527d0b0e93b3dec0060c7b128975a8088b3104d3a297dc807ab43862a77a1a"},
 		// max number of chunks only last one non-empty full blob
-		{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData)}}, expectedz: "3a638eac98f22f817b84e3d81ccaa3de080f83dc80a5823a3f19320ef3cb6fc8", expectedy: "73ab100278822144e2ed8c9d986e92f7a2662fd18a51bdf96ec55848578b227a"},
+		{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData)}}, expectedz: "677670193f73db499cede572bcb55677f0d2f13d690f9a820bd00bf584c3c241", expectedy: "1d85677f172dbdf4ad3094a17deeb1df4d7d2b7f35ecea44aebffa757811a268"},
 		// max number of chunks but last is empty
-		{chunks: [][]string{{repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {}}, expectedz: "02ef442d99f450559647a7823f1be0e148c75481cc5c703c02a116e8ac531fa8", expectedy: "31743538cfc3ac43d1378a5c497ebc9462c20b4cb4470e0e7a9f7342ea948333"},
+		{chunks: [][]string{{repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {}}, expectedz: "22935042dfe7df771b02c1f5cababfe508869e8f6339dabe25a8a32e37728bb0", expectedy: "48ca66fb5a094401728c3a6a517ffbd72c4d4d9a8c907e2d2f1320812f4d856f"},
 	} {
 		chunks := []*encoding.Chunk{}

@@ -587,7 +587,7 @@ func TestCodecV1BatchChallengeWithStandardTestCases(t *testing.T) {
 		chunks = append(chunks, chunk)
 	}
 
-	b, z, err := constructBlobPayload(chunks)
+	b, _, z, err := constructBlobPayload(chunks)
 	assert.NoError(t, err)
 	actualZ := hex.EncodeToString(z[:])
 	assert.Equal(t, tc.expectedz, actualZ)

@@ -608,7 +608,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err := batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "06138a688f328d13cb9caf0e2046d65bbcf766eab00196fb05e43806c7b26b363d27683f7aab53cf071e2c8c8f3abfe750d206c048489450d120679cdc823f7db44a38af1f9a6c70cd3ccfbf71968f447aa566bbafb0bbc566fc9eeb42973484802635a1bbd8305d34a46693331bf607a30dad96431f70551dd950c1426131d73ccea6d050d38dea123aad90aa8c0b734c98e8e04bd8ea8f19b415f2d85156d8", hex.EncodeToString(verifyData))
+	assert.Equal(t, "0d8e67f882c61159aa99b04ec4f6f3d90cb95cbfba6efd56cefc55ca15b290ef423dc493f1dd7c9fbecdffa021ca4649b13e8d72231487034ec6b27e155ecfd7b44a38af1f9a6c70cd3ccfbf71968f447aa566bbafb0bbc566fc9eeb42973484802635a1bbd8305d34a46693331bf607b38542ec811c92d86ff6f3319de06ee60c42655278ccf874f3615f450de730895276828b73db03c553b0bc7e5474a5e0", hex.EncodeToString(verifyData))
 
 	trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
 	chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}

@@ -617,7 +617,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err = batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "1e3f41f46941b3d30bbc482942026b09224636ed63a160738d7ae57a00c992946dc7e51a42a31f429bc1f321dcf020b9a661225259522dba186fcfe5dc012191b8aab265dc352e352807a298f7bb99d432c7cd543e63158cbdb8fbf99f3182a71af35ccbed2693c5e0bc5be38d565e86a0b3c76e33edb24eb07faeaa5d3f2b15a55df6ab99abf828b5803f5681dc634602eb7469ee0556563b2eccebf16ec822", hex.EncodeToString(verifyData))
+	assert.Equal(t, "32da228f4945de828954675f9396debb169bbf336ba93f849a8fc7fee1bc9e5821975f318babe50be728f9b52754d5ce2caa2ba82ba35b5888af1c5f28d23206b8aab265dc352e352807a298f7bb99d432c7cd543e63158cbdb8fbf99f3182a71af35ccbed2693c5e0bc5be38d565e868e0c6fe7bd39baa5ee6339cd334a18af7c680d24e825262499e83b31633b13a9ee89813fae8441630c82bc9dce3f1e07", hex.EncodeToString(verifyData))
 
 	trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
 	chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}

@@ -626,7 +626,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err = batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "37c3ab6ad48e99fc0ce8e9de5f9b2c9be832699b293e4243b85d4e42bad0db7a24164e6ea8b7946ce5e40d2baa4f6aa0d030076f6074295288133c00e75dafa2afd4e1c55a17dbdf8390b5736158afe238d82f8b696669ba47015fcdfd4d1becd0ff7a47f8f379a4ac8d1741e2d676248f5ca4a9f0d9b7fa48f5f649dc84e928161fd99ad1bd9a9879b05d29c5f718bfb3b0a696a5f3ed50b5b8c6a9d530b3ee", hex.EncodeToString(verifyData))
+	assert.Equal(t, "09a37ab43d41bcae3000c090a341e4661a8dc705b3c93d01b9eda3a0b3f8d4a8088a01e54e3565d2e91ce6afbadf479330847d9106737875303ce17f17c48722afd4e1c55a17dbdf8390b5736158afe238d82f8b696669ba47015fcdfd4d1becd0ff7a47f8f379a4ac8d1741e2d67624aee03a0f7cdb7807bc7e0b9fb20bc299af2a35e38cda816708b40f2f18db491e14a0f5d9cfe2f4c12e4ca1a219484f17", hex.EncodeToString(verifyData))
 
 	trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
 	chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}

@@ -635,7 +635,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err = batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
+	assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
 
 	trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
 	chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}}

@@ -644,7 +644,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err = batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
+	assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
 
 	trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
 	chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}}

@@ -653,7 +653,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err = batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
+	assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
 
 	// 15 chunks
 	originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}}

@@ -661,7 +661,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err = batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "0244c987922db21694e8eb0184c4a5e6f3785fb688224822f1f826874ed5aae2613ca15d051a539e3b239027f9bdbd03bd3c66c98afafb674e2a7441912cbe099743324c70e20042de6480f115b215fbba3472a8b994303a99576c1244aa4aec22fdfe6c74ec728aa28a9eb3812bc93291fbc65cfa558e4df12bcde442483d31072000c56f94fe012285bc5832eaee5fe1d47f1e8655539c4500f66207d8edc6", hex.EncodeToString(verifyData))
+	assert.Equal(t, "55dac3baa818133cfdce0f97ddbb950e341399756d7b49bc34107dd65ecd3a4b54d28f1479467d8b97fb99f5257d3e5d63a81cb2d60e3564fe6ec6066a311c119743324c70e20042de6480f115b215fbba3472a8b994303a99576c1244aa4aec22fdfe6c74ec728aa28a9eb3812bc932a0b603cc94be2007d4b3b17af06b4fb30caf0e574d5abcfc5654079e65154679afad75844396082a7200a4e82462aeed", hex.EncodeToString(verifyData))
 
 	chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}}
 	chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}

@@ -670,7 +670,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err = batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c4330b3863672052b3d6c6552d121b0b13f97659f49bbfb6d7fed6e4b7076e4a43383bee97f95fbf2d789a8e0fb365c26e141d6a31e43403b4a469d1723128f6d5de5c54e913e143feede32d0af9b6fd6fdae9cb71d402cfe8bc4d659f228c41f0b9d195c5074278a2346204cfaa336f5de2244a3d53e0effa2f49c81924720e84e", hex.EncodeToString(verifyData))
+	assert.Equal(t, "0b14dce4abfdeb3a69a341f7db6b1e16162c20826e6d964a829e20f671030cab35b73ddb4a78fc4a8540f1d8259512c46e606a701e7ef7742e38cc4562ef53b983bee97f95fbf2d789a8e0fb365c26e141d6a31e43403b4a469d1723128f6d5de5c54e913e143feede32d0af9b6fd6fda28e5610ca6b185d6ac30b53bd83d6366fccb1956daafa90ff6b504a966b119ebb45cb3f7085b7c1d622ee1ad27fcff9", hex.EncodeToString(verifyData))
 }
 
 func TestCodecV1BatchSkipBitmap(t *testing.T) {
common/types/message/auth_msg.go (new file, 91 lines)
@@ -0,0 +1,91 @@
package message

import (
	"crypto/ecdsa"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/rlp"
)

// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
	// Message fields
	Identity *Identity `json:"message"`
	// Prover signature
	Signature string `json:"signature"`
}

// Identity contains all the fields to be signed by the prover.
type Identity struct {
	// ProverName the prover name
	ProverName string `json:"prover_name"`
	// ProverVersion the prover version
	ProverVersion string `json:"prover_version"`
	// Challenge unique challenge generated by manager
	Challenge string `json:"challenge"`
	// HardForkName the hard fork name
	HardForkName string `json:"hard_fork_name"`
}

// SignWithKey signs the auth message with the given private key and stores
// the signature in the message.
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
	// Hash identity content
	hash, err := a.Identity.Hash()
	if err != nil {
		return err
	}

	// Sign register message
	sig, err := crypto.Sign(hash, priv)
	if err != nil {
		return err
	}
	a.Signature = hexutil.Encode(sig)

	return nil
}

// Verify verifies the message of auth.
func (a *AuthMsg) Verify() (bool, error) {
	hash, err := a.Identity.Hash()
	if err != nil {
		return false, err
	}
	sig := common.FromHex(a.Signature)

	pk, err := crypto.SigToPub(hash, sig)
	if err != nil {
		return false, err
	}
	return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}

// PublicKey recovers the public key from the signature.
func (a *AuthMsg) PublicKey() (string, error) {
	hash, err := a.Identity.Hash()
	if err != nil {
		return "", err
	}
	sig := common.FromHex(a.Signature)
	// recover public key
	pk, err := crypto.SigToPub(hash, sig)
	if err != nil {
		return "", err
	}
	return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}

// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
	byt, err := rlp.EncodeToBytes(i)
	if err != nil {
		return nil, err
	}
	hash := crypto.Keccak256Hash(byt)
	return hash[:], nil
}
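A short usage sketch of this registration flow under assumed conditions: the import path and the throwaway key are illustrative, and HardForkName is the field this message adds over the legacy version (which is why TestIdentityHash's expected hash changes below).

package main

import (
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/crypto"

	"scroll-tech/common/types/message" // assumed module path
)

func main() {
	// throwaway test key; never hardcode a real key
	priv, err := crypto.HexToECDSA("0000000000000000000000000000000000000000000000000000000000000042")
	if err != nil {
		log.Fatal(err)
	}

	msg := &message.AuthMsg{
		Identity: &message.Identity{
			ProverName:    "prover-0",
			ProverVersion: "v4.3.92",
			Challenge:     "unique-challenge-token",
			HardForkName:  "bernoulli",
		},
	}
	if err := msg.SignWithKey(priv); err != nil {
		log.Fatal(err)
	}

	ok, err := msg.Verify()
	if err != nil {
		log.Fatal(err)
	}
	pk, err := msg.PublicKey()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("verified:", ok, "prover pubkey:", pk)
}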
common/types/message/legacy_auth_msg.go (new file, 89 lines)
@@ -0,0 +1,89 @@
package message

import (
	"crypto/ecdsa"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/rlp"
)

// LegacyAuthMsg is the old auth message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type LegacyAuthMsg struct {
	// Message fields
	Identity *LegacyIdentity `json:"message"`
	// Prover signature
	Signature string `json:"signature"`
}

// LegacyIdentity contains all the fields to be signed by the prover.
type LegacyIdentity struct {
	// ProverName the prover name
	ProverName string `json:"prover_name"`
	// ProverVersion the prover version
	ProverVersion string `json:"prover_version"`
	// Challenge unique challenge generated by manager
	Challenge string `json:"challenge"`
}

// SignWithKey signs the auth message with the given private key and stores
// the signature in the message.
func (a *LegacyAuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
	// Hash identity content
	hash, err := a.Identity.Hash()
	if err != nil {
		return err
	}

	// Sign register message
	sig, err := crypto.Sign(hash, priv)
	if err != nil {
		return err
	}
	a.Signature = hexutil.Encode(sig)

	return nil
}

// Verify verifies the message of auth.
func (a *LegacyAuthMsg) Verify() (bool, error) {
	hash, err := a.Identity.Hash()
	if err != nil {
		return false, err
	}
	sig := common.FromHex(a.Signature)

	pk, err := crypto.SigToPub(hash, sig)
	if err != nil {
		return false, err
	}
	return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}

// PublicKey recovers the public key from the signature.
func (a *LegacyAuthMsg) PublicKey() (string, error) {
	hash, err := a.Identity.Hash()
	if err != nil {
		return "", err
	}
	sig := common.FromHex(a.Signature)
	// recover public key
	pk, err := crypto.SigToPub(hash, sig)
	if err != nil {
		return "", err
	}
	return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}

// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *LegacyIdentity) Hash() ([]byte, error) {
	byt, err := rlp.EncodeToBytes(i)
	if err != nil {
		return nil, err
	}
	hash := crypto.Keccak256Hash(byt)
	return hash[:], nil
}
@@ -58,26 +58,6 @@ const (
 	ProofTypeBatch
 )
 
-// AuthMsg is the first message exchanged from the Prover to the Sequencer.
-// It effectively acts as a registration, and makes the Prover identification
-// known to the Sequencer.
-type AuthMsg struct {
-	// Message fields
-	Identity *Identity `json:"message"`
-	// Prover signature
-	Signature string `json:"signature"`
-}
-
-// Identity contains all the fields to be signed by the prover.
-type Identity struct {
-	// ProverName the prover name
-	ProverName string `json:"prover_name"`
-	// ProverVersion the prover version
-	ProverVersion string `json:"prover_version"`
-	// Challenge unique challenge generated by manager
-	Challenge string `json:"challenge"`
-}
-
 // GenerateToken generates token
 func GenerateToken() (string, error) {
 	b := make([]byte, 16)

@@ -87,65 +67,6 @@ func GenerateToken() (string, error) {
 	return hex.EncodeToString(b), nil
 }
 
-// SignWithKey auth message with private key and set public key in auth message's Identity
-func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
-	// Hash identity content
-	hash, err := a.Identity.Hash()
-	if err != nil {
-		return err
-	}
-
-	// Sign register message
-	sig, err := crypto.Sign(hash, priv)
-	if err != nil {
-		return err
-	}
-	a.Signature = hexutil.Encode(sig)
-
-	return nil
-}
-
-// Verify verifies the message of auth.
-func (a *AuthMsg) Verify() (bool, error) {
-	hash, err := a.Identity.Hash()
-	if err != nil {
-		return false, err
-	}
-	sig := common.FromHex(a.Signature)
-
-	pk, err := crypto.SigToPub(hash, sig)
-	if err != nil {
-		return false, err
-	}
-	return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
-}
-
-// PublicKey return public key from signature
-func (a *AuthMsg) PublicKey() (string, error) {
-	hash, err := a.Identity.Hash()
-	if err != nil {
-		return "", err
-	}
-	sig := common.FromHex(a.Signature)
-	// recover public key
-	pk, err := crypto.SigToPub(hash, sig)
-	if err != nil {
-		return "", err
-	}
-	return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
-}
-
-// Hash returns the hash of the auth message, which should be the message used
-// to construct the Signature.
-func (i *Identity) Hash() ([]byte, error) {
-	byt, err := rlp.EncodeToBytes(i)
-	if err != nil {
-		return nil, err
-	}
-	hash := crypto.Keccak256Hash(byt)
-	return hash[:], nil
-}
-
 // ProofMsg is the data structure sent to the coordinator.
 type ProofMsg struct {
 	*ProofDetail `json:"zkProof"`
@@ -54,7 +54,7 @@ func TestIdentityHash(t *testing.T) {
 	hash, err := identity.Hash()
 	assert.NoError(t, err)
 
-	expectedHash := "83f5e0ad023e9c1de639ab07b9b4cb972ec9dbbd2524794c533a420a5b137721"
+	expectedHash := "9b8b00f5655411ec1d68ba1666261281c5414aedbda932e5b6a9f7f1b114fdf2"
 	assert.Equal(t, expectedHash, hex.EncodeToString(hash))
 }
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )
 
-var tag = "v4.3.84"
+var tag = "v4.3.92"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
@@ -5,7 +5,7 @@
   "license": "MIT",
   "scripts": {
     "test:hardhat": "npx hardhat test",
-    "test:forge": "forge test -vvv",
+    "test:forge": "forge test -vvv --evm-version cancun",
     "test": "yarn test:hardhat && yarn test:forge",
     "solhint": "./node_modules/.bin/solhint -f table 'src/**/*.sol'",
     "lint:sol": "./node_modules/.bin/prettier --write 'src/**/*.sol'",
contracts/scripts/foundry/DeployEcc.s.sol (new file, 23 lines)
@@ -0,0 +1,23 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity =0.8.24;

import {Script} from "forge-std/Script.sol";
import {console} from "forge-std/console.sol";

import {Ecc} from "../../src/misc/ecc.sol";

contract DeployEcc is Script {
    function run() external {
        uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
        vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
        Ecc ecc = new Ecc();
        address L2_ECC_ADDR = address(ecc);
        vm.stopBroadcast();

        logAddress("L2_ECC_ADDR", L2_ECC_ADDR);
    }

    function logAddress(string memory name, address addr) internal view {
        console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
    }
}
23
contracts/scripts/foundry/DeployHash.s.sol
Normal file
@@ -0,0 +1,23 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity =0.8.24;

import {Script} from "forge-std/Script.sol";
import {console} from "forge-std/console.sol";

import {Hash} from "../../src/misc/hash.sol";

contract DeployHash is Script {
    function run() external {
        uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
        vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
        Hash hash = new Hash();
        address L2_HASH_ADDR = address(hash);
        vm.stopBroadcast();

        logAddress("L2_HASH_ADDR", L2_HASH_ADDR);
    }

    function logAddress(string memory name, address addr) internal view {
        console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
    }
}
@@ -92,10 +92,12 @@ contract DeployL1BridgeContracts is Script {
    }

    function deployMultipleVersionRollupVerifier() internal {
        uint256[] memory _versions = new uint256[](1);
        address[] memory _verifiers = new address[](1);
        uint256[] memory _versions = new uint256[](2);
        address[] memory _verifiers = new address[](2);
        _versions[0] = 0;
        _verifiers[0] = address(zkEvmVerifierV1);
        _versions[1] = 1;
        _verifiers[1] = address(zkEvmVerifierV1);
        rollupVerifier = new MultipleVersionRollupVerifier(L1_SCROLL_CHAIN_PROXY_ADDR, _versions, _verifiers);

        logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));
34
contracts/scripts/relay-skipped-tx.ts
Normal file
@@ -0,0 +1,34 @@
/* eslint-disable node/no-missing-import */
import * as dotenv from "dotenv";

import { ethers } from "hardhat";

dotenv.config();

async function main() {
  const [deployer] = await ethers.getSigners();

  const l1ScrollMessengerAddress = process.env.L1_SCROLL_MESSENGER_PROXY_ADDR!;
  const l2EccContractAddress = process.env.L2_ECC_ADDR!;
  const payload = process.env.SKIPPED_TX_PAYLOAD!; // TODO: calc the payload, parse as bytes

  const L1ScrollMessenger = await ethers.getContractAt("L1ScrollMessenger", l1ScrollMessengerAddress, deployer);

  const tx = await L1ScrollMessenger.sendMessage(
    l2EccContractAddress, // address _to
    0, // uint256 _value
    payload, // bytes memory _message
    100000000 // uint256 _gasLimit
  );

  console.log(`calling ${l2EccContractAddress} with payload from l1, hash:`, tx.hash);
  const receipt = await tx.wait();
  console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}

// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main().catch((error) => {
  console.error(error);
  process.exitCode = 1;
});
@@ -2,6 +2,8 @@

pragma solidity ^0.8.24;

/// @title IScrollChain
/// @notice The interface for ScrollChain.
interface IScrollChain {
    /**********
     * Events *

@@ -43,23 +45,23 @@ interface IScrollChain {
     * Public View Functions *
     *************************/

    /// @notice The latest finalized batch index.
    /// @return The latest finalized batch index.
    function lastFinalizedBatchIndex() external view returns (uint256);

    /// @notice Return the batch hash of a committed batch.
    /// @param batchIndex The index of the batch.
    /// @return The batch hash of a committed batch.
    function committedBatches(uint256 batchIndex) external view returns (bytes32);

    /// @notice Return the state root of a committed batch.
    /// @param batchIndex The index of the batch.
    /// @return The state root of a committed batch.
    function finalizedStateRoots(uint256 batchIndex) external view returns (bytes32);

    /// @notice Return the message root of a committed batch.
    /// @param batchIndex The index of the batch.
    /// @return The message root of a committed batch.
    function withdrawRoots(uint256 batchIndex) external view returns (bytes32);

    /// @notice Return whether the batch is finalized by batch index.
    /// @param batchIndex The index of the batch.
    /// @return Whether the batch is finalized by batch index.
    function isBatchFinalized(uint256 batchIndex) external view returns (bool);

    /*****************************
@@ -8,6 +8,8 @@ import {IScrollChain} from "./IScrollChain.sol";
import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";

/// @title MultipleVersionRollupVerifier
/// @notice Verifies aggregate zk proofs using the appropriate verifier.
contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
    /**********
     * Events *

@@ -37,7 +39,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
     *************/

    /// @notice The address of ScrollChain contract.
    address immutable scrollChain;
    address public immutable scrollChain;

    /***********
     * Structs *

@@ -58,7 +60,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
    /// The verifiers are sorted by batchIndex in increasing order.
    mapping(uint256 => Verifier[]) public legacyVerifiers;

    /// @notice Mapping from verifier version to the lastest used zkevm verifier.
    /// @notice Mapping from verifier version to the latest used zkevm verifier.
    mapping(uint256 => Verifier) public latestVerifier;

    /***************

@@ -86,6 +88,8 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
     *************************/

    /// @notice Return the number of legacy verifiers.
    /// @param _version The version of legacy verifiers.
    /// @return The number of legacy verifiers.
    function legacyVerifiersLength(uint256 _version) external view returns (uint256) {
        return legacyVerifiers[_version].length;
    }

@@ -93,6 +97,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
    /// @notice Compute the verifier should be used for specific batch.
    /// @param _version The version of verifier to query.
    /// @param _batchIndex The batch index to query.
    /// @return The address of verifier.
    function getVerifier(uint256 _version, uint256 _batchIndex) public view returns (address) {
        // Normally, we will use the latest verifier.
        Verifier memory _verifier = latestVerifier[_version];

@@ -144,6 +149,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
     ************************/

    /// @notice Update the address of zkevm verifier.
    /// @param _version The version of the verifier.
    /// @param _startBatchIndex The start batch index when the verifier will be used.
    /// @param _verifier The address of new verifier.
    function updateVerifier(
@@ -115,11 +115,11 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     *************/

    /// @dev Address of the point evaluation precompile used for EIP-4844 blob verification.
    address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
    address private constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);

    /// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
    /// point evaluation precompile
    uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
    uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;

    /// @notice The chain id of the corresponding layer 2 chain.
    uint64 public immutable layer2ChainId;

@@ -236,6 +236,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     *****************************/

    /// @notice Import layer 2 genesis block
    /// @param _batchHeader The header of the genesis batch.
    /// @param _stateRoot The state root of the genesis block.
    function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
        // check genesis batch header length
        if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero();

@@ -475,7 +477,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
                _postStateRoot,
                _withdrawRoot,
                _dataHash,
                _blobDataProof[0:64]
                _blobDataProof[0:64],
                _blobVersionedHash
            )
        );
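For context, abi.encodePacked over 32-byte values is plain concatenation, so adding _blobVersionedHash appends exactly one more word to the keccak preimage. A Go sketch of the visible tail of that preimage; the hunk elides the leading fields and so does this sketch, and the values here are dummies.

package main

import (
    "fmt"

    "github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
    var postStateRoot, withdrawRoot, dataHash, blobVersionedHash [32]byte
    blobDataProof := make([]byte, 64) // only the first 64 bytes enter the preimage

    // abi.encodePacked of fixed-size byte values is plain concatenation, so the
    // new _blobVersionedHash argument is one extra 32-byte word at the end.
    preimage := append([]byte{}, postStateRoot[:]...)
    preimage = append(preimage, withdrawRoot[:]...)
    preimage = append(preimage, dataHash[:]...)
    preimage = append(preimage, blobDataProof[:64]...)
    preimage = append(preimage, blobVersionedHash[:]...)

    fmt.Println("public input hash:", crypto.Keccak256Hash(preimage))
}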
@@ -2,6 +2,8 @@

pragma solidity ^0.8.24;

/// @title IRollupVerifier
/// @notice The interface for rollup verifier.
interface IRollupVerifier {
    /// @notice Verify aggregate zk proof.
    /// @param batchIndex The batch index to verify.

@@ -199,7 +199,7 @@ library PatriciaMerkleTrieVerifier {
            }

            // decodes all RLP encoded data and stores their DATA items
            // [length - 128 bits | calldata offset - 128 bits] in a continous memory region.
            // [length - 128 bits | calldata offset - 128 bits] in a continuous memory region.
            // Expects that the RLP starts with a list that defines the length
            // of the whole RLP region.
            function decodeFlat(_ptr) -> ptr, memStart, nItems, hash {

@@ -505,7 +505,7 @@ library PatriciaMerkleTrieVerifier {
            }

            // the one and only boundary check
            // in case an attacker crafted a malicous payload
            // in case an attacker crafted a malicious payload
            // and succeeds in the prior verification steps
            // then this should catch any bogus accesses
            if iszero(eq(ptr, add(proof.offset, proof.length))) {
127
contracts/src/misc/ecc.sol
Normal file
@@ -0,0 +1,127 @@
// SPDX-License-Identifier: GPL-3.0

pragma solidity =0.8.24;

contract Ecc {
    /* ECC Functions */
    // https://etherscan.io/address/0x41bf00f080ed41fa86201eac56b8afb170d9e36d#code
    function ecAdd(uint256[2] memory p0, uint256[2] memory p1) public view returns (uint256[2] memory retP) {
        uint256[4] memory i = [p0[0], p0[1], p1[0], p1[1]];

        assembly {
            // call ecadd precompile
            // inputs are: x1, y1, x2, y2
            if iszero(staticcall(not(0), 0x06, i, 0x80, retP, 0x40)) {
                revert(0, 0)
            }
        }
    }

    // https://etherscan.io/address/0x41bf00f080ed41fa86201eac56b8afb170d9e36d#code
    function ecMul(uint256[2] memory p, uint256 s) public view returns (uint256[2] memory retP) {
        // With a public key (x, y), this computes p = scalar * (x, y).
        uint256[3] memory i = [p[0], p[1], s];

        assembly {
            // call ecmul precompile
            // inputs are: x, y, scalar
            if iszero(staticcall(not(0), 0x07, i, 0x60, retP, 0x40)) {
                revert(0, 0)
            }
        }
    }

    // scroll-tech/scroll/contracts/src/libraries/verifier/RollupVerifier.sol
    struct G1Point {
        uint256 x;
        uint256 y;
    }
    struct G2Point {
        uint256[2] x;
        uint256[2] y;
    }

    function ecPairing(G1Point[] memory p1, G2Point[] memory p2) internal view returns (bool) {
        uint256 length = p1.length * 6;
        uint256[] memory input = new uint256[](length);
        uint256[1] memory result;
        bool ret;

        require(p1.length == p2.length);

        for (uint256 i = 0; i < p1.length; i++) {
            input[0 + i * 6] = p1[i].x;
            input[1 + i * 6] = p1[i].y;
            input[2 + i * 6] = p2[i].x[0];
            input[3 + i * 6] = p2[i].x[1];
            input[4 + i * 6] = p2[i].y[0];
            input[5 + i * 6] = p2[i].y[1];
        }

        assembly {
            ret := staticcall(gas(), 8, add(input, 0x20), mul(length, 0x20), result, 0x20)
        }
        require(ret);
        return result[0] != 0;
    }

    /* Bench */
    function ecAdds(uint256 n) public {
        uint256[2] memory p0;
        p0[0] = 1;
        p0[1] = 2;
        uint256[2] memory p1;
        p1[0] = 1;
        p1[1] = 2;

        for (uint256 i = 0; i < n; i++) {
            ecAdd(p0, p1);
        }
    }

    function ecMuls(uint256 n) public {
        uint256[2] memory p0;
        p0[0] = 1;
        p0[1] = 2;

        for (uint256 i = 0; i < n; i++) {
            ecMul(p0, 3);
        }
    }

    function ecPairings(uint256 n) public {
        G1Point[] memory g1_points = new G1Point[](2);
        G2Point[] memory g2_points = new G2Point[](2);
        g1_points[0].x = 0x0000000000000000000000000000000000000000000000000000000000000001;
        g1_points[0].y = 0x0000000000000000000000000000000000000000000000000000000000000002;
        g2_points[0].x[1] = 0x1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed;
        g2_points[0].x[0] = 0x198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2;
        g2_points[0].y[1] = 0x12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa;
        g2_points[0].y[0] = 0x090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b;
        g1_points[1].x = 0x1aa125a22bd902874034e67868aed40267e5575d5919677987e3bc6dd42a32fe;
        g1_points[1].y = 0x1bacc186725464068956d9a191455c2d6f6db282d83645c610510d8d4efbaee0;
        g2_points[1].x[1] = 0x1b7734c80605f71f1e2de61e998ce5854ff2abebb76537c3d67e50d71422a852;
        g2_points[1].x[0] = 0x10d5a1e34b2388a5ebe266033a5e0e63c89084203784da0c6bd9b052a78a2cac;
        g2_points[1].y[1] = 0x275739c5c2cdbc72e37c689e2ab441ea76c1d284b9c46ae8f5c42ead937819e1;
        g2_points[1].y[0] = 0x018de34c5b7c3d3d75428bbe050f1449ea3d9961d563291f307a1874f7332e65;

        for (uint256 i = 0; i < n; i++) {
            ecPairing(g1_points, g2_points);
            // bool checked = false;
            // checked = ecPairing(g1_points, g2_points);
            // require(checked);
        }
    }

    // https://github.com/OpenZeppelin/openzeppelin-contracts/blob/8a0b7bed82d6b8053872c3fd40703efd58f5699d/test/utils/cryptography/ECDSA.test.js#L230
    function ecRecovers(uint256 n) public {
        bytes32 hash = 0xb94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9;
        bytes32 r = 0xe742ff452d41413616a5bf43fe15dd88294e983d3d36206c2712f39083d638bd;
        uint8 v = 0x1b;
        bytes32 s = 0xe0a0fc89be718fbc1033e1d30d78be1c68081562ed2e97af876f286f3453231d;

        for (uint256 i = 0; i < n; i++) {
            ecrecover(hash, v, r, s);
        }
    }
}
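A sketch of what the 0x06/0x07 precompiles compute, using go-ethereum's bn256 bindings (this assumes the scroll-tech/go-ethereum fork keeps upstream's crypto/bn256 package): ecAdd and ecMul are ordinary group operations on BN254, so adding the generator to itself equals multiplying it by two.

package main

import (
    "bytes"
    "fmt"
    "math/big"

    "github.com/scroll-tech/go-ethereum/crypto/bn256"
)

func main() {
    // (1, 2) is the BN254 G1 generator, the same point the bench functions
    // above feed into the 0x06/0x07 precompiles.
    g := new(bn256.G1).ScalarBaseMult(big.NewInt(1))

    // ecAdd(g, g) and ecMul(g, 2) must agree: both precompiles implement
    // plain group operations on the curve.
    sum := new(bn256.G1).Add(g, g)
    dbl := new(bn256.G1).ScalarMult(g, big.NewInt(2))
    fmt.Println("ecAdd == ecMul:", bytes.Equal(sum.Marshal(), dbl.Marshal()))
}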
34
contracts/src/misc/hash.sol
Normal file
@@ -0,0 +1,34 @@
// SPDX-License-Identifier: GPL-3.0

pragma solidity =0.8.24;

contract Hash {
    function sha256(bytes memory input) public view returns (bytes memory out) {
        (bool ok, bytes memory out) = address(2).staticcall(input);
        require(ok);
    }

    function sha256Yul(bytes memory input) public view returns (bytes memory out) {
        assembly {
            // mstore(0, input)
            if iszero(staticcall(gas(), 2, 0, 32, 0, 32)) {
                revert(0, 0)
            }
            // return(0, 32)
        }
    }

    function sha256s(bytes memory input, uint256 n) public {
        for (uint256 i = 0; i < n; i++) {
            sha256(input);
        }
    }

    function keccak256s(uint256 n) public {
        bytes32[] memory output = new bytes32[](n);
        for (uint256 i = 0; i < n; i++) {
            bytes memory input = abi.encode(i);
            output[i] = keccak256(input);
        }
    }
}
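For reference, the precompile at address 2 returns plain SHA-256 of its input, the same digest Go's standard library produces; a tiny sketch, not part of the diff.

package main

import (
    "crypto/sha256"
    "fmt"
)

func main() {
    // What the sha256s bench above asks the 0x02 precompile for, n times.
    digest := sha256.Sum256([]byte("scroll"))
    fmt.Printf("%x\n", digest)
}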
@@ -83,6 +83,8 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
    }

    function testTransferUSDCRoles(address owner) external {
        hevm.assume(owner != address(0));

        // non-whitelisted caller call, should revert
        hevm.expectRevert("only circle caller");
        gateway.transferUSDCRoles(owner);
@@ -5,6 +5,7 @@
  "batch_collection_time_sec": 180,
  "chunk_collection_time_sec": 180,
  "verifier": {
    "fork_name": "bernoulli",
    "mock_mode": true,
    "params_path": "",
    "assets_path": ""

@@ -50,6 +50,7 @@ type Config struct {

// VerifierConfig load zk verifier config.
type VerifierConfig struct {
    ForkName   string `json:"fork_name"`
    MockMode   bool   `json:"mock_mode"`
    ParamsPath string `json:"params_path"`
    AssetsPath string `json:"assets_path"`
@@ -53,25 +53,44 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
        return jwt.MapClaims{}
    }

    // recover the public key
    authMsg := message.AuthMsg{
        Identity: &message.Identity{
            Challenge:     v.Message.Challenge,
            ProverName:    v.Message.ProverName,
            ProverVersion: v.Message.ProverVersion,
        },
        Signature: v.Signature,
    var publicKey string
    var err error
    if v.Message.HardForkName != "" {
        authMsg := message.AuthMsg{
            Identity: &message.Identity{
                Challenge:     v.Message.Challenge,
                ProverName:    v.Message.ProverName,
                ProverVersion: v.Message.ProverVersion,
                HardForkName:  v.Message.HardForkName,
            },
            Signature: v.Signature,
        }
        publicKey, err = authMsg.PublicKey()
    } else {
        authMsg := message.LegacyAuthMsg{
            Identity: &message.LegacyIdentity{
                Challenge:     v.Message.Challenge,
                ProverName:    v.Message.ProverName,
                ProverVersion: v.Message.ProverVersion,
            },
            Signature: v.Signature,
        }
        publicKey, err = authMsg.PublicKey()
    }

    publicKey, err := authMsg.PublicKey()
    if err != nil {
        return jwt.MapClaims{}
    }

    if v.Message.HardForkName == "" {
        v.Message.HardForkName = "shanghai"
    }

    return jwt.MapClaims{
        types.PublicKey:     publicKey,
        types.ProverName:    v.Message.ProverName,
        types.ProverVersion: v.Message.ProverVersion,
        types.HardForkName:  v.Message.HardForkName,
    }
}

@@ -89,5 +108,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
    if proverVersion, ok := claims[types.ProverVersion]; ok {
        c.Set(types.ProverVersion, proverVersion)
    }

    if hardForkName, ok := claims[types.HardForkName]; ok {
        c.Set(types.HardForkName, hardForkName)
    }
    return nil
}
@@ -2,6 +2,7 @@ package api

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/scroll-tech/go-ethereum/params"
    "gorm.io/gorm"

@@ -25,6 +26,8 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
        panic("proof receiver new verifier failure")
    }

    log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap)

    Auth = NewAuthController(db)
    GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
    SubmitProof = NewSubmitProofController(cfg, db, vf, reg)
@@ -6,6 +6,8 @@ import (

    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/scroll-tech/go-ethereum/params"
    "gorm.io/gorm"

@@ -21,15 +23,21 @@ import (
// GetTaskController the get prover task api controller
type GetTaskController struct {
    proverTasks map[message.ProofType]provertask.ProverTask

    getTaskAccessCounter *prometheus.CounterVec
}

// NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
    chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVK, reg)
    batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVK, reg)
    chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVKMap, reg)
    batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVKMap, reg)

    ptc := &GetTaskController{
        proverTasks: make(map[message.ProofType]provertask.ProverTask),
        getTaskAccessCounter: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
            Name: "coordinator_get_task_access_count",
            Help: "Multi dimensions get task counter.",
        }, []string{coordinatorType.LabelProverName, coordinatorType.LabelProverPublicKey, coordinatorType.LabelProverVersion}),
    }

    ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask

@@ -38,6 +46,28 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *
    return ptc
}

func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
    publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
    if !publicKeyExist {
        return fmt.Errorf("get public key from context failed")
    }
    proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
    if !proverNameExist {
        return fmt.Errorf("get prover name from context failed")
    }
    proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
    if !proverVersionExist {
        return fmt.Errorf("get prover version from context failed")
    }

    ptc.getTaskAccessCounter.With(prometheus.Labels{
        coordinatorType.LabelProverPublicKey: publicKey.(string),
        coordinatorType.LabelProverName:      proverName.(string),
        coordinatorType.LabelProverVersion:   proverVersion.(string),
    }).Inc()
    return nil
}

// GetTasks get assigned chunk/batch task
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
    var getTaskParameter coordinatorType.GetTaskParameter

@@ -55,6 +85,10 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
        return
    }

    if err := ptc.incGetTaskAccessCounter(ctx); err != nil {
        log.Warn("get_task access counter inc failed", "error", err.Error())
    }

    result, err := proverTask.Assign(ctx, &getTaskParameter)
    if err != nil {
        nerr := fmt.Errorf("return prover task err:%w", err)
@@ -31,16 +31,17 @@ type BatchProverTask struct {

    batchAttemptsExceedTotal prometheus.Counter
    batchTaskGetTaskTotal    *prometheus.CounterVec
    batchTaskGetTaskProver   *prometheus.CounterVec
}

// NewBatchProverTask new a batch collector
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *BatchProverTask {
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *BatchProverTask {
    forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
    log.Info("new batch prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)

    bp := &BatchProverTask{
        BaseProverTask: BaseProverTask{
            vk:          vk,
            vkMap:       vkMap,
            db:          db,
            cfg:         cfg,
            nameForkMap: nameForkMap,

@@ -58,6 +59,7 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
            Name: "coordinator_batch_get_task_total",
            Help: "Total number of batch get task.",
        }, []string{"fork_name"}),
        batchTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "batch"),
    }
    return bp
}

@@ -69,9 +71,9 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
        return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
    }

    hardForkNumber, err := bp.getHardForkNumberByName(getTaskParameter.HardForkName)
    hardForkNumber, err := bp.getHardForkNumberByName(taskCtx.HardForkName)
    if err != nil {
        log.Error("batch assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
        log.Error("batch assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
        return nil, err
    }

@@ -83,7 +85,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
    if fromBlockNum != 0 {
        startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, fromBlockNum)
        if chunkErr != nil {
            log.Error("failed to get fork start chunk index", "forkName", getTaskParameter.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
            log.Error("failed to get fork start chunk index", "forkName", taskCtx.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
            return nil, ErrCoordinatorInternalFailure
        }
        if startChunk == nil {

@@ -93,8 +95,8 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
    }
    if toBlockNum != math.MaxInt64 {
        toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, toBlockNum)
        if err != nil {
            log.Error("failed to get fork end chunk index", "forkName", getTaskParameter.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
        if chunkErr != nil {
            log.Error("failed to get fork end chunk index", "forkName", taskCtx.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
            return nil, ErrCoordinatorInternalFailure
        }
        if toChunk != nil {

@@ -179,7 +181,12 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
        return nil, ErrCoordinatorInternalFailure
    }

    bp.batchTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
    bp.batchTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
    bp.batchTaskGetTaskProver.With(prometheus.Labels{
        coordinatorType.LabelProverName:      proverTask.ProverName,
        coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
        coordinatorType.LabelProverVersion:   proverTask.ProverVersion,
    }).Inc()

    return taskMsg, nil
}
@@ -29,15 +29,16 @@ type ChunkProverTask struct {

    chunkAttemptsExceedTotal prometheus.Counter
    chunkTaskGetTaskTotal    *prometheus.CounterVec
    chunkTaskGetTaskProver   *prometheus.CounterVec
}

// NewChunkProverTask new a chunk prover task
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *ChunkProverTask {
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *ChunkProverTask {
    forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
    log.Info("new chunk prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
    cp := &ChunkProverTask{
        BaseProverTask: BaseProverTask{
            vk:          vk,
            vkMap:       vkMap,
            db:          db,
            cfg:         cfg,
            nameForkMap: nameForkMap,

@@ -55,6 +56,7 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
            Name: "coordinator_chunk_get_task_total",
            Help: "Total number of chunk get task.",
        }, []string{"fork_name"}),
        chunkTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "chunk"),
    }
    return cp
}

@@ -66,9 +68,9 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
        return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
    }

    hardForkNumber, err := cp.getHardForkNumberByName(getTaskParameter.HardForkName)
    hardForkNumber, err := cp.getHardForkNumberByName(taskCtx.HardForkName)
    if err != nil {
        log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
        log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
        return nil, err
    }

@@ -151,7 +153,12 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
        return nil, ErrCoordinatorInternalFailure
    }

    cp.chunkTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
    cp.chunkTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
    cp.chunkTaskGetTaskProver.With(prometheus.Labels{
        coordinatorType.LabelProverName:      proverTask.ProverName,
        coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
        coordinatorType.LabelProverVersion:   proverTask.ProverVersion,
    }).Inc()

    return taskMsg, nil
}
@@ -2,8 +2,12 @@ package provertask

import (
    "fmt"
    "sync"

    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/scroll-tech/go-ethereum/log"
    "gorm.io/gorm"

    "scroll-tech/common/version"

@@ -13,11 +17,12 @@ import (
    coordinatorType "scroll-tech/coordinator/internal/types"
)

// ErrCoordinatorInternalFailure coordinator internal db failure
var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")

// ErrHardForkName indicates client request with the wrong hard fork name
var ErrHardForkName = fmt.Errorf("wrong hard fork name")
var (
    // ErrCoordinatorInternalFailure coordinator internal db failure
    ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
    // ErrHardForkName indicates client request with the wrong hard fork name
    ErrHardForkName = fmt.Errorf("wrong hard fork name")
)

// ProverTask the interface of a collector who send data to prover
type ProverTask interface {

@@ -28,8 +33,8 @@ type ProverTask interface {
type BaseProverTask struct {
    cfg *config.Config
    db  *gorm.DB
    vk  string

    vkMap       map[string]string
    nameForkMap map[string]uint64
    forkHeights []uint64

@@ -44,6 +49,7 @@ type proverTaskContext struct {
    PublicKey     string
    ProverName    string
    ProverVersion string
    HardForkName  string
}

// checkParameter check the prover task parameter illegal

@@ -68,12 +74,24 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
    }
    ptc.ProverVersion = proverVersion.(string)

    hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
    if !hardForkNameExist {
        return nil, fmt.Errorf("get hard fork name from context failed")
    }
    ptc.HardForkName = hardForkName.(string)

    if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) {
        return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string))
    }

    vk, vkExist := b.vkMap[ptc.HardForkName]
    if !vkExist {
        return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
    }

    // if the prover has a different vk
    if getTaskParameter.VK != b.vk {
    if getTaskParameter.VK != vk {
        log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
        // if the prover reports a different prover version
        if !version.CheckScrollProverVersion(proverVersion.(string)) {
            return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))

@@ -115,3 +133,22 @@ func (b *BaseProverTask) getHardForkNumberByName(forkName string) (uint64, error

    return hardForkNumber, nil
}

var (
    getTaskCounterInitOnce sync.Once
    getTaskCounterVec      *prometheus.CounterVec = nil
)

func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
    getTaskCounterInitOnce.Do(func() {
        getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{
            Name: "coordinator_get_task_count",
            Help: "Multi dimensions get task counter.",
        }, []string{"task_type",
            coordinatorType.LabelProverName,
            coordinatorType.LabelProverPublicKey,
            coordinatorType.LabelProverVersion})
    })

    return getTaskCounterVec.MustCurryWith(prometheus.Labels{"task_type": taskType})
}
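A self-contained sketch of the MustCurryWith pattern introduced above (the registry and reduced label set here are for illustration): one shared counter family is registered once behind the sync.Once, then curried per task type, so the chunk and batch counters differ only in the task_type label.

package main

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
    reg := prometheus.NewRegistry()

    // Register the family once; registering the same name twice would panic.
    vec := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
        Name: "coordinator_get_task_count",
        Help: "Multi dimensions get task counter.",
    }, []string{"task_type", "prover_name"})

    // Curry pins task_type, leaving only prover_name to fill at call sites.
    chunk := vec.MustCurryWith(prometheus.Labels{"task_type": "chunk"})
    chunk.With(prometheus.Labels{"prover_name": "prover-1"}).Inc()
}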
@@ -134,6 +134,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
    if len(pv) == 0 {
        return fmt.Errorf("get ProverVersion from context failed")
    }
    hardForkName := ctx.GetString(coordinatorType.HardForkName)

    var proverTask *orm.ProverTask
    var err error

@@ -156,20 +157,19 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
    proofTimeSec := uint64(proofTime.Seconds())

    log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
        "proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec)
        "proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec, "hardForkName", hardForkName)

    if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter); err != nil {
    if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil {
        return err
    }

    m.verifierTotal.WithLabelValues(pv).Inc()

    var success bool
    success := true
    var verifyErr error
    if proofMsg.Type == message.ProofTypeChunk {
        success, verifyErr = m.verifier.VerifyChunkProof(proofMsg.ChunkProof)
    } else if proofMsg.Type == message.ProofTypeBatch {
        success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof)
    // only verify batch proof. chunk proof verifier have been disabled after Bernoulli
    if proofMsg.Type == message.ProofTypeBatch {
        success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof, hardForkName)
    }

    if verifyErr != nil || !success {

@@ -178,7 +178,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
        m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)

        log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
            "prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
            "prover pk", pk, "forkName", hardForkName, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)

        if verifyErr != nil {
            return ErrValidatorFailureVerifiedFailed

@@ -189,7 +189,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
    m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds())

    log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
        "prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec)
        "prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "forkName", hardForkName)

    if err := m.closeProofTask(ctx, proverTask, proofMsg, proofTimeSec); err != nil {
        m.proofSubmitFailure.Inc()

@@ -221,7 +221,7 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
    return nil
}

func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) (err error) {
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter, forkName string) (err error) {
    defer func() {
        if err != nil {
            m.validateFailureTotal.Inc()

@@ -240,7 +240,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
            "cannot submit valid proof for a prover task twice",
            "taskType", proverTask.TaskType, "hash", proofMsg.ID,
            "proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
            "proverPublicKey", proverTask.ProverPublicKey,
            "proverPublicKey", proverTask.ProverPublicKey, "forkName", forkName,
        )
        return ErrValidatorFailureProverTaskCannotSubmitTwice
    }

@@ -259,7 +259,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
        log.Info("proof generated by prover failed",
            "taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName,
            "proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType,
            "failureMessage", failureMsg)
            "failureMessage", failureMsg, "forkName", forkName)
        return ErrValidatorFailureProofMsgStatusNotOk
    }

@@ -267,13 +267,13 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
    if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
        m.validateFailureProverTaskTimeout.Inc()
        log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType,
            "proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
            "proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec, "forkName", forkName)
        return ErrValidatorFailureProofTimeout
    }

    // store the proof to prover task
    if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofMsg); updateTaskProofErr != nil {
        log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk,
        log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk, "forkName", forkName,
            "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
    }

@@ -281,7 +281,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
    if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
        m.validateFailureProverTaskHaveVerifier.Inc()
        log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
            "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
            "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk, "forkName", forkName)
        return ErrValidatorFailureTaskHaveVerifiedSuccess
    }
    return nil
BIN
coordinator/internal/logic/verifier/legacy_vk/agg_vk.vkey
Normal file
Binary file not shown.
BIN
coordinator/internal/logic/verifier/legacy_vk/chunk_vk.vkey
Normal file
Binary file not shown.
@@ -9,8 +9,26 @@ import (
)

// NewVerifier Sets up a mock verifier.
func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) {
    return &Verifier{}, nil
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
    batchVKMap := map[string]string{
        "shanghai":  "",
        "bernoulli": "",
        "london":    "",
        "istanbul":  "",
        "homestead": "",
        "eip155":    "",
    }
    chunkVKMap := map[string]string{
        "shanghai":  "",
        "bernoulli": "",
        "london":    "",
        "istanbul":  "",
        "homestead": "",
        "eip155":    "",
    }
    batchVKMap[cfg.ForkName] = ""
    chunkVKMap[cfg.ForkName] = ""
    return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
}

// VerifyChunkProof return a mock verification result for a ChunkProof.

@@ -22,7 +40,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
}

// VerifyBatchProof return a mock verification result for a BatchProof.
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
    if string(proof.Proof) == InvalidTestProof {
        return false, nil
    }
@@ -9,7 +9,7 @@ const InvalidTestProof = "this is a invalid proof"

// Verifier represents a rust ffi to a halo2 verifier.
type Verifier struct {
    cfg     *config.VerifierConfig
    BatchVK string
    ChunkVK string
    cfg        *config.VerifierConfig
    ChunkVKMap map[string]string
    BatchVKMap map[string]string
}
@@ -11,9 +11,11 @@ package verifier
import "C" //nolint:typecheck

import (
    "embed"
    "encoding/base64"
    "encoding/json"
    "io"
    "io/fs"
    "os"
    "path"
    "unsafe"

@@ -28,7 +30,26 @@ import (
// NewVerifier Sets up a rust ffi to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
    if cfg.MockMode {
        return &Verifier{cfg: cfg}, nil
        batchVKMap := map[string]string{
            "shanghai":  "",
            "bernoulli": "",
            "london":    "",
            "istanbul":  "",
            "homestead": "",
            "eip155":    "",
        }
        chunkVKMap := map[string]string{
            "shanghai":  "",
            "bernoulli": "",
            "london":    "",
            "istanbul":  "",
            "homestead": "",
            "eip155":    "",
        }

        batchVKMap[cfg.ForkName] = ""
        chunkVKMap[cfg.ForkName] = ""
        return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
    }
    paramsPathStr := C.CString(cfg.ParamsPath)
    assetsPathStr := C.CString(cfg.AssetsPath)

@@ -40,25 +61,31 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
    C.init_batch_verifier(paramsPathStr, assetsPathStr)
    C.init_chunk_verifier(paramsPathStr, assetsPathStr)

    batchVK, err := readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
    v := &Verifier{
        cfg:        cfg,
        ChunkVKMap: make(map[string]string),
        BatchVKMap: make(map[string]string),
    }

    batchVK, err := v.readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
    if err != nil {
        return nil, err
    }

    chunkVK, err := readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
    chunkVK, err := v.readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
    if err != nil {
        return nil, err
    }
    v.BatchVKMap[cfg.ForkName] = batchVK
    v.ChunkVKMap[cfg.ForkName] = chunkVK

    return &Verifier{
        cfg:     cfg,
        BatchVK: batchVK,
        ChunkVK: chunkVK,
    }, nil
    if err := v.loadEmbedVK(); err != nil {
        return nil, err
    }
    return v, nil
}
// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
    if v.cfg.MockMode {
        log.Info("Mock mode, batch verifier disabled")
        if string(proof.Proof) == InvalidTestProof {

@@ -72,13 +99,15 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
        return false, err
    }

    log.Info("Start to verify batch proof", "forkName", forkName)
    proofStr := C.CString(string(buf))
    forkNameStr := C.CString(forkName)
    defer func() {
        C.free(unsafe.Pointer(proofStr))
        C.free(unsafe.Pointer(forkNameStr))
    }()

    log.Info("Start to verify batch proof ...")
    verified := C.verify_batch_proof(proofStr)
    verified := C.verify_batch_proof(proofStr, forkNameStr)
    return verified != 0, nil
}

@@ -107,7 +136,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
    return verified != 0, nil
}

func readVK(filePat string) (string, error) {
func (v *Verifier) readVK(filePat string) (string, error) {
    f, err := os.Open(filePat)
    if err != nil {
        return "", err

@@ -118,3 +147,26 @@ func readVK(filePat string) (string, error) {
    }
    return base64.StdEncoding.EncodeToString(byt), nil
}

//go:embed legacy_vk/*
var legacyVKFS embed.FS

func (v *Verifier) loadEmbedVK() error {
    batchVKBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey")
    if err != nil {
        log.Error("load embed batch vk failure", "err", err)
        return err
    }

    chunkVkBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/chunk_vk.vkey")
    if err != nil {
        log.Error("load embed chunk vk failure", "err", err)
        return err
    }

    v.BatchVKMap["shanghai"] = base64.StdEncoding.EncodeToString(batchVKBytes)
    v.ChunkVKMap["shanghai"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
    v.BatchVKMap[""] = base64.StdEncoding.EncodeToString(batchVKBytes)
    v.ChunkVKMap[""] = base64.StdEncoding.EncodeToString(chunkVkBytes)
    return nil
}
@@ -14,7 +14,6 @@ import (
    "scroll-tech/common/types/message"

    "scroll-tech/coordinator/internal/config"
    "scroll-tech/coordinator/internal/logic/verifier"
)

var (

@@ -34,7 +33,7 @@ func TestFFI(t *testing.T) {
        AssetsPath: *assetsPath,
    }

    v, err := verifier.NewVerifier(cfg)
    v, err := NewVerifier(cfg)
    as.NoError(err)

    chunkProof1 := readChunkProof(*chunkProofPath1, as)

@@ -50,7 +49,7 @@ func TestFFI(t *testing.T) {
    t.Log("Verified chunk proof 2")

    batchProof := readBatchProof(*batchProofPath, as)
    batchOk, err := v.VerifyBatchProof(batchProof)
    batchOk, err := v.VerifyBatchProof(batchProof, "bernoulli")
    as.NoError(err)
    as.True(batchOk)
    t.Log("Verified batch proof")
@@ -24,6 +24,7 @@ type Batch struct {
    // batch
    Index           uint64 `json:"index" gorm:"column:index"`
    Hash            string `json:"hash" gorm:"column:hash"`
    DataHash        string `json:"data_hash" gorm:"column:data_hash"`
    StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
    StartChunkHash  string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
    EndChunkIndex   uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`

@@ -54,6 +55,10 @@ type Batch struct {
    OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
    OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`

    // blob
    BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
    BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`

    // metadata
    CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
    UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`

@@ -248,6 +253,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
    newBatch := Batch{
        Index:           batch.Index,
        Hash:            daBatch.Hash().Hex(),
        DataHash:        daBatch.DataHash.Hex(),
        StartChunkHash:  startDAChunkHash.Hex(),
        StartChunkIndex: startChunkIndex,
        EndChunkHash:    endDAChunkHash.Hex(),

@@ -262,6 +268,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
        ActiveAttempts: 0,
        RollupStatus:   int16(types.RollupPending),
        OracleStatus:   int16(types.GasOraclePending),
        BlobDataProof:  nil, // using mock value because this piece of codes is only used in unit tests
        BlobSize:       0,   // using mock value because this piece of codes is only used in unit tests
    }

    db := o.db
@@ -48,6 +48,10 @@ type Chunk struct {
    // batch
    BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`

    // blob
    CrcMax   uint64 `json:"crc_max" gorm:"column:crc_max"`
    BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`

    // metadata
    TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
    TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`

@@ -300,6 +304,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
        ProvingStatus: int16(types.ProvingTaskUnassigned),
        TotalAttempts: 0,
        ActiveAttempts: 0,
        CrcMax:   0, // using mock value because this piece of codes is only used in unit tests
        BlobSize: 0, // using mock value because this piece of codes is only used in unit tests
    }

    db := o.db
@@ -9,6 +9,8 @@ const (
    ProverName = "prover_name"
    // ProverVersion the prover version for context
    ProverVersion = "prover_version"
    // HardForkName the fork name for context
    HardForkName = "hard_fork_name"
)

// Message the login message struct

@@ -16,6 +18,7 @@ type Message struct {
    Challenge     string `form:"challenge" json:"challenge" binding:"required"`
    ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
    ProverName    string `form:"prover_name" json:"prover_name" binding:"required"`
    HardForkName  string `form:"hard_fork_name" json:"hard_fork_name"`
}

// LoginParameter for /login api
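A sketch of the resulting login payload with the new optional field; loginMessage below is a hypothetical local mirror of the Message struct above, kept self-contained so the snippet runs on its own.

package main

import (
    "encoding/json"
    "fmt"
)

// Hypothetical mirror of the coordinator's Message struct, json tags as above.
type loginMessage struct {
    Challenge     string `json:"challenge"`
    ProverVersion string `json:"prover_version"`
    ProverName    string `json:"prover_name"`
    HardForkName  string `json:"hard_fork_name"`
}

func main() {
    buf, _ := json.Marshal(loginMessage{
        Challenge:     "rand-challenge",
        ProverVersion: "v4.3.92",
        ProverName:    "prover-1",
        HardForkName:  "bernoulli",
    })
    fmt.Println(string(buf))
    // {"challenge":"rand-challenge","prover_version":"v4.3.92","prover_name":"prover-1","hard_fork_name":"bernoulli"}
}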
@@ -2,7 +2,6 @@ package types

// GetTaskParameter for ProverTasks request parameter
type GetTaskParameter struct {
    HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
    ProverHeight uint64 `form:"prover_height" json:"prover_height"`
    TaskType     int    `form:"task_type" json:"task_type"`
    VK           string `form:"vk" json:"vk"`
10
coordinator/internal/types/metric.go
Normal file
@@ -0,0 +1,10 @@
package types

var (
    // LabelProverName label name for prover name; common label name using in prometheus metrics, same rule applies to below.
    LabelProverName = "prover_name"
    // LabelProverPublicKey label name for prover public key
    LabelProverPublicKey = "prover_pubkey"
    // LabelProverVersion label name for prover version
    LabelProverVersion = "prover_version"
)
@@ -96,8 +96,10 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
|
||||
ChainID: 111,
|
||||
},
|
||||
ProverManager: &config.ProverManager{
|
||||
ProversPerSession: proversPerSession,
|
||||
Verifier: &config.VerifierConfig{MockMode: true},
|
||||
ProversPerSession: proversPerSession,
|
||||
Verifier: &config.VerifierConfig{
|
||||
MockMode: true,
|
||||
},
|
||||
BatchCollectionTimeSec: 10,
|
||||
ChunkCollectionTimeSec: 10,
|
||||
MaxVerifierWorkers: 10,
|
||||
@@ -113,6 +115,8 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
|
||||
var chainConf params.ChainConfig
|
||||
for forkName, forkNumber := range nameForkMap {
|
||||
switch forkName {
|
||||
case "shanghai":
|
||||
chainConf.ShanghaiBlock = big.NewInt(forkNumber)
|
||||
case "bernoulli":
|
||||
chainConf.BernoulliBlock = big.NewInt(forkNumber)
|
||||
case "london":
|
||||
@@ -258,12 +262,12 @@ func testGetTaskBlocked(t *testing.T) {
 	assert.NoError(t, err)

 	expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", chunkProver.publicKey(), chunkProver.proverName, chunkProver.proverVersion)
-	code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
+	code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
 	assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
 	assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

 	expectedErr = fmt.Errorf("get empty prover task")
-	code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
+	code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
 	assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
 	assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

@@ -274,12 +278,12 @@ func testGetTaskBlocked(t *testing.T) {
 	assert.NoError(t, err)

 	expectedErr = fmt.Errorf("get empty prover task")
-	code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
+	code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
 	assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
 	assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

 	expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", batchProver.publicKey(), batchProver.proverName, batchProver.proverVersion)
-	code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
+	code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
 	assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
 	assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
 }

@@ -299,12 +303,12 @@ func testOutdatedProverVersion(t *testing.T) {
 	assert.True(t, chunkProver.healthCheckSuccess(t))

 	expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, chunkProver.proverVersion)
-	code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
+	code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
 	assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
 	assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

 	expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, batchProver.proverVersion)
-	code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
+	code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
 	assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
 	assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
 }
@@ -358,7 +362,7 @@ func testHardForkAssignTask(t *testing.T) {
 		{
 			name:                  "noTaskForkBatchProverVersionLessThanHardForkProverNumberEqual0",
 			proofType:             message.ProofTypeBatch,
-			forkNumbers:           map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
+			forkNumbers:           map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
 			exceptTaskNumber:      0,
 			proverForkNames:       []string{"", ""},
 			exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},

@@ -448,7 +452,7 @@ func testHardForkAssignTask(t *testing.T) {
 		{ // hard fork 3, prover1:2 prover2:3 block [2-3]
 			name:                  "twoTaskForkChunkProverVersionMiddleHardForkProverNumberEqual0",
 			proofType:             message.ProofTypeChunk,
-			forkNumbers:           map[string]int64{"london": forkNumberThree},
+			forkNumbers:           map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
 			exceptTaskNumber:      2,
 			proverForkNames:       []string{"", "london"},
 			exceptGetTaskErrCodes: []int{types.Success, types.Success},

@@ -457,7 +461,7 @@ func testHardForkAssignTask(t *testing.T) {
 		{
 			name:                  "twoTaskForkBatchProverVersionMiddleHardForkProverNumberEqual0",
 			proofType:             message.ProofTypeBatch,
-			forkNumbers:           map[string]int64{"london": forkNumberThree},
+			forkNumbers:           map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
 			exceptTaskNumber:      2,
 			proverForkNames:       []string{"", "london"},
 			exceptGetTaskErrCodes: []int{types.Success, types.Success},

@@ -466,7 +470,7 @@ func testHardForkAssignTask(t *testing.T) {
 		{ // hard fork 2, prover 2 block [2-3]
 			name:                  "oneTaskForkChunkProverVersionLessThanHardForkProverNumberEqual0",
 			proofType:             message.ProofTypeChunk,
-			forkNumbers:           map[string]int64{"london": forkNumberThree},
+			forkNumbers:           map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
 			exceptTaskNumber:      1,
 			proverForkNames:       []string{"", ""},
 			exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},

@@ -534,7 +538,7 @@ func testHardForkAssignTask(t *testing.T) {
 				continue
 			}
 			getTaskNumber++
-			mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success)
+			mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success, tt.proverForkNames[i])
 		}
 		assert.Equal(t, getTaskNumber, tt.exceptTaskNumber)
 	})
@@ -577,7 +581,7 @@ func testValidProof(t *testing.T) {
 		assert.Equal(t, errCode, types.Success)
 		assert.Equal(t, errMsg, "")
 		assert.NotNil(t, proverTask)
-		provers[i].submitProof(t, proverTask, proofStatus, types.Success)
+		provers[i].submitProof(t, proverTask, proofStatus, types.Success, "istanbul")
 	}

 	// verify proof status
@@ -643,34 +647,21 @@ func testInvalidProof(t *testing.T) {
 	err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
 	assert.NoError(t, err)

-	// create mock provers.
-	provers := make([]*mockProver, 2)
-	for i := 0; i < len(provers); i++ {
-		var proofType message.ProofType
-		if i%2 == 0 {
-			proofType = message.ProofTypeChunk
-		} else {
-			proofType = message.ProofTypeBatch
-		}
-		provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
-		proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
-		assert.NotNil(t, proverTask)
-		assert.Equal(t, errCode, types.Success)
-		assert.Equal(t, errMsg, "")
-		provers[i].submitProof(t, proverTask, verifiedFailed, types.ErrCoordinatorHandleZkProofFailure)
-	}
+	proofType := message.ProofTypeBatch
+	provingStatus := verifiedFailed
+	expectErrCode := types.ErrCoordinatorHandleZkProofFailure
+	prover := newMockProver(t, "prover_test", coordinatorURL, proofType, version.Version)
+	proverTask, errCode, errMsg := prover.getProverTask(t, proofType, "istanbul")
+	assert.NotNil(t, proverTask)
+	assert.Equal(t, errCode, types.Success)
+	assert.Equal(t, errMsg, "")
+	prover.submitProof(t, proverTask, provingStatus, expectErrCode, "istanbul")

 	// verify proof status
-	var (
-		tick     = time.Tick(1500 * time.Millisecond)
-		tickStop = time.Tick(time.Minute)
-	)
-
 	var (
-		chunkProofStatus    types.ProvingStatus
+		tick                = time.Tick(1500 * time.Millisecond)
+		tickStop            = time.Tick(time.Minute)
 		batchProofStatus    types.ProvingStatus
-		chunkActiveAttempts int16
-		chunkMaxAttempts    int16
 		batchActiveAttempts int16
 		batchMaxAttempts    int16
 	)

@@ -678,24 +669,17 @@ func testInvalidProof(t *testing.T) {
 	for {
 		select {
 		case <-tick:
-			chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
-			assert.NoError(t, err)
 			batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
 			assert.NoError(t, err)
-			if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned {
+			if batchProofStatus == types.ProvingTaskAssigned {
 				return
 			}
-			chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
-			assert.NoError(t, err)
-			assert.Equal(t, 1, int(chunkMaxAttempts))
-			assert.Equal(t, 0, int(chunkActiveAttempts))
-
 			batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
 			assert.NoError(t, err)
 			assert.Equal(t, 1, int(batchMaxAttempts))
 			assert.Equal(t, 0, int(batchActiveAttempts))
 		case <-tickStop:
-			t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String())
+			t.Error("failed to check proof status", "batchProofStatus", batchProofStatus.String())
 			return
 		}
 	}
@@ -735,7 +719,7 @@ func testProofGeneratedFailed(t *testing.T) {
 		assert.NotNil(t, proverTask)
 		assert.Equal(t, errCode, types.Success)
 		assert.Equal(t, errMsg, "")
-		provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure)
+		provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure, "istanbul")
 	}

 	// verify proof status
@@ -858,14 +842,14 @@ func testTimeoutProof(t *testing.T) {
 	assert.NotNil(t, proverChunkTask2)
 	assert.Equal(t, chunkTask2ErrCode, types.Success)
 	assert.Equal(t, chunkTask2ErrMsg, "")
-	chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success)
+	chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success, "istanbul")

 	batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch, version.Version)
 	proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch, "istanbul")
 	assert.NotNil(t, proverBatchTask2)
 	assert.Equal(t, batchTask2ErrCode, types.Success)
 	assert.Equal(t, batchTask2ErrMsg, "")
-	batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success)
+	batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success, "istanbul")

 	// verify proof status, it should be verified now, because second prover sent valid proof
 	chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
@@ -51,9 +51,9 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof
 }

 // connectToCoordinator sets up a websocket client to connect to the prover manager.
-func (r *mockProver) connectToCoordinator(t *testing.T) string {
+func (r *mockProver) connectToCoordinator(t *testing.T, forkName string) string {
 	challengeString := r.challenge(t)
-	return r.login(t, challengeString)
+	return r.login(t, challengeString, forkName)
 }

 func (r *mockProver) challenge(t *testing.T) string {
@@ -76,18 +76,32 @@ func (r *mockProver) challenge(t *testing.T) string {
 	return loginData.Token
 }

-func (r *mockProver) login(t *testing.T, challengeString string) string {
-	authMsg := message.AuthMsg{
-		Identity: &message.Identity{
-			Challenge:     challengeString,
-			ProverName:    r.proverName,
-			ProverVersion: r.proverVersion,
-		},
-	}
-	assert.NoError(t, authMsg.SignWithKey(r.privKey))
-
-	body := fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
-		authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
-
+func (r *mockProver) login(t *testing.T, challengeString string, forkName string) string {
+	var body string
+	if forkName != "" {
+		authMsg := message.AuthMsg{
+			Identity: &message.Identity{
+				Challenge:     challengeString,
+				ProverName:    r.proverName,
+				ProverVersion: r.proverVersion,
+				HardForkName:  forkName,
+			},
+		}
+		assert.NoError(t, authMsg.SignWithKey(r.privKey))
+		body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\", \"hard_fork_name\":\"%s\"},\"signature\":\"%s\"}",
+			authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Identity.HardForkName, authMsg.Signature)
+	} else {
+		authMsg := message.LegacyAuthMsg{
+			Identity: &message.LegacyIdentity{
+				Challenge:     challengeString,
+				ProverName:    r.proverName,
+				ProverVersion: r.proverVersion,
+			},
+		}
+		assert.NoError(t, authMsg.SignWithKey(r.privKey))
+		body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
+			authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
+	}
+
 	var result ctypes.Response
 	client := resty.New()
@@ -137,7 +151,7 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {

 func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, forkName string) (*types.GetTaskSchema, int, string) {
 	// get task from coordinator
-	token := r.connectToCoordinator(t)
+	token := r.connectToCoordinator(t, forkName)
 	assert.NotEmpty(t, token)

 	type response struct {
@@ -151,7 +165,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
 	resp, err := client.R().
 		SetHeader("Content-Type", "application/json").
 		SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
-		SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "hard_fork_name": forkName}).
+		SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType)}).
 		SetResult(&result).
 		Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
 	assert.NoError(t, err)
@@ -160,9 +174,11 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
 }

 // Testing expected errors returned by coordinator.
-func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) {
+//
+//nolint:unparam
+func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType, forkName string) (int, string) {
 	// get task from coordinator
-	token := r.connectToCoordinator(t)
+	token := r.connectToCoordinator(t, forkName)
 	assert.NotEmpty(t, token)

 	type response struct {
@@ -185,7 +201,7 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
 	return result.ErrCode, result.ErrMsg
 }

-func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) {
+func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int, forkName string) {
 	proofMsgStatus := message.StatusOk
 	if proofStatus == generatedFailed {
 		proofMsgStatus = message.StatusProofError
@@ -228,7 +244,7 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
 		submitProof.Proof = string(encodeData)
 	}

-	token := r.connectToCoordinator(t)
+	token := r.connectToCoordinator(t, forkName)
 	assert.NotEmpty(t, token)

 	submitProofData, err := json.Marshal(submitProof)
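For reference, the login payload that the new mock prover signs and posts is the flat JSON built in the hunks above. A minimal, self-contained sketch of its shape; the field names come straight from the diff, while the concrete values and the signature are illustrative placeholders only:

package main

import (
	"encoding/json"
	"fmt"
)

// loginMessage mirrors the "message" object in the new login body above.
type loginMessage struct {
	Challenge     string `json:"challenge"`
	ProverName    string `json:"prover_name"`
	ProverVersion string `json:"prover_version"`
	HardForkName  string `json:"hard_fork_name"`
}

// loginRequest mirrors the full body: the signed message plus its signature.
type loginRequest struct {
	Message   loginMessage `json:"message"`
	Signature string       `json:"signature"`
}

func main() {
	req := loginRequest{
		Message: loginMessage{
			Challenge:     "<challenge token>", // returned by the coordinator's challenge endpoint
			ProverName:    "prover_test0",      // illustrative
			ProverVersion: "v4.3.x",            // illustrative
			HardForkName:  "bernoulli",         // an empty fork name falls back to the legacy message shape
		},
		Signature: "<secp256k1 signature over the message>", // produced by SignWithKey in the diff
	}
	out, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(out))
}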
@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
 	cur, err := Current(pgDB)
 	assert.NoError(t, err)
 	// total number of tables.
-	assert.Equal(t, int64(16), cur)
+	assert.Equal(t, int64(17), cur)
 }

 func testMigrate(t *testing.T) {
 	assert.NoError(t, Migrate(pgDB))
 	cur, err := Current(pgDB)
 	assert.NoError(t, err)
-	assert.Equal(t, int64(16), cur)
+	assert.Equal(t, int64(17), cur)
 }

 func testRollback(t *testing.T) {
 	version, err := Current(pgDB)
 	assert.NoError(t, err)
-	assert.Equal(t, int64(16), version)
+	assert.Equal(t, int64(17), version)

 	assert.NoError(t, Rollback(pgDB, nil))
27
database/migrate/migrations/00017_add_blob_meta_data.sql
Normal file
@@ -0,0 +1,27 @@
+-- +goose Up
+-- +goose StatementBegin
+
+ALTER TABLE chunk
+    ADD COLUMN crc_max INTEGER DEFAULT 0,
+    ADD COLUMN blob_size INTEGER DEFAULT 0;
+
+ALTER TABLE batch
+    ADD COLUMN data_hash VARCHAR DEFAULT '',
+    ADD COLUMN blob_data_proof BYTEA DEFAULT NULL,
+    ADD COLUMN blob_size INTEGER DEFAULT 0;
+
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+
+ALTER TABLE IF EXISTS batch
+    DROP COLUMN data_hash,
+    DROP COLUMN blob_data_proof,
+    DROP COLUMN blob_size;
+
+ALTER TABLE IF EXISTS chunk
+    DROP COLUMN crc_max,
+    DROP COLUMN blob_size;
+
+-- +goose StatementEnd
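This migration is what bumps the schema version from 16 to 17 in the migrate_test.go assertions above. It runs through the repo's goose-based migrate package; assuming the stock goose CLI is installed, the same file could also be applied or reverted by hand with `goose -dir database/migrate/migrations postgres "$DB_DSN" up` and `goose -dir database/migrate/migrations postgres "$DB_DSN" down`.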
@@ -21,14 +21,15 @@ import (
 type CoordinatorClient struct {
 	client *resty.Client

-	proverName string
-	priv       *ecdsa.PrivateKey
+	proverName   string
+	hardForkName string
+	priv         *ecdsa.PrivateKey

 	mu sync.Mutex
 }

 // NewCoordinatorClient constructs a new CoordinatorClient.
-func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
+func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, hardForkName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
 	client := resty.New().
 		SetTimeout(time.Duration(cfg.ConnectionTimeoutSec) * time.Second).
 		SetRetryCount(cfg.RetryCount).
@@ -50,9 +51,10 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv
 		"retry wait time (second)", cfg.RetryWaitTimeSec)

 	return &CoordinatorClient{
-		client:     client,
-		proverName: proverName,
-		priv:       priv,
+		client:       client,
+		proverName:   proverName,
+		hardForkName: hardForkName,
+		priv:         priv,
 	}, nil
 }

@@ -83,6 +85,7 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
 			ProverVersion: version.Version,
 			ProverName:    c.proverName,
 			Challenge:     challengeResult.Data.Token,
+			HardForkName:  c.hardForkName,
 		},
 	}

@@ -97,10 +100,12 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
 			Challenge     string `json:"challenge"`
 			ProverName    string `json:"prover_name"`
 			ProverVersion string `json:"prover_version"`
+			HardForkName  string `json:"hard_fork_name"`
 		}{
 			Challenge:     authMsg.Identity.Challenge,
 			ProverName:    authMsg.Identity.ProverName,
 			ProverVersion: authMsg.Identity.ProverVersion,
+			HardForkName:  authMsg.Identity.HardForkName,
 		},
 		Signature: authMsg.Signature,
 	}

@@ -25,6 +25,7 @@ type LoginRequest struct {
 		Challenge     string `json:"challenge"`
 		ProverName    string `json:"prover_name"`
 		ProverVersion string `json:"prover_version"`
+		HardForkName  string `json:"hard_fork_name"`
 	} `json:"message"`
 	Signature string `json:"signature"`
 }
@@ -41,7 +42,6 @@ type LoginResponse struct {

 // GetTaskRequest defines the request structure for GetTask API
 type GetTaskRequest struct {
-	HardForkName string            `json:"hard_fork_name"`
 	TaskType     message.ProofType `json:"task_type"`
 	ProverHeight uint64            `json:"prover_height,omitempty"`
 	VK           string            `json:"vk"`
@@ -82,7 +82,7 @@ func NewProver(ctx context.Context, cfg *config.Config) (*Prover, error) {
 	}
 	log.Info("init prover_core successfully!")

-	coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, priv)
+	coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, cfg.HardForkName, priv)
 	if err != nil {
 		return nil, err
 	}
@@ -178,8 +178,7 @@ func (r *Prover) proveAndSubmit() error {
 func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) {
 	// prepare the request
 	req := &client.GetTaskRequest{
-		HardForkName: r.cfg.HardForkName,
-		TaskType:     r.Type(),
+		TaskType: r.Type(),
 		// we may not be able to get the vk at the first time, so we should pass vk to the coordinator every time we getTask
 		// instead of passing vk when we login
 		VK: r.proverCore.VK,
@@ -76,7 +76,7 @@ func action(ctx *cli.Context) error {
 		}
 	})

-	log.Info("Start event-watcher successfully")
+	log.Info("Start event-watcher successfully", "version", version.Version)

 	// Catch CTRL-C to ensure a graceful shutdown.
 	interrupt := make(chan os.Signal, 1)

@@ -109,7 +109,7 @@ func action(ctx *cli.Context) error {
 	go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)

 	// Finish start all message relayer functions
-	log.Info("Start gas-oracle successfully")
+	log.Info("Start gas-oracle successfully", "version", version.Version)

 	// Catch CTRL-C to ensure a graceful shutdown.
 	interrupt := make(chan os.Signal, 1)

@@ -115,7 +115,7 @@ func action(ctx *cli.Context) error {
 	go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)

 	// Finish start all rollup relayer functions.
-	log.Info("Start rollup-relayer successfully")
+	log.Info("Start rollup-relayer successfully", "version", version.Version)

 	// Catch CTRL-C to ensure a graceful shutdown.
 	interrupt := make(chan os.Signal, 1)
217
rollup/construct-commitBatch-calldata/main.go
Normal file
File diff suppressed because one or more lines are too long
170
rollup/construct-finalizeBatch-calldata/main.go
Normal file
File diff suppressed because one or more lines are too long
103
rollup/dump-batch-info/main.go
Normal file
@@ -0,0 +1,103 @@
+package main
+
+import (
+	"context"
+	"encoding/hex"
+	"os"
+	"strconv"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/log"
+
+	"scroll-tech/common/database"
+	"scroll-tech/common/types/encoding"
+	"scroll-tech/common/types/encoding/codecv1"
+
+	"scroll-tech/rollup/internal/orm"
+)
+
+func main() {
+	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
+	glogger.Verbosity(log.LvlInfo)
+	log.Root().SetHandler(glogger)
+
+	if len(os.Args) < 2 {
+		log.Crit("no batch index provided")
+		return
+	}
+
+	batchIndexStr := os.Args[1]
+	batchIndexInt, err := strconv.Atoi(batchIndexStr)
+	if err != nil || batchIndexInt <= 0 {
+		log.Crit("invalid batch index", "indexStr", batchIndexStr, "err", err)
+		return
+	}
+	batchIndex := uint64(batchIndexInt)
+
+	db, err := database.InitDB(&database.Config{
+		DriverName: "postgres",
+		DSN:        os.Getenv("DB_DSN"),
+		MaxOpenNum: 200,
+		MaxIdleNum: 20,
+	})
+	if err != nil {
+		log.Crit("failed to init db", "err", err)
+	}
+	defer func() {
+		if deferErr := database.CloseDB(db); deferErr != nil {
+			log.Error("failed to close db", "err", err)
+		}
+	}()
+
+	l2BlockOrm := orm.NewL2Block(db)
+	chunkOrm := orm.NewChunk(db)
+	batchOrm := orm.NewBatch(db)
+
+	dbBatch, err := batchOrm.GetBatchByIndex(context.Background(), batchIndex)
+	if err != nil {
+		log.Crit("failed to get batch", "index", batchIndex, "err", err)
+		return
+	}
+
+	dbParentBatch, err := batchOrm.GetBatchByIndex(context.Background(), batchIndex-1)
+	if err != nil {
+		log.Crit("failed to get batch", "index", batchIndex-1, "err", err)
+		return
+	}
+
+	dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
+	if err != nil {
+		log.Crit("failed to fetch chunks", "err", err)
+		return
+	}
+
+	chunks := make([]*encoding.Chunk, len(dbChunks))
+	for i, c := range dbChunks {
+		blocks, err := l2BlockOrm.GetL2BlocksInRange(context.Background(), c.StartBlockNumber, c.EndBlockNumber)
+		if err != nil {
+			log.Crit("failed to fetch blocks", "err", err)
+			return
+		}
+		chunks[i] = &encoding.Chunk{Blocks: blocks}
+	}
+
+	batch := &encoding.Batch{
+		Index:                      dbBatch.Index,
+		TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
+		ParentBatchHash:            common.HexToHash(dbParentBatch.Hash),
+		Chunks:                     chunks,
+	}
+
+	daBatch, err := codecv1.NewDABatch(batch)
+	if err != nil {
+		log.Crit("failed to create DA batch", "err", err)
+		return
+	}
+
+	blobDataProof, err := daBatch.BlobDataProof()
+	if err != nil {
+		log.Crit("failed to get blob data proof", "err", err)
+		return
+	}
+
+	log.Info("batchMeta", "batchHash", daBatch.Hash().Hex(), "batchDataHash", daBatch.DataHash.Hex(), "blobDataProof", hex.EncodeToString(blobDataProof), "blobData", hex.EncodeToString(daBatch.Blob()[:]))
+}
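The tool takes the batch index as its single positional argument and reads the Postgres connection string from the DB_DSN environment variable, so it would presumably be invoked as something like `DB_DSN="postgres://..." go run ./rollup/dump-batch-info 12345`; it then reconstructs the codecv1 batch from the database and prints its batch hash, data hash, blob data proof, and raw blob bytes.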
@@ -64,9 +64,6 @@ type RelayerConfig struct {
 	EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"`
 	// The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true.
 	FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"`
-
-	EnableTestEnvSamplingFeature bool   `json:"enable_test_env_sampling_feature"`
-	SamplingPercentage           uint64 `json:"sampling_percentage,omitempty"`
 }

 // GasOracleConfig The config for updating gas price oracle.
@@ -131,10 +128,6 @@ func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
 		return fmt.Errorf("error converting and checking finalize sender private key: %w", err)
 	}

-	if r.SamplingPercentage == 0 {
-		r.SamplingPercentage = 100
-	}
-
 	return nil
 }

|
||||
@@ -461,12 +461,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
}
|
||||
}
|
||||
|
||||
if r.cfg.EnableTestEnvSamplingFeature && ((batch.Index % 100) >= r.cfg.SamplingPercentage) {
|
||||
if err := r.finalizeBatch(batch, false); err != nil {
|
||||
log.Error("Failed to finalize skipped batch without proof", "index", batch.Index, "hash", batch.Hash, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
case types.ProvingTaskVerified:
|
||||
log.Info("Start to roll up zk proof", "hash", batch.Hash)
|
||||
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
|
||||
@@ -591,6 +585,24 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error
 		log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String(), "err", err)
 		return err
 	}

+	// Updating the proving status when finalizing without proof, thus the coordinator could omit this task.
+	// it isn't a necessary step, so don't put in a transaction with UpdateFinalizeTxHashAndRollupStatus
+	if !withProof {
+		txErr := r.db.Transaction(func(tx *gorm.DB) error {
+			if updateErr := r.batchOrm.UpdateProvingStatus(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil {
+				return updateErr
+			}
+			if updateErr := r.chunkOrm.UpdateProvingStatusByBatchHash(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil {
+				return updateErr
+			}
+			return nil
+		})
+		if txErr != nil {
+			log.Error("Updating chunk and batch proving status when finalizing without proof failure", "batchHash", dbBatch.Hash, "err", txErr)
+		}
+	}
+
 	r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
 	return nil
 }
@@ -7,6 +7,7 @@ import (
 	"net/http"
 	"strings"
 	"testing"
 	"time"

+	"github.com/agiledragon/gomonkey/v2"
 	"github.com/gin-gonic/gin"
@@ -181,9 +182,9 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
 	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
 	assert.NoError(t, err)
 	chunkOrm := orm.NewChunk(db)
-	_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
+	chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
 	assert.NoError(t, err)
-	_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
+	chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
 	assert.NoError(t, err)

 	batch := &encoding.Batch{
@@ -200,11 +201,30 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
 	err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
 	assert.NoError(t, err)

+	err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil)
+	assert.NoError(t, err)
+
 	// Check the database for the updated status using TryTimes.
 	ok := utils.TryTimes(5, func() bool {
 		relayer.ProcessCommittedBatches()
-		statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
-		return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing
+		time.Sleep(time.Second)
+
+		batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0)
+		if batchErr != nil {
+			return false
+		}
+		chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
+		if chunkErr != nil {
+			return false
+		}
+
+		batchStatus := len(batchInDB) == 1 && types.RollupStatus(batchInDB[0].RollupStatus) == types.RollupFinalizing &&
+			types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified
+
+		chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified &&
+			types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified
+
+		return batchStatus && chunkStatus
 	})
 	assert.True(t, ok)
 	relayer.StopSenders()
@@ -25,6 +25,7 @@ type Batch struct {
 	// batch
 	Index           uint64 `json:"index" gorm:"column:index"`
 	Hash            string `json:"hash" gorm:"column:hash"`
+	DataHash        string `json:"data_hash" gorm:"column:data_hash"`
 	StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
 	StartChunkHash  string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
 	EndChunkIndex   uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`

@@ -53,6 +54,10 @@ type Batch struct {
 	OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
 	OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`

+	// blob
+	BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
+	BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	TotalL1CommitGas          uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas;default:0"`
 	TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size;default:0"`

@@ -257,6 +262,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
 	newBatch := Batch{
 		Index:           batch.Index,
 		Hash:            batchMeta.BatchHash.Hex(),
+		DataHash:        batchMeta.BatchDataHash.Hex(),
 		StartChunkHash:  batchMeta.StartChunkHash.Hex(),
 		StartChunkIndex: startChunkIndex,
 		EndChunkHash:    batchMeta.EndChunkHash.Hex(),

@@ -271,6 +277,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
 		OracleStatus:              int16(types.GasOraclePending),
 		TotalL1CommitGas:          metrics.L1CommitGas,
 		TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
+		BlobDataProof:             batchMeta.BatchBlobDataProof,
+		BlobSize:                  metrics.L1CommitBlobSize,
 	}

 	db := o.db
@@ -44,6 +44,10 @@ type Chunk struct {
 	// batch
 	BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`

+	// blob
+	CrcMax   uint64 `json:"crc_max" gorm:"column:crc_max"`
+	BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
 	TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`

@@ -140,6 +144,20 @@ func (o *Chunk) GetChunksGEIndex(ctx context.Context, index uint64, limit int) (
 	return chunks, nil
 }

+// GetChunksByBatchHash retrieves chunks by batch hash
+// for test
+func (o *Chunk) GetChunksByBatchHash(ctx context.Context, batchHash string) ([]*Chunk, error) {
+	db := o.db.WithContext(ctx)
+	db = db.Model(&Chunk{})
+	db = db.Where("batch_hash = ?", batchHash)
+
+	var chunks []*Chunk
+	if err := db.Find(&chunks).Error; err != nil {
+		return nil, fmt.Errorf("Chunk.GetChunksByBatchHash error: %w", err)
+	}
+	return chunks, nil
+}
+
 // InsertChunk inserts a new chunk into the database.
 func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Chunk, error) {
 	if chunk == nil || len(chunk.Blocks) == 0 {

@@ -198,6 +216,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
 		ParentChunkStateRoot: parentChunkStateRoot,
 		WithdrawRoot:         chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
 		ProvingStatus:        int16(types.ProvingTaskUnassigned),
+		CrcMax:               metrics.CrcMax,
+		BlobSize:             metrics.L1CommitBlobSize,
 	}

 	db := o.db

@@ -242,6 +262,34 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
 	return nil
 }

+// UpdateProvingStatusByBatchHash updates the proving_status for chunks within the specified batch_hash
+func (o *Chunk) UpdateProvingStatusByBatchHash(ctx context.Context, batchHash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
+	updateFields := make(map[string]interface{})
+	updateFields["proving_status"] = int(status)
+
+	switch status {
+	case types.ProvingTaskAssigned:
+		updateFields["prover_assigned_at"] = time.Now()
+	case types.ProvingTaskUnassigned:
+		updateFields["prover_assigned_at"] = nil
+	case types.ProvingTaskVerified:
+		updateFields["proved_at"] = time.Now()
+	}
+
+	db := o.db
+	if len(dbTX) > 0 && dbTX[0] != nil {
+		db = dbTX[0]
+	}
+	db = db.WithContext(ctx)
+	db = db.Model(&Chunk{})
+	db = db.Where("batch_hash = ?", batchHash)
+
+	if err := db.Updates(updateFields).Error; err != nil {
+		return fmt.Errorf("Chunk.UpdateProvingStatusByBatchHash error: %w, batch hash: %v, status: %v", err, batchHash, status.String())
+	}
+	return nil
+}
+
 // UpdateBatchHashInRange updates the batch_hash for chunks within the specified range (inclusive).
 // The range is closed, i.e., it includes both start and end indices.
 func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, batchHash string, dbTX ...*gorm.DB) error {
@@ -191,10 +191,12 @@ func GetChunkHash(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64, code

 // BatchMetadata represents the metadata of a batch.
 type BatchMetadata struct {
-	BatchHash      common.Hash
-	BatchBytes     []byte
-	StartChunkHash common.Hash
-	EndChunkHash   common.Hash
+	BatchHash          common.Hash
+	BatchDataHash      common.Hash
+	BatchBlobDataProof []byte
+	BatchBytes         []byte
+	StartChunkHash     common.Hash
+	EndChunkHash       common.Hash
 }

 // GetBatchMetadata retrieves the metadata of a batch.

@@ -212,9 +214,11 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
 		return nil, fmt.Errorf("failed to create codecv0 DA batch: %w", err)
 	}

+	// BatchBlobDataProof is left as empty for codecv0.
 	batchMeta := &BatchMetadata{
-		BatchHash:  daBatch.Hash(),
-		BatchBytes: daBatch.Encode(),
+		BatchHash:     daBatch.Hash(),
+		BatchDataHash: daBatch.DataHash,
+		BatchBytes:    daBatch.Encode(),
 	}

 	startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)

@@ -243,9 +247,16 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
 		return nil, fmt.Errorf("failed to create codecv1 DA batch: %w", err)
 	}

+	blobDataProof, err := daBatch.BlobDataProof()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get codecv1 blob data proof: %w", err)
+	}
+
 	batchMeta := &BatchMetadata{
-		BatchHash:  daBatch.Hash(),
-		BatchBytes: daBatch.Encode(),
+		BatchHash:          daBatch.Hash(),
+		BatchDataHash:      daBatch.DataHash,
+		BatchBlobDataProof: blobDataProof,
+		BatchBytes:         daBatch.Encode(),
 	}

 	startDAChunk, err := codecv1.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
@@ -21,6 +21,7 @@ type Batch struct {
 	// batch
 	Index           uint64 `json:"index" gorm:"column:index"`
 	Hash            string `json:"hash" gorm:"column:hash"`
+	DataHash        string `json:"data_hash" gorm:"column:data_hash"`
 	StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
 	StartChunkHash  string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
 	EndChunkIndex   uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`

@@ -49,6 +50,10 @@ type Batch struct {
 	OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
 	OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`

+	// blob
+	BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
+	BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
 	UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`

@@ -151,6 +156,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
 	newBatch := Batch{
 		Index:           batch.Index,
 		Hash:            daBatch.Hash().Hex(),
+		DataHash:        daBatch.DataHash.Hex(),
 		StartChunkHash:  startDAChunkHash.Hex(),
 		StartChunkIndex: startChunkIndex,
 		EndChunkHash:    endDAChunkHash.Hex(),

@@ -163,6 +169,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
 		ProvingStatus: int16(types.ProvingTaskUnassigned),
 		RollupStatus:  int16(types.RollupPending),
 		OracleStatus:  int16(types.GasOraclePending),
+		BlobDataProof: nil, // using mock value because this piece of codes is only used in unit tests
+		BlobSize:      0,   // using mock value because this piece of codes is only used in unit tests
 	}

 	db := o.db
@@ -43,6 +43,10 @@ type Chunk struct {
 	// batch
 	BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`

+	// blob
+	CrcMax   uint64 `json:"crc_max" gorm:"column:crc_max"`
+	BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
 	TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`

@@ -150,6 +154,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
 		ParentChunkStateRoot: parentChunkStateRoot,
 		WithdrawRoot:         chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
 		ProvingStatus:        int16(types.ProvingTaskUnassigned),
+		CrcMax:               0, // using mock value because this piece of codes is only used in unit tests
+		BlobSize:             0, // using mock value because this piece of codes is only used in unit tests
 	}

 	db := o.db