Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-12 07:28:08 -05:00)

Compare commits: codecv1-sc...v4.3.91-en (6 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | af8437aa9d |  |
|  | 133547c91c |  |
|  | fcbf83bf3d |  |
|  | 330fde44a9 |  |
|  | 122cffe489 |  |
|  | b4dac7ab82 |  |
@@ -1,133 +0,0 @@
package main

import (
    "context"
    "fmt"
    "log"
    "math/big"
    "time"

    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/rlp"
)

const targetTxSize = 126914

func main() {
    privateKeyHex := "0000000000000000000000000000000000000000000000000000000000000042"
    privateKey, err := crypto.HexToECDSA(privateKeyHex)
    if err != nil {
        log.Fatalf("Invalid private key: %v", err)
    }

    client, err := ethclient.Dial("http://localhost:9999")
    if err != nil {
        log.Fatalf("Failed to connect to the Ethereum client: %v", err)
    }

    auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(222222))
    if err != nil {
        log.Fatalf("Failed to create transactor with chain ID 222222: %v", err)
    }

    nonce, err := client.PendingNonceAt(context.Background(), auth.From)
    if err != nil {
        log.Fatalf("Failed to retrieve account nonce: %v", err)
    }

    totalTxNum := []uint64{2, 3, 4, 5, 6}
    for _, num := range totalTxNum {
        prepareAndSendTransactions(client, auth, nonce, num)
        nonce += num
    }
}

func prepareAndSendTransactions(client *ethclient.Client, auth *bind.TransactOpts, initialNonce uint64, totalTxNum uint64) error {
    gasLimit := uint64(5000000)
    gasPrice := big.NewInt(1000000000)

    var signedTxs []*types.Transaction
    payloadSum := 0

    dataPayload := make([]byte, targetTxSize/totalTxNum)
    for i := range dataPayload {
        dataPayload[i] = 0xff
    }

    for i := uint64(0); i < totalTxNum-1; i++ {
        txData := &types.LegacyTx{
            Nonce:    initialNonce + i,
            GasPrice: gasPrice,
            Gas:      gasLimit,
            To:       &auth.From,
            Data:     dataPayload,
        }

        signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
        if err != nil {
            log.Fatalf("Failed to sign tx: %v", err)
        }

        rlpTxData, err := rlp.EncodeToBytes(signedTx)
        if err != nil {
            log.Fatalf("Failed to RLP encode the tx: %v", err)
        }

        payloadSum += len(rlpTxData)
        signedTxs = append(signedTxs, signedTx)
    }

    fmt.Println("payload sum", payloadSum)

    lowerBound := 0
    upperBound := targetTxSize
    for lowerBound <= upperBound {
        mid := (lowerBound + upperBound) / 2
        data := make([]byte, mid)
        for i := range data {
            data[i] = 0xff
        }

        txData := &types.LegacyTx{
            Nonce:    initialNonce + totalTxNum - 1,
            GasPrice: gasPrice,
            Gas:      gasLimit,
            To:       &auth.From,
            Data:     data,
        }

        signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
        if err != nil {
            log.Fatalf("Failed to sign tx: %v", err)
        }

        rlpTxData, err := rlp.EncodeToBytes(signedTx)
        if err != nil {
            log.Fatalf("Failed to RLP encode the tx: %v", err)
        }
        txSize := len(rlpTxData)

        if payloadSum+txSize < targetTxSize {
            lowerBound = mid + 1
        } else if payloadSum+txSize > targetTxSize {
            upperBound = mid - 1
        } else {
            fmt.Println("payloadSum+txSize", payloadSum+txSize)
            signedTxs = append(signedTxs, signedTx)
            break
        }
    }

    for _, signedTx := range signedTxs {
        if err := client.SendTransaction(context.Background(), signedTx); err != nil {
            return fmt.Errorf("failed to send transaction: %v", err)
        }
        fmt.Printf("Transaction with nonce %d sent\n", signedTx.Nonce())
        time.Sleep(10 * time.Second)
    }

    return nil
}
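Note: the deleted script above splits a 0xff data payload across totalTxNum transactions and then binary-searches the length of the final transaction's payload so that the summed RLP-encoded sizes of all signed transactions land exactly on targetTxSize. A minimal sketch of the quantity the search targets, assuming the same imports as the script (the helper name is illustrative and not part of the diff):

// sumRLPSizes is an illustrative helper, not part of the original tool: it
// computes the total that the binary search above drives to exactly targetTxSize.
func sumRLPSizes(txs []*types.Transaction) (int, error) {
    total := 0
    for _, tx := range txs {
        encoded, err := rlp.EncodeToBytes(tx)
        if err != nil {
            return 0, err
        }
        total += len(encoded)
    }
    return total, nil
}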
@@ -1,131 +0,0 @@
package main

import (
    "context"
    "fmt"
    "log"
    "math/big"

    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/rlp"
)

const targetTxSize = 120568

func main() {
    privateKeyHex := "0000000000000000000000000000000000000000000000000000000000000042"
    privateKey, err := crypto.HexToECDSA(privateKeyHex)
    if err != nil {
        log.Fatalf("Invalid private key: %v", err)
    }

    client, err := ethclient.Dial("http://localhost:9999")
    if err != nil {
        log.Fatalf("Failed to connect to the Ethereum client: %v", err)
    }

    auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(222222))
    if err != nil {
        log.Fatalf("Failed to create transactor with chain ID 222222: %v", err)
    }

    nonce, err := client.PendingNonceAt(context.Background(), auth.From)
    if err != nil {
        log.Fatalf("Failed to retrieve account nonce: %v", err)
    }
    prepareAndSendTransactions(client, auth, nonce, 1)
    prepareAndSendTransactions(client, auth, nonce+1, 2)
    prepareAndSendTransactions(client, auth, nonce+1+2, 3)
    prepareAndSendTransactions(client, auth, nonce+1+2+3, 4)
    prepareAndSendTransactions(client, auth, nonce+1+2+3+4, 5)
    prepareAndSendTransactions(client, auth, nonce+1+2+3+4+5, 6)
}

func prepareAndSendTransactions(client *ethclient.Client, auth *bind.TransactOpts, initialNonce uint64, totalTxNum uint64) error {
    gasLimit := uint64(5000000)
    gasPrice := big.NewInt(1000000000)

    var signedTxs []*types.Transaction
    payloadSum := 0

    dataPayload := make([]byte, targetTxSize/totalTxNum)
    for i := range dataPayload {
        dataPayload[i] = 0xff
    }

    for i := uint64(0); i < totalTxNum-1; i++ {
        txData := &types.LegacyTx{
            Nonce:    initialNonce + i,
            GasPrice: gasPrice,
            Gas:      gasLimit,
            To:       &auth.From,
            Data:     dataPayload,
        }

        signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
        if err != nil {
            log.Fatalf("Failed to sign tx: %v", err)
        }

        rlpTxData, err := rlp.EncodeToBytes(signedTx)
        if err != nil {
            log.Fatalf("Failed to RLP encode the tx: %v", err)
        }

        payloadSum += len(rlpTxData)
        signedTxs = append(signedTxs, signedTx)
    }

    fmt.Println("payload sum", payloadSum)

    lowerBound := 0
    upperBound := targetTxSize
    for lowerBound <= upperBound {
        mid := (lowerBound + upperBound) / 2
        data := make([]byte, mid)
        for i := range data {
            data[i] = 0xff
        }

        txData := &types.LegacyTx{
            Nonce:    initialNonce + totalTxNum - 1,
            GasPrice: gasPrice,
            Gas:      gasLimit,
            To:       &auth.From,
            Data:     data,
        }

        signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
        if err != nil {
            log.Fatalf("Failed to sign tx: %v", err)
        }

        rlpTxData, err := rlp.EncodeToBytes(signedTx)
        if err != nil {
            log.Fatalf("Failed to RLP encode the tx: %v", err)
        }
        txSize := len(rlpTxData)

        if payloadSum+txSize < targetTxSize {
            lowerBound = mid + 1
        } else if payloadSum+txSize > targetTxSize {
            upperBound = mid - 1
        } else {
            fmt.Println("payloadSum+txSize", payloadSum+txSize)
            signedTxs = append(signedTxs, signedTx)
            break
        }
    }

    for i := len(signedTxs) - 1; i >= 0; i-- {
        if err := client.SendTransaction(context.Background(), signedTxs[i]); err != nil {
            return fmt.Errorf("failed to send transaction: %v", err)
        }
        fmt.Printf("Transaction with nonce %d sent\n", signedTxs[i].Nonce())
    }

    return nil
}
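Note: this second deleted script differs from the first only in its target (120568 bytes instead of 126914), in issuing fixed groups of 1 through 6 transactions at explicit nonce offsets rather than looping over group sizes, and in broadcasting each group in descending nonce order with no delay between sends.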
@@ -5,7 +5,7 @@ import (
    "runtime/debug"
)

-var tag = "v4.3.92"
+var tag = "v4.3.91"

var commit = func() string {
    if info, ok := debug.ReadBuildInfo(); ok {
@@ -1,23 +0,0 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity =0.8.24;

import {Script} from "forge-std/Script.sol";
import {console} from "forge-std/console.sol";

import {Ecc} from "../../src/misc/ecc.sol";

contract DeployEcc is Script {
    function run() external {
        uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
        vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
        Ecc ecc = new Ecc();
        address L2_ECC_ADDR = address(ecc);
        vm.stopBroadcast();

        logAddress("L2_ECC_ADDR", L2_ECC_ADDR);
    }

    function logAddress(string memory name, address addr) internal view {
        console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
    }
}
@@ -1,23 +0,0 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity =0.8.24;

import {Script} from "forge-std/Script.sol";
import {console} from "forge-std/console.sol";

import {Hash} from "../../src/misc/hash.sol";

contract DeployHash is Script {
    function run() external {
        uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
        vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
        Hash hash = new Hash();
        address L2_HASH_ADDR = address(hash);
        vm.stopBroadcast();

        logAddress("L2_HASH_ADDR", L2_HASH_ADDR);
    }

    function logAddress(string memory name, address addr) internal view {
        console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
    }
}
@@ -92,12 +92,10 @@ contract DeployL1BridgeContracts is Script {
    }

    function deployMultipleVersionRollupVerifier() internal {
-       uint256[] memory _versions = new uint256[](2);
-       address[] memory _verifiers = new address[](2);
+       uint256[] memory _versions = new uint256[](1);
+       address[] memory _verifiers = new address[](1);
        _versions[0] = 0;
        _verifiers[0] = address(zkEvmVerifierV1);
-       _versions[1] = 1;
-       _verifiers[1] = address(zkEvmVerifierV1);
        rollupVerifier = new MultipleVersionRollupVerifier(L1_SCROLL_CHAIN_PROXY_ADDR, _versions, _verifiers);

        logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));
@@ -1,34 +0,0 @@
/* eslint-disable node/no-missing-import */
import * as dotenv from "dotenv";

import { ethers } from "hardhat";

dotenv.config();

async function main() {
  const [deployer] = await ethers.getSigners();

  const l1ScrollMessengerAddress = process.env.L1_SCROLL_MESSENGER_PROXY_ADDR!;
  const l2EccContractAddress = process.env.L2_ECC_ADDR!;
  const payload = process.env.SKIPPED_TX_PAYLOAD!; // TODO: calc the payload, parse as bytes

  const L1ScrollMessenger = await ethers.getContractAt("L1ScrollMessenger", l1ScrollMessengerAddress, deployer);

  const tx = await L1ScrollMessenger.sendMessage(
    l2EccContractAddress, // address _to
    0, // uint256 _value
    payload, // bytes memory _message
    100000000 // uint256 _gasLimit
  );

  console.log(`calling ${l2EccContractAddress} with payload from l1, hash:`, tx.hash);
  const receipt = await tx.wait();
  console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}

// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main().catch((error) => {
  console.error(error);
  process.exitCode = 1;
});
@@ -1,127 +0,0 @@
// SPDX-License-Identifier: GPL-3.0

pragma solidity =0.8.24;

contract Ecc {
    /* ECC Functions */
    // https://etherscan.io/address/0x41bf00f080ed41fa86201eac56b8afb170d9e36d#code
    function ecAdd(uint256[2] memory p0, uint256[2] memory p1) public view returns (uint256[2] memory retP) {
        uint256[4] memory i = [p0[0], p0[1], p1[0], p1[1]];

        assembly {
            // call ecadd precompile
            // inputs are: x1, y1, x2, y2
            if iszero(staticcall(not(0), 0x06, i, 0x80, retP, 0x40)) {
                revert(0, 0)
            }
        }
    }

    // https://etherscan.io/address/0x41bf00f080ed41fa86201eac56b8afb170d9e36d#code
    function ecMul(uint256[2] memory p, uint256 s) public view returns (uint256[2] memory retP) {
        // With a public key (x, y), this computes p = scalar * (x, y).
        uint256[3] memory i = [p[0], p[1], s];

        assembly {
            // call ecmul precompile
            // inputs are: x, y, scalar
            if iszero(staticcall(not(0), 0x07, i, 0x60, retP, 0x40)) {
                revert(0, 0)
            }
        }
    }

    // scroll-tech/scroll/contracts/src/libraries/verifier/RollupVerifier.sol
    struct G1Point {
        uint256 x;
        uint256 y;
    }
    struct G2Point {
        uint256[2] x;
        uint256[2] y;
    }

    function ecPairing(G1Point[] memory p1, G2Point[] memory p2) internal view returns (bool) {
        uint256 length = p1.length * 6;
        uint256[] memory input = new uint256[](length);
        uint256[1] memory result;
        bool ret;

        require(p1.length == p2.length);

        for (uint256 i = 0; i < p1.length; i++) {
            input[0 + i * 6] = p1[i].x;
            input[1 + i * 6] = p1[i].y;
            input[2 + i * 6] = p2[i].x[0];
            input[3 + i * 6] = p2[i].x[1];
            input[4 + i * 6] = p2[i].y[0];
            input[5 + i * 6] = p2[i].y[1];
        }

        assembly {
            ret := staticcall(gas(), 8, add(input, 0x20), mul(length, 0x20), result, 0x20)
        }
        require(ret);
        return result[0] != 0;
    }

    /* Bench */
    function ecAdds(uint256 n) public {
        uint256[2] memory p0;
        p0[0] = 1;
        p0[1] = 2;
        uint256[2] memory p1;
        p1[0] = 1;
        p1[1] = 2;

        for (uint256 i = 0; i < n; i++) {
            ecAdd(p0, p1);
        }
    }

    function ecMuls(uint256 n) public {
        uint256[2] memory p0;
        p0[0] = 1;
        p0[1] = 2;

        for (uint256 i = 0; i < n; i++) {
            ecMul(p0, 3);
        }
    }

    function ecPairings(uint256 n) public {
        G1Point[] memory g1_points = new G1Point[](2);
        G2Point[] memory g2_points = new G2Point[](2);
        g1_points[0].x = 0x0000000000000000000000000000000000000000000000000000000000000001;
        g1_points[0].y = 0x0000000000000000000000000000000000000000000000000000000000000002;
        g2_points[0].x[1] = 0x1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed;
        g2_points[0].x[0] = 0x198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2;
        g2_points[0].y[1] = 0x12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa;
        g2_points[0].y[0] = 0x090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b;
        g1_points[1].x = 0x1aa125a22bd902874034e67868aed40267e5575d5919677987e3bc6dd42a32fe;
        g1_points[1].y = 0x1bacc186725464068956d9a191455c2d6f6db282d83645c610510d8d4efbaee0;
        g2_points[1].x[1] = 0x1b7734c80605f71f1e2de61e998ce5854ff2abebb76537c3d67e50d71422a852;
        g2_points[1].x[0] = 0x10d5a1e34b2388a5ebe266033a5e0e63c89084203784da0c6bd9b052a78a2cac;
        g2_points[1].y[1] = 0x275739c5c2cdbc72e37c689e2ab441ea76c1d284b9c46ae8f5c42ead937819e1;
        g2_points[1].y[0] = 0x018de34c5b7c3d3d75428bbe050f1449ea3d9961d563291f307a1874f7332e65;

        for (uint256 i = 0; i < n; i++) {
            ecPairing(g1_points, g2_points);
            // bool checked = false;
            // checked = ecPairing(g1_points, g2_points);
            // require(checked);
        }
    }

    // https://github.com/OpenZeppelin/openzeppelin-contracts/blob/8a0b7bed82d6b8053872c3fd40703efd58f5699d/test/utils/cryptography/ECDSA.test.js#L230
    function ecRecovers(uint256 n) public {
        bytes32 hash = 0xb94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9;
        bytes32 r = 0xe742ff452d41413616a5bf43fe15dd88294e983d3d36206c2712f39083d638bd;
        uint8 v = 0x1b;
        bytes32 s = 0xe0a0fc89be718fbc1033e1d30d78be1c68081562ed2e97af876f286f3453231d;

        for (uint256 i = 0; i < n; i++) {
            ecrecover(hash, v, r, s);
        }
    }
}
@@ -1,34 +0,0 @@
// SPDX-License-Identifier: GPL-3.0

pragma solidity =0.8.24;

contract Hash {
    function sha256(bytes memory input) public view returns (bytes memory out) {
        (bool ok, bytes memory out) = address(2).staticcall(input);
        require(ok);
    }

    function sha256Yul(bytes memory input) public view returns (bytes memory out) {
        assembly {
            // mstore(0, input)
            if iszero(staticcall(gas(), 2, 0, 32, 0, 32)) {
                revert(0, 0)
            }
            // return(0, 32)
        }
    }

    function sha256s(bytes memory input, uint256 n) public {
        for (uint256 i = 0; i < n; i++) {
            sha256(input);
        }
    }

    function keccak256s(uint256 n) public {
        bytes32[] memory output = new bytes32[](n);
        for (uint256 i = 0; i < n; i++) {
            bytes memory input = abi.encode(i);
            output[i] = keccak256(input);
        }
    }
}
@@ -24,7 +24,6 @@ type Batch struct {
    // batch
    Index           uint64 `json:"index" gorm:"column:index"`
    Hash            string `json:"hash" gorm:"column:hash"`
-   DataHash        string `json:"data_hash" gorm:"column:data_hash"`
    StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
    StartChunkHash  string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
    EndChunkIndex   uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -55,10 +54,6 @@ type Batch struct {
    OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
    OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`

-   // blob
-   BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
-   BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`

    // metadata
    CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
    UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -253,7 +248,6 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
    newBatch := Batch{
        Index:           batch.Index,
        Hash:            daBatch.Hash().Hex(),
-       DataHash:        daBatch.DataHash.Hex(),
        StartChunkHash:  startDAChunkHash.Hex(),
        StartChunkIndex: startChunkIndex,
        EndChunkHash:    endDAChunkHash.Hex(),
@@ -268,8 +262,6 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
        ActiveAttempts: 0,
        RollupStatus:   int16(types.RollupPending),
        OracleStatus:   int16(types.GasOraclePending),
-       BlobDataProof:  nil, // using mock value because this piece of codes is only used in unit tests
-       BlobSize:       0,   // using mock value because this piece of codes is only used in unit tests
    }

    db := o.db
@@ -48,10 +48,6 @@ type Chunk struct {
    // batch
    BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`

-   // blob
-   CrcMax   uint64 `json:"crc_max" gorm:"column:crc_max"`
-   BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`

    // metadata
    TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
    TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -304,8 +300,6 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
        ProvingStatus:  int16(types.ProvingTaskUnassigned),
        TotalAttempts:  0,
        ActiveAttempts: 0,
-       CrcMax:         0, // using mock value because this piece of codes is only used in unit tests
-       BlobSize:       0, // using mock value because this piece of codes is only used in unit tests
    }

    db := o.db
@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
    cur, err := Current(pgDB)
    assert.NoError(t, err)
    // total number of tables.
-   assert.Equal(t, int64(17), cur)
+   assert.Equal(t, int64(16), cur)
}

func testMigrate(t *testing.T) {
    assert.NoError(t, Migrate(pgDB))
    cur, err := Current(pgDB)
    assert.NoError(t, err)
-   assert.Equal(t, int64(17), cur)
+   assert.Equal(t, int64(16), cur)
}

func testRollback(t *testing.T) {
    version, err := Current(pgDB)
    assert.NoError(t, err)
-   assert.Equal(t, int64(17), version)
+   assert.Equal(t, int64(16), version)

    assert.NoError(t, Rollback(pgDB, nil))
@@ -1,27 +0,0 @@
-- +goose Up
-- +goose StatementBegin

ALTER TABLE chunk
    ADD COLUMN crc_max INTEGER DEFAULT 0,
    ADD COLUMN blob_size INTEGER DEFAULT 0;

ALTER TABLE batch
    ADD COLUMN data_hash VARCHAR DEFAULT '',
    ADD COLUMN blob_data_proof BYTEA DEFAULT NULL,
    ADD COLUMN blob_size INTEGER DEFAULT 0;

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin

ALTER TABLE IF EXISTS batch
    DROP COLUMN data_hash,
    DROP COLUMN blob_data_proof,
    DROP COLUMN blob_size;

ALTER TABLE IF EXISTS chunk
    DROP COLUMN crc_max,
    DROP COLUMN blob_size;

-- +goose StatementEnd
@@ -76,7 +76,7 @@ func action(ctx *cli.Context) error {
        }
    })

-   log.Info("Start event-watcher successfully", "version", version.Version)
+   log.Info("Start event-watcher successfully")

    // Catch CTRL-C to ensure a graceful shutdown.
    interrupt := make(chan os.Signal, 1)
@@ -109,7 +109,7 @@ func action(ctx *cli.Context) error {
    go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)

    // Finish start all message relayer functions
-   log.Info("Start gas-oracle successfully", "version", version.Version)
+   log.Info("Start gas-oracle successfully")

    // Catch CTRL-C to ensure a graceful shutdown.
    interrupt := make(chan os.Signal, 1)
@@ -115,7 +115,7 @@ func action(ctx *cli.Context) error {
    go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)

    // Finish start all rollup relayer functions.
-   log.Info("Start rollup-relayer successfully", "version", version.Version)
+   log.Info("Start rollup-relayer successfully")

    // Catch CTRL-C to ensure a graceful shutdown.
    interrupt := make(chan os.Signal, 1)
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,103 +0,0 @@
package main

import (
    "context"
    "encoding/hex"
    "os"
    "strconv"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/log"

    "scroll-tech/common/database"
    "scroll-tech/common/types/encoding"
    "scroll-tech/common/types/encoding/codecv1"
    "scroll-tech/rollup/internal/orm"
)

func main() {
    glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
    glogger.Verbosity(log.LvlInfo)
    log.Root().SetHandler(glogger)

    if len(os.Args) < 2 {
        log.Crit("no batch index provided")
        return
    }

    batchIndexStr := os.Args[1]
    batchIndexInt, err := strconv.Atoi(batchIndexStr)
    if err != nil || batchIndexInt <= 0 {
        log.Crit("invalid batch index", "indexStr", batchIndexStr, "err", err)
        return
    }
    batchIndex := uint64(batchIndexInt)

    db, err := database.InitDB(&database.Config{
        DriverName: "postgres",
        DSN:        os.Getenv("DB_DSN"),
        MaxOpenNum: 200,
        MaxIdleNum: 20,
    })
    if err != nil {
        log.Crit("failed to init db", "err", err)
    }
    defer func() {
        if deferErr := database.CloseDB(db); deferErr != nil {
            log.Error("failed to close db", "err", err)
        }
    }()

    l2BlockOrm := orm.NewL2Block(db)
    chunkOrm := orm.NewChunk(db)
    batchOrm := orm.NewBatch(db)

    dbBatch, err := batchOrm.GetBatchByIndex(context.Background(), batchIndex)
    if err != nil {
        log.Crit("failed to get batch", "index", batchIndex, "err", err)
        return
    }

    dbParentBatch, err := batchOrm.GetBatchByIndex(context.Background(), batchIndex-1)
    if err != nil {
        log.Crit("failed to get batch", "index", batchIndex-1, "err", err)
        return
    }

    dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
    if err != nil {
        log.Crit("failed to fetch chunks", "err", err)
        return
    }

    chunks := make([]*encoding.Chunk, len(dbChunks))
    for i, c := range dbChunks {
        blocks, err := l2BlockOrm.GetL2BlocksInRange(context.Background(), c.StartBlockNumber, c.EndBlockNumber)
        if err != nil {
            log.Crit("failed to fetch blocks", "err", err)
            return
        }
        chunks[i] = &encoding.Chunk{Blocks: blocks}
    }

    batch := &encoding.Batch{
        Index:                      dbBatch.Index,
        TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
        ParentBatchHash:            common.HexToHash(dbParentBatch.Hash),
        Chunks:                     chunks,
    }

    daBatch, err := codecv1.NewDABatch(batch)
    if err != nil {
        log.Crit("failed to create DA batch", "err", err)
        return
    }

    blobDataProof, err := daBatch.BlobDataProof()
    if err != nil {
        log.Crit("failed to get blob data proof", "err", err)
        return
    }

    log.Info("batchMeta", "batchHash", daBatch.Hash().Hex(), "batchDataHash", daBatch.DataHash.Hex(), "blobDataProof", hex.EncodeToString(blobDataProof), "blobData", hex.EncodeToString(daBatch.Blob()[:]))
}
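Note: the deleted tool above recomputes a committed batch's codecv1 metadata (batch hash, data hash, blob data proof, and the raw blob) from the rollup database. It takes the batch index as its only command-line argument and reads the Postgres DSN from the DB_DSN environment variable.

// Hypothetical invocation from the tool's directory (placeholder DSN and batch
// index, not taken from the diff):
//
//   DB_DSN="postgres://user:pass@localhost:5432/scroll?sslmode=disable" go run . 1234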
@@ -42,4 +42,7 @@ type BatchProposerConfig struct {
    MaxL1CommitCalldataSizePerBatch uint64  `json:"max_l1_commit_calldata_size_per_batch"`
    BatchTimeoutSec                 uint64  `json:"batch_timeout_sec"`
    GasCostIncreaseMultiplier       float64 `json:"gas_cost_increase_multiplier"`
+
+   EnableTestEnvSamplingFeature bool   `json:"enable_test_env_sampling_feature,omitempty"`
+   SamplingPercentage           uint64 `json:"sampling_percentage,omitempty"`
}
@@ -64,6 +64,9 @@ type RelayerConfig struct {
    EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"`
    // The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true.
    FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"`
+
+   EnableTestEnvSamplingFeature bool   `json:"enable_test_env_sampling_feature,omitempty"`
+   SamplingPercentage           uint64 `json:"sampling_percentage,omitempty"`
}

// GasOracleConfig The config for updating gas price oracle.
@@ -128,6 +131,10 @@ func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
        return fmt.Errorf("error converting and checking finalize sender private key: %w", err)
    }

+   if r.SamplingPercentage == 0 {
+       r.SamplingPercentage = 100
+   }
+
    return nil
}
@@ -464,8 +464,9 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
    case types.ProvingTaskVerified:
        log.Info("Start to roll up zk proof", "hash", batch.Hash)
        r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
-       if err := r.finalizeBatch(batch, true); err != nil {
-           log.Error("Failed to finalize batch with proof", "index", batch.Index, "hash", batch.Hash, "err", err)
+       skipProof := r.cfg.EnableTestEnvSamplingFeature && ((batch.Index % 100) >= r.cfg.SamplingPercentage)
+       if err := r.finalizeBatch(batch, !skipProof); err != nil {
+           log.Error("Failed to finalize batch", "index", batch.Index, "hash", batch.Hash, "withProof", !skipProof, "err", err)
        }

    case types.ProvingTaskFailed:
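Note: the hunk above, together with the config changes earlier, introduces a test-environment sampling switch: when EnableTestEnvSamplingFeature is set, only a configurable percentage of batches keeps the full proof path, and UnmarshalJSON defaults SamplingPercentage to 100 so nothing is skipped unless explicitly configured. A minimal sketch of the predicate shared by the relayer and the batch proposer (the standalone function is illustrative; the real code inlines the expression):

// shouldSkipProof mirrors the check added in Layer2Relayer.ProcessCommittedBatches
// and BatchProposer.updateDBBatchInfo: with SamplingPercentage = p, batches whose
// index modulo 100 is below p are finalized with a proof, the rest without one.
func shouldSkipProof(batchIndex, samplingPercentage uint64, samplingEnabled bool) bool {
    return samplingEnabled && (batchIndex%100) >= samplingPercentage
}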
@@ -14,6 +14,7 @@ import (
    "gorm.io/gorm"

    "scroll-tech/common/forks"
+   "scroll-tech/common/types"
    "scroll-tech/common/types/encoding"

    "scroll-tech/rollup/internal/config"
@@ -37,6 +38,7 @@ type BatchProposer struct {
    gasCostIncreaseMultiplier float64
    forkMap                   map[uint64]bool

+   cfg      *config.BatchProposerConfig
    chainCfg *params.ChainConfig

    batchProposerCircleTotal prometheus.Counter
@@ -74,6 +76,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
        batchTimeoutSec:           cfg.BatchTimeoutSec,
        gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
        forkMap:                   forkMap,
+       cfg:                       cfg,
        chainCfg:                  chainCfg,

        batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
@@ -144,6 +147,27 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
            log.Warn("BatchProposer.UpdateBatchHashInRange update the chunk's batch hash failure", "hash", batch.Hash, "error", dbErr)
            return dbErr
        }

+       skipProof := false
+       if p.cfg.EnableTestEnvSamplingFeature && ((batch.Index % 100) >= p.cfg.SamplingPercentage) {
+           skipProof = true
+       }
+       if skipProof {
+           dbErr = p.batchOrm.UpdateProvingStatus(p.ctx, batch.Hash, types.ProvingTaskVerified, dbTX)
+           if dbErr != nil {
+               log.Warn("BatchProposer.updateBatchInfoInDB update batch proving_status failure",
+                   "batch hash", batch.Hash, "error", dbErr)
+               return dbErr
+           }
+           dbErr = p.chunkOrm.UpdateProvingStatusInRange(p.ctx, batch.StartChunkIndex, batch.EndChunkIndex, types.ProvingTaskVerified, dbTX)
+           if dbErr != nil {
+               log.Warn("BatchProposer.updateBatchInfoInDB update chunk proving_status failure",
+                   "start chunk index", batch.StartChunkIndex, "end chunk index", batch.EndChunkIndex,
+                   "batch hash", batch.Hash, "error", dbErr)
+               return dbErr
+           }
+       }

        return nil
    })
    if err != nil {
@@ -25,7 +25,6 @@ type Batch struct {
    // batch
    Index           uint64 `json:"index" gorm:"column:index"`
    Hash            string `json:"hash" gorm:"column:hash"`
-   DataHash        string `json:"data_hash" gorm:"column:data_hash"`
    StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
    StartChunkHash  string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
    EndChunkIndex   uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -54,10 +53,6 @@ type Batch struct {
    OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
    OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`

-   // blob
-   BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
-   BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`

    // metadata
    TotalL1CommitGas          uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas;default:0"`
    TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size;default:0"`
@@ -262,7 +257,6 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
    newBatch := Batch{
        Index:           batch.Index,
        Hash:            batchMeta.BatchHash.Hex(),
-       DataHash:        batchMeta.BatchDataHash.Hex(),
        StartChunkHash:  batchMeta.StartChunkHash.Hex(),
        StartChunkIndex: startChunkIndex,
        EndChunkHash:    batchMeta.EndChunkHash.Hex(),
@@ -277,8 +271,6 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
        OracleStatus:              int16(types.GasOraclePending),
        TotalL1CommitGas:          metrics.L1CommitGas,
        TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
-       BlobDataProof:             batchMeta.BatchBlobDataProof,
-       BlobSize:                  metrics.L1CommitBlobSize,
    }

    db := o.db
@@ -44,10 +44,6 @@ type Chunk struct {
    // batch
    BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`

-   // blob
-   CrcMax   uint64 `json:"crc_max" gorm:"column:crc_max"`
-   BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`

    // metadata
    TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
    TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -216,8 +212,6 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
        ParentChunkStateRoot: parentChunkStateRoot,
        WithdrawRoot:         chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
        ProvingStatus:        int16(types.ProvingTaskUnassigned),
-       CrcMax:               metrics.CrcMax,
-       BlobSize:             metrics.L1CommitBlobSize,
    }

    db := o.db
@@ -306,3 +300,28 @@ func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, e
    }
    return nil
}
+func (o *Chunk) UpdateProvingStatusInRange(ctx context.Context, startIndex uint64, endIndex uint64, status types.ProvingStatus, dbTX ...*gorm.DB) error {
+   db := o.db
+   if len(dbTX) > 0 && dbTX[0] != nil {
+       db = dbTX[0]
+   }
+   db = db.WithContext(ctx)
+   db = db.Model(&Chunk{})
+   db = db.Where("index >= ? AND index <= ?", startIndex, endIndex)
+
+   updateFields := make(map[string]interface{})
+   updateFields["proving_status"] = int(status)
+   switch status {
+   case types.ProvingTaskAssigned:
+       updateFields["prover_assigned_at"] = time.Now()
+   case types.ProvingTaskUnassigned:
+       updateFields["prover_assigned_at"] = nil
+   case types.ProvingTaskVerified:
+       updateFields["proved_at"] = time.Now()
+   }
+
+   if err := db.Updates(updateFields).Error; err != nil {
+       return fmt.Errorf("Chunk.UpdateProvingStatusInRange error: %w, start index: %v, end index: %v, status: %v", err, startIndex, endIndex, status.String())
+   }
+   return nil
+}
@@ -191,12 +191,10 @@ func GetChunkHash(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64, code

// BatchMetadata represents the metadata of a batch.
type BatchMetadata struct {
-   BatchHash          common.Hash
-   BatchDataHash      common.Hash
-   BatchBlobDataProof []byte
-   BatchBytes         []byte
-   StartChunkHash     common.Hash
-   EndChunkHash       common.Hash
+   BatchHash      common.Hash
+   BatchBytes     []byte
+   StartChunkHash common.Hash
+   EndChunkHash   common.Hash
}

// GetBatchMetadata retrieves the metadata of a batch.
@@ -214,11 +212,9 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
            return nil, fmt.Errorf("failed to create codecv0 DA batch: %w", err)
        }

-       // BatchBlobDataProof is left as empty for codecv0.
        batchMeta := &BatchMetadata{
-           BatchHash:     daBatch.Hash(),
-           BatchDataHash: daBatch.DataHash,
-           BatchBytes:    daBatch.Encode(),
+           BatchHash:  daBatch.Hash(),
+           BatchBytes: daBatch.Encode(),
        }

        startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
@@ -247,16 +243,9 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
            return nil, fmt.Errorf("failed to create codecv1 DA batch: %w", err)
        }

-       blobDataProof, err := daBatch.BlobDataProof()
-       if err != nil {
-           return nil, fmt.Errorf("failed to get codecv1 blob data proof: %w", err)
-       }
-
        batchMeta := &BatchMetadata{
-           BatchHash:          daBatch.Hash(),
-           BatchDataHash:      daBatch.DataHash,
-           BatchBlobDataProof: blobDataProof,
-           BatchBytes:         daBatch.Encode(),
+           BatchHash:  daBatch.Hash(),
+           BatchBytes: daBatch.Encode(),
        }

        startDAChunk, err := codecv1.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
@@ -21,7 +21,6 @@ type Batch struct {
    // batch
    Index           uint64 `json:"index" gorm:"column:index"`
    Hash            string `json:"hash" gorm:"column:hash"`
-   DataHash        string `json:"data_hash" gorm:"column:data_hash"`
    StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
    StartChunkHash  string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
    EndChunkIndex   uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -50,10 +49,6 @@ type Batch struct {
    OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
    OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`

-   // blob
-   BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
-   BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`

    // metadata
    CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
    UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -156,7 +151,6 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
    newBatch := Batch{
        Index:           batch.Index,
        Hash:            daBatch.Hash().Hex(),
-       DataHash:        daBatch.DataHash.Hex(),
        StartChunkHash:  startDAChunkHash.Hex(),
        StartChunkIndex: startChunkIndex,
        EndChunkHash:    endDAChunkHash.Hex(),
@@ -169,8 +163,6 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
        ProvingStatus: int16(types.ProvingTaskUnassigned),
        RollupStatus:  int16(types.RollupPending),
        OracleStatus:  int16(types.GasOraclePending),
-       BlobDataProof: nil, // using mock value because this piece of codes is only used in unit tests
-       BlobSize:      0,   // using mock value because this piece of codes is only used in unit tests
    }

    db := o.db
@@ -43,10 +43,6 @@ type Chunk struct {
    // batch
    BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`

-   // blob
-   CrcMax   uint64 `json:"crc_max" gorm:"column:crc_max"`
-   BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`

    // metadata
    TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
    TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -154,8 +150,6 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
        ParentChunkStateRoot: parentChunkStateRoot,
        WithdrawRoot:         chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
        ProvingStatus:        int16(types.ProvingTaskUnassigned),
-       CrcMax:               0, // using mock value because this piece of codes is only used in unit tests
-       BlobSize:             0, // using mock value because this piece of codes is only used in unit tests
    }

    db := o.db