Compare commits

..

26 Commits

Author SHA1 Message Date
colinlyguo
e929d53b0b add a new script 2024-04-15 01:16:23 +08:00
colin
e90aa04e8c Merge branch 'update-zkverifier-deployment-script' into codecv1-script 2024-04-13 01:38:34 +08:00
colin
32f0011d74 Merge branch 'develop' into codecv1-script 2024-04-13 01:37:59 +08:00
colinlyguo
f0fc344303 update-rollup-verifier-deployment-script 2024-04-12 23:30:42 +08:00
colinlyguo
be5c58a427 update scripts 2024-04-06 22:04:58 +08:00
colinlyguo
97c85209c5 update tx payload length 2024-04-06 22:00:22 +08:00
colinlyguo
48534f8698 fix 2024-04-06 17:46:55 +08:00
colinlyguo
378ec79d14 fix 2024-04-06 03:26:03 +08:00
colinlyguo
1054023dd5 fix script 2024-04-06 03:21:27 +08:00
colinlyguo
65481212d5 update script 2024-04-06 03:13:22 +08:00
colinlyguo
ba289fc651 update contract 2024-04-05 18:36:36 +08:00
colinlyguo
64ed273a1d fix 2024-04-05 03:03:24 +08:00
colinlyguo
c1059a3f51 fix 2024-04-05 00:20:01 +08:00
colin
70aa557968 Merge branch 'develop' into codecv1-script 2024-04-04 19:03:44 +08:00
colinlyguo
94a3c0a571 update sending script 2024-04-04 18:52:19 +08:00
HAOYUatHZ
4c6016f852 add relay-skipped-tx.ts 2024-04-04 17:29:15 +08:00
HAOYUatHZ
e53e150341 add ecc.sol foundry deployment scripts 2024-04-04 16:30:36 +08:00
HAOYUatHZ
5b6b170db1 add hash.sol foundry deployment scripts 2024-04-04 16:24:40 +08:00
HAOYUatHZ
44c77bcc87 add sha256 & ecc contracts 2024-04-04 16:19:39 +08:00
colinlyguo
364173a8d6 add finalizeBatch calldata script 2024-04-03 16:30:29 +08:00
colinlyguo
2afba3e394 fix scripts 2024-04-03 10:24:02 +08:00
colinlyguo
53b24f75f8 fix a typo 2024-04-03 02:51:01 +08:00
colinlyguo
de60ea8f55 add dump calldata scripts 2024-04-03 02:35:41 +08:00
colinlyguo
9ade976ce5 add large transaction payload script 2024-04-02 19:50:59 +08:00
colinlyguo
4bb9cf89aa dump full blob 2024-04-01 17:11:39 +08:00
colinlyguo
efe0d7d2fe feat: codecv1 script 2024-04-01 15:41:56 +08:00
29 changed files with 1006 additions and 2415 deletions

View File

@@ -46,7 +46,6 @@ jobs:
with:
context: .
file: ./build/dockerfiles/event_watcher.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -91,7 +90,6 @@ jobs:
with:
context: .
file: ./build/dockerfiles/gas_oracle.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -136,7 +134,6 @@ jobs:
with:
context: .
file: ./build/dockerfiles/rollup_relayer.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -181,7 +178,6 @@ jobs:
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -226,7 +222,6 @@ jobs:
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -271,7 +266,6 @@ jobs:
with:
context: .
file: ./build/dockerfiles/coordinator-api.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -316,7 +310,6 @@ jobs:
with:
context: .
file: ./build/dockerfiles/coordinator-cron.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}

View File

@@ -61,11 +61,8 @@ func (t *TestcontainerApps) StartL1GethContainer() error {
req := testcontainers.ContainerRequest{
Image: "scroll_l1geth",
ExposedPorts: []string{"8546/tcp", "8545/tcp"},
WaitingFor: wait.ForAll(
wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
),
Cmd: []string{"--log.debug", "ANY"},
WaitingFor: wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
Cmd: []string{"--log.debug", "ANY"},
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,
@@ -88,10 +85,7 @@ func (t *TestcontainerApps) StartL2GethContainer() error {
req := testcontainers.ContainerRequest{
Image: "scroll_l2geth",
ExposedPorts: []string{"8546/tcp", "8545/tcp"},
WaitingFor: wait.ForAll(
wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
),
WaitingFor: wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,

View File

@@ -0,0 +1,133 @@
package main
import (
"context"
"fmt"
"log"
"math/big"
"time"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rlp"
)
const targetTxSize = 126914
// main sends batches of 2..6 transactions whose combined RLP-encoded size
// is tuned (via prepareAndSendTransactions) toward targetTxSize, for
// exercising codecv1 payload limits against a local devnet node.
func main() {
	// Hard-coded devnet key; never use on a real network.
	privateKeyHex := "0000000000000000000000000000000000000000000000000000000000000042"
	privateKey, err := crypto.HexToECDSA(privateKeyHex)
	if err != nil {
		log.Fatalf("Invalid private key: %v", err)
	}
	client, err := ethclient.Dial("http://localhost:9999")
	if err != nil {
		log.Fatalf("Failed to connect to the Ethereum client: %v", err)
	}
	auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(222222))
	if err != nil {
		log.Fatalf("Failed to create transactor with chain ID 222222: %v", err)
	}
	nonce, err := client.PendingNonceAt(context.Background(), auth.From)
	if err != nil {
		log.Fatalf("Failed to retrieve account nonce: %v", err)
	}
	totalTxNum := []uint64{2, 3, 4, 5, 6}
	for _, num := range totalTxNum {
		// Fix: the returned error was previously discarded, so a failed
		// batch left a nonce gap and every later batch silently stalled.
		if err := prepareAndSendTransactions(client, auth, nonce, num); err != nil {
			log.Fatalf("Failed to send transaction batch (size %d): %v", num, err)
		}
		nonce += num
	}
}
// prepareAndSendTransactions builds totalTxNum legacy self-transfers whose
// combined RLP-encoded size is intended to equal targetTxSize: the first
// totalTxNum-1 transactions carry equal fixed-size 0xff payloads, and the
// payload length of the last one is binary-searched so the total hits the
// target exactly. Transactions are then sent in ascending nonce order,
// sleeping 10s between sends so each can land in a separate block.
//
// Fixes vs. the original:
//   - errors are returned (wrapped with %w) instead of calling log.Fatalf,
//     which exited the process and made the error return dead code;
//   - a binary-search miss is now reported instead of silently sending only
//     totalTxNum-1 transactions (the caller advances its nonce by
//     totalTxNum regardless, so a miss produces a nonce gap).
func prepareAndSendTransactions(client *ethclient.Client, auth *bind.TransactOpts, initialNonce uint64, totalTxNum uint64) error {
	gasLimit := uint64(5000000)
	gasPrice := big.NewInt(1000000000)

	var signedTxs []*types.Transaction
	payloadSum := 0

	// Equal-sized 0xff payload for all but the last transaction.
	dataPayload := make([]byte, targetTxSize/totalTxNum)
	for i := range dataPayload {
		dataPayload[i] = 0xff
	}
	for i := uint64(0); i < totalTxNum-1; i++ {
		txData := &types.LegacyTx{
			Nonce:    initialNonce + i,
			GasPrice: gasPrice,
			Gas:      gasLimit,
			To:       &auth.From, // self-transfer; only the payload size matters
			Data:     dataPayload,
		}
		signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
		if err != nil {
			return fmt.Errorf("failed to sign tx: %w", err)
		}
		rlpTxData, err := rlp.EncodeToBytes(signedTx)
		if err != nil {
			return fmt.Errorf("failed to RLP encode the tx: %w", err)
		}
		payloadSum += len(rlpTxData)
		signedTxs = append(signedTxs, signedTx)
	}
	fmt.Println("payload sum", payloadSum)

	// Binary-search the last transaction's payload length so that
	// payloadSum + len(rlp(lastTx)) == targetTxSize exactly.
	found := false
	lowerBound := 0
	upperBound := targetTxSize
	for lowerBound <= upperBound {
		mid := (lowerBound + upperBound) / 2
		data := make([]byte, mid)
		for i := range data {
			data[i] = 0xff
		}
		txData := &types.LegacyTx{
			Nonce:    initialNonce + totalTxNum - 1,
			GasPrice: gasPrice,
			Gas:      gasLimit,
			To:       &auth.From,
			Data:     data,
		}
		signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
		if err != nil {
			return fmt.Errorf("failed to sign tx: %w", err)
		}
		rlpTxData, err := rlp.EncodeToBytes(signedTx)
		if err != nil {
			return fmt.Errorf("failed to RLP encode the tx: %w", err)
		}
		txSize := len(rlpTxData)
		if payloadSum+txSize < targetTxSize {
			lowerBound = mid + 1
		} else if payloadSum+txSize > targetTxSize {
			upperBound = mid - 1
		} else {
			fmt.Println("payloadSum+txSize", payloadSum+txSize)
			signedTxs = append(signedTxs, signedTx)
			found = true
			break
		}
	}
	if !found {
		// Preserve the original behavior of still sending the fixed-size
		// transactions, but surface the miss instead of staying silent.
		fmt.Printf("warning: could not hit target tx size %d exactly (fixed payload sum %d); sending %d of %d transactions, a nonce gap will follow\n",
			targetTxSize, payloadSum, len(signedTxs), totalTxNum)
	}

	for _, signedTx := range signedTxs {
		if err := client.SendTransaction(context.Background(), signedTx); err != nil {
			return fmt.Errorf("failed to send transaction: %w", err)
		}
		fmt.Printf("Transaction with nonce %d sent\n", signedTx.Nonce())
		time.Sleep(10 * time.Second)
	}
	return nil
}

View File

@@ -0,0 +1,131 @@
package main
import (
"context"
"fmt"
"log"
"math/big"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rlp"
)
const targetTxSize = 120568
// main sends six batches of 1..6 transactions, each batch's combined
// RLP-encoded size tuned toward targetTxSize, for exercising codecv1
// payload limits against a local devnet node.
func main() {
	// Hard-coded devnet key; never use on a real network.
	privateKeyHex := "0000000000000000000000000000000000000000000000000000000000000042"
	privateKey, err := crypto.HexToECDSA(privateKeyHex)
	if err != nil {
		log.Fatalf("Invalid private key: %v", err)
	}
	client, err := ethclient.Dial("http://localhost:9999")
	if err != nil {
		log.Fatalf("Failed to connect to the Ethereum client: %v", err)
	}
	auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(222222))
	if err != nil {
		log.Fatalf("Failed to create transactor with chain ID 222222: %v", err)
	}
	nonce, err := client.PendingNonceAt(context.Background(), auth.From)
	if err != nil {
		log.Fatalf("Failed to retrieve account nonce: %v", err)
	}
	// Replaces the unrolled calls with a loop: batch k starts at nonce
	// 1 + 2 + ... + (k-1) past the initial nonce, exactly as before.
	// Fix: the returned error was previously discarded.
	for num := uint64(1); num <= 6; num++ {
		if err := prepareAndSendTransactions(client, auth, nonce, num); err != nil {
			log.Fatalf("Failed to send transaction batch (size %d): %v", num, err)
		}
		nonce += num
	}
}
// prepareAndSendTransactions builds totalTxNum legacy self-transfers whose
// combined RLP-encoded size is intended to equal targetTxSize: the first
// totalTxNum-1 transactions carry equal fixed-size 0xff payloads, and the
// payload length of the last one is binary-searched so the total hits the
// target exactly. Transactions are sent in DESCENDING nonce order —
// presumably to exercise the pool's out-of-order handling; confirm with
// the script's author before changing.
//
// Fixes vs. the original:
//   - errors are returned (wrapped with %w) instead of calling log.Fatalf,
//     which exited the process and made the error return dead code;
//   - a binary-search miss is now reported instead of silently sending only
//     totalTxNum-1 transactions (the callers advance their nonce offsets by
//     totalTxNum regardless, so a miss produces a nonce gap).
func prepareAndSendTransactions(client *ethclient.Client, auth *bind.TransactOpts, initialNonce uint64, totalTxNum uint64) error {
	gasLimit := uint64(5000000)
	gasPrice := big.NewInt(1000000000)

	var signedTxs []*types.Transaction
	payloadSum := 0

	// Equal-sized 0xff payload for all but the last transaction.
	dataPayload := make([]byte, targetTxSize/totalTxNum)
	for i := range dataPayload {
		dataPayload[i] = 0xff
	}
	for i := uint64(0); i < totalTxNum-1; i++ {
		txData := &types.LegacyTx{
			Nonce:    initialNonce + i,
			GasPrice: gasPrice,
			Gas:      gasLimit,
			To:       &auth.From, // self-transfer; only the payload size matters
			Data:     dataPayload,
		}
		signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
		if err != nil {
			return fmt.Errorf("failed to sign tx: %w", err)
		}
		rlpTxData, err := rlp.EncodeToBytes(signedTx)
		if err != nil {
			return fmt.Errorf("failed to RLP encode the tx: %w", err)
		}
		payloadSum += len(rlpTxData)
		signedTxs = append(signedTxs, signedTx)
	}
	fmt.Println("payload sum", payloadSum)

	// Binary-search the last transaction's payload length so that
	// payloadSum + len(rlp(lastTx)) == targetTxSize exactly.
	found := false
	lowerBound := 0
	upperBound := targetTxSize
	for lowerBound <= upperBound {
		mid := (lowerBound + upperBound) / 2
		data := make([]byte, mid)
		for i := range data {
			data[i] = 0xff
		}
		txData := &types.LegacyTx{
			Nonce:    initialNonce + totalTxNum - 1,
			GasPrice: gasPrice,
			Gas:      gasLimit,
			To:       &auth.From,
			Data:     data,
		}
		signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
		if err != nil {
			return fmt.Errorf("failed to sign tx: %w", err)
		}
		rlpTxData, err := rlp.EncodeToBytes(signedTx)
		if err != nil {
			return fmt.Errorf("failed to RLP encode the tx: %w", err)
		}
		txSize := len(rlpTxData)
		if payloadSum+txSize < targetTxSize {
			lowerBound = mid + 1
		} else if payloadSum+txSize > targetTxSize {
			upperBound = mid - 1
		} else {
			fmt.Println("payloadSum+txSize", payloadSum+txSize)
			signedTxs = append(signedTxs, signedTx)
			found = true
			break
		}
	}
	if !found {
		// Preserve the original behavior of still sending the fixed-size
		// transactions, but surface the miss instead of staying silent.
		fmt.Printf("warning: could not hit target tx size %d exactly (fixed payload sum %d); sending %d of %d transactions, a nonce gap will follow\n",
			targetTxSize, payloadSum, len(signedTxs), totalTxNum)
	}

	// Send highest nonce first (reverse order), matching the original.
	for i := len(signedTxs) - 1; i >= 0; i-- {
		if err := client.SendTransaction(context.Background(), signedTxs[i]); err != nil {
			return fmt.Errorf("failed to send transaction: %w", err)
		}
		fmt.Printf("Transaction with nonce %d sent\n", signedTxs[i].Nonce())
	}
	return nil
}

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.3.93"
var tag = "v4.3.92"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -0,0 +1,23 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity =0.8.24;
import {Script} from "forge-std/Script.sol";
import {console} from "forge-std/console.sol";
import {Ecc} from "../../src/misc/ecc.sol";
/// @title DeployEcc — foundry script deploying the Ecc benchmark contract.
contract DeployEcc is Script {
    /// @notice Deploys Ecc with the L2 deployer key and prints its address
    ///         in `L2_ECC_ADDR=0x...` form so it can be sourced as an env var.
    function run() external {
        uint256 deployerKey = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");

        vm.startBroadcast(deployerKey);
        Ecc ecc = new Ecc();
        address deployedAddr = address(ecc);
        vm.stopBroadcast();

        logAddress("L2_ECC_ADDR", deployedAddr);
    }

    /// @dev Emits `name=address` on the console.
    function logAddress(string memory name, address addr) internal view {
        console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
    }
}

View File

@@ -0,0 +1,23 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity =0.8.24;
import {Script} from "forge-std/Script.sol";
import {console} from "forge-std/console.sol";
import {Hash} from "../../src/misc/hash.sol";
/// @title DeployHash — foundry script deploying the Hash benchmark contract.
contract DeployHash is Script {
    /// @notice Deploys Hash with the L2 deployer key and prints its address
    ///         in `L2_HASH_ADDR=0x...` form so it can be sourced as an env var.
    function run() external {
        uint256 deployerKey = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");

        vm.startBroadcast(deployerKey);
        Hash hash = new Hash();
        address deployedAddr = address(hash);
        vm.stopBroadcast();

        logAddress("L2_HASH_ADDR", deployedAddr);
    }

    /// @dev Emits `name=address` on the console.
    function logAddress(string memory name, address addr) internal view {
        console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
    }
}

View File

@@ -92,10 +92,12 @@ contract DeployL1BridgeContracts is Script {
}
function deployMultipleVersionRollupVerifier() internal {
uint256[] memory _versions = new uint256[](1);
address[] memory _verifiers = new address[](1);
uint256[] memory _versions = new uint256[](2);
address[] memory _verifiers = new address[](2);
_versions[0] = 0;
_verifiers[0] = address(zkEvmVerifierV1);
_versions[1] = 1;
_verifiers[1] = address(zkEvmVerifierV1);
rollupVerifier = new MultipleVersionRollupVerifier(L1_SCROLL_CHAIN_PROXY_ADDR, _versions, _verifiers);
logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));

View File

@@ -0,0 +1,34 @@
/* eslint-disable node/no-missing-import */
import * as dotenv from "dotenv";
import { ethers } from "hardhat";
dotenv.config();
// Relays a skipped message from L1 to the L2 Ecc contract through the
// L1ScrollMessenger proxy, using addresses and payload taken from env vars.
async function main() {
  // First configured hardhat signer acts as the L1 sender.
  const [deployer] = await ethers.getSigners();

  const messengerAddr = process.env.L1_SCROLL_MESSENGER_PROXY_ADDR!;
  const targetAddr = process.env.L2_ECC_ADDR!;
  const messagePayload = process.env.SKIPPED_TX_PAYLOAD!; // TODO: calc the payload, parse as bytes

  const messenger = await ethers.getContractAt("L1ScrollMessenger", messengerAddr, deployer);

  // sendMessage(_to, _value, _message, _gasLimit)
  const tx = await messenger.sendMessage(targetAddr, 0, messagePayload, 100000000);
  console.log(`calling ${targetAddr} with payload from l1, hash:`, tx.hash);

  const receipt = await tx.wait();
  console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}

// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main().catch((error) => {
  console.error(error);
  process.exitCode = 1;
});

127
contracts/src/misc/ecc.sol Normal file
View File

@@ -0,0 +1,127 @@
// SPDX-License-Identifier: GPL-3.0
pragma solidity =0.8.24;
/// @title Ecc — gas benchmark harness for the EVM elliptic-curve precompiles.
/// @notice Thin wrappers around the ecAdd (0x06), ecMul (0x07) and
///         ecPairing (0x08) precompiles plus the ecrecover builtin, with
///         loop helpers that repeat each operation n times for measurement.
contract Ecc {
    /* ECC Functions */

    // https://etherscan.io/address/0x41bf00f080ed41fa86201eac56b8afb170d9e36d#code
    /// @notice Point addition via the 0x06 precompile: retP = p0 + p1.
    /// @param p0 First point as [x, y].
    /// @param p1 Second point as [x, y].
    /// @return retP Resulting point as [x, y]; reverts if the precompile fails.
    function ecAdd(uint256[2] memory p0, uint256[2] memory p1) public view returns (uint256[2] memory retP) {
        // Pack both points into one contiguous 4-word input buffer.
        uint256[4] memory i = [p0[0], p0[1], p1[0], p1[1]];
        assembly {
            // call ecadd precompile
            // inputs are: x1, y1, x2, y2
            // not(0) supplies the maximum gas stipend; the 0x40-byte output
            // is written directly into the named return array retP.
            if iszero(staticcall(not(0), 0x06, i, 0x80, retP, 0x40)) {
                revert(0, 0)
            }
        }
    }

    // https://etherscan.io/address/0x41bf00f080ed41fa86201eac56b8afb170d9e36d#code
    /// @notice Scalar multiplication via the 0x07 precompile: retP = s * p.
    function ecMul(uint256[2] memory p, uint256 s) public view returns (uint256[2] memory retP) {
        // With a public key (x, y), this computes p = scalar * (x, y).
        uint256[3] memory i = [p[0], p[1], s];
        assembly {
            // call ecmul precompile
            // inputs are: x, y, scalar
            if iszero(staticcall(not(0), 0x07, i, 0x60, retP, 0x40)) {
                revert(0, 0)
            }
        }
    }

    // scroll-tech/scroll/contracts/src/libraries/verifier/RollupVerifier.sol
    // G1 point in affine coordinates.
    struct G1Point {
        uint256 x;
        uint256 y;
    }
    // G2 point; each coordinate is encoded as two 256-bit words.
    struct G2Point {
        uint256[2] x;
        uint256[2] y;
    }

    /// @notice Pairing check via the 0x08 precompile over the pair list
    ///         (p1[i], p2[i]); returns true when the check succeeds
    ///         (precompile output word is non-zero).
    function ecPairing(G1Point[] memory p1, G2Point[] memory p2) internal view returns (bool) {
        // Each pair occupies 6 input words: G1 x, G1 y, then the four G2 words.
        uint256 length = p1.length * 6;
        uint256[] memory input = new uint256[](length);
        uint256[1] memory result;
        bool ret;
        require(p1.length == p2.length);
        for (uint256 i = 0; i < p1.length; i++) {
            input[0 + i * 6] = p1[i].x;
            input[1 + i * 6] = p1[i].y;
            input[2 + i * 6] = p2[i].x[0];
            input[3 + i * 6] = p2[i].x[1];
            input[4 + i * 6] = p2[i].y[0];
            input[5 + i * 6] = p2[i].y[1];
        }
        assembly {
            // add(input, 0x20) skips the dynamic array's length word;
            // mul(length, 0x20) is the payload size in bytes.
            ret := staticcall(gas(), 8, add(input, 0x20), mul(length, 0x20), result, 0x20)
        }
        require(ret);
        return result[0] != 0;
    }

    /* Bench */

    /// @notice Runs ecAdd n times on the fixed point (1, 2).
    function ecAdds(uint256 n) public {
        uint256[2] memory p0;
        p0[0] = 1;
        p0[1] = 2;
        uint256[2] memory p1;
        p1[0] = 1;
        p1[1] = 2;
        for (uint256 i = 0; i < n; i++) {
            ecAdd(p0, p1);
        }
    }

    /// @notice Runs ecMul n times with scalar 3 on the fixed point (1, 2).
    function ecMuls(uint256 n) public {
        uint256[2] memory p0;
        p0[0] = 1;
        p0[1] = 2;
        for (uint256 i = 0; i < n; i++) {
            ecMul(p0, 3);
        }
    }

    /// @notice Runs a 2-pair ecPairing check n times on fixed test vectors.
    /// @dev NOTE(review): the result is deliberately not asserted (see the
    ///      commented-out require below) — presumably only the gas cost is
    ///      being measured; confirm before relying on the outcome.
    function ecPairings(uint256 n) public {
        G1Point[] memory g1_points = new G1Point[](2);
        G2Point[] memory g2_points = new G2Point[](2);
        // Fixed test vectors; presumably a valid pairing input — confirm
        // against the source they were copied from if correctness matters.
        g1_points[0].x = 0x0000000000000000000000000000000000000000000000000000000000000001;
        g1_points[0].y = 0x0000000000000000000000000000000000000000000000000000000000000002;
        g2_points[0].x[1] = 0x1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed;
        g2_points[0].x[0] = 0x198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2;
        g2_points[0].y[1] = 0x12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa;
        g2_points[0].y[0] = 0x090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b;
        g1_points[1].x = 0x1aa125a22bd902874034e67868aed40267e5575d5919677987e3bc6dd42a32fe;
        g1_points[1].y = 0x1bacc186725464068956d9a191455c2d6f6db282d83645c610510d8d4efbaee0;
        g2_points[1].x[1] = 0x1b7734c80605f71f1e2de61e998ce5854ff2abebb76537c3d67e50d71422a852;
        g2_points[1].x[0] = 0x10d5a1e34b2388a5ebe266033a5e0e63c89084203784da0c6bd9b052a78a2cac;
        g2_points[1].y[1] = 0x275739c5c2cdbc72e37c689e2ab441ea76c1d284b9c46ae8f5c42ead937819e1;
        g2_points[1].y[0] = 0x018de34c5b7c3d3d75428bbe050f1449ea3d9961d563291f307a1874f7332e65;
        for (uint256 i = 0; i < n; i++) {
            ecPairing(g1_points, g2_points);
            // bool checked = false;
            // checked = ecPairing(g1_points, g2_points);
            // require(checked);
        }
    }

    // https://github.com/OpenZeppelin/openzeppelin-contracts/blob/8a0b7bed82d6b8053872c3fd40703efd58f5699d/test/utils/cryptography/ECDSA.test.js#L230
    /// @notice Runs the ecrecover builtin n times on a fixed (hash, v, r, s).
    function ecRecovers(uint256 n) public {
        bytes32 hash = 0xb94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9;
        bytes32 r = 0xe742ff452d41413616a5bf43fe15dd88294e983d3d36206c2712f39083d638bd;
        uint8 v = 0x1b;
        bytes32 s = 0xe0a0fc89be718fbc1033e1d30d78be1c68081562ed2e97af876f286f3453231d;
        for (uint256 i = 0; i < n; i++) {
            ecrecover(hash, v, r, s);
        }
    }
}

View File

@@ -0,0 +1,34 @@
// SPDX-License-Identifier: GPL-3.0
pragma solidity =0.8.24;
/// @title Hash — gas benchmark harness for the SHA-256 precompile and keccak256.
contract Hash {
    /// @notice Hashes `input` with the SHA-256 precompile (address 0x2).
    /// @dev Fix: the original declared `(bool ok, bytes memory out)` inside
    ///      the body, shadowing the named return value, so the function
    ///      always returned empty bytes. Assigning into the named return
    ///      propagates the 32-byte digest to the caller.
    function sha256(bytes memory input) public view returns (bytes memory out) {
        bool ok;
        (ok, out) = address(2).staticcall(input);
        require(ok);
    }

    /// @notice Yul variant kept for gas benchmarking only.
    /// @dev NOTE(review): this hashes whatever happens to be in memory
    ///      [0, 32) rather than `input` (the mstore/return lines are
    ///      commented out) — presumably only the precompile call's gas cost
    ///      is being measured; confirm before relying on any result.
    function sha256Yul(bytes memory input) public view returns (bytes memory out) {
        assembly {
            // mstore(0, input)
            if iszero(staticcall(gas(), 2, 0, 32, 0, 32)) {
                revert(0, 0)
            }
            // return(0, 32)
        }
    }

    /// @notice Benchmark: run the sha256 precompile wrapper n times.
    function sha256s(bytes memory input, uint256 n) public {
        for (uint256 i = 0; i < n; i++) {
            sha256(input);
        }
    }

    /// @notice Benchmark: keccak256 over n distinct ABI-encoded inputs.
    function keccak256s(uint256 n) public {
        bytes32[] memory output = new bytes32[](n);
        for (uint256 i = 0; i < n; i++) {
            bytes memory input = abi.encode(i);
            output[i] = keccak256(input);
        }
    }
}

View File

@@ -38,7 +38,7 @@ make lint
## Configure
The coordinator behavior can be configured using [`conf/config.json`](conf/config.json). Check the code comments under `ProverManager` in [`internal/config/config.go`](internal/config/config.go) for more details.
The coordinator behavior can be configured using [`config.json`](config.json). Check the code comments under `ProverManager` in [`config/config.go`](config/config.go) for more details.
## Start

View File

@@ -75,18 +75,6 @@ func (*Batch) TableName() string {
return "batch"
}
// GetBatchHashByIndex retrieves the hash of the batch given its index.
// used for the script.
func (o *Batch) GetBatchHashByIndex(ctx context.Context, index uint64) (string, error) {
db := o.db.WithContext(ctx)
var batch Batch
err := db.Select("hash").Where("index = ?", index).First(&batch).Error
if err != nil {
return "", fmt.Errorf("Batch.GetBatchHashByIndex error: %w, index: %v", err, index)
}
return batch.Hash, nil
}
// GetUnassignedBatch retrieves unassigned batch based on the specified limit.
// The returned batch are sorted in ascending order by their index.
func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -1,121 +0,0 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/orm"
)
// main reads a batch index range ("start-end") from argv, connects to the
// database given by the DB_DSN env var, and dumps each batch's proving
// task detail to batch_task_<index>.json in the working directory.
func main() {
	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
	glogger.Verbosity(log.LvlInfo)
	log.Root().SetHandler(glogger)

	if len(os.Args) < 2 {
		log.Crit("no batch index range provided")
		return
	}
	indexRange := os.Args[1]
	indices := strings.Split(indexRange, "-")
	if len(indices) != 2 {
		log.Crit("invalid batch index range format. Use start-end", "providedRange", indexRange)
		return
	}
	startIndex, err := strconv.Atoi(indices[0])
	endIndex, err2 := strconv.Atoi(indices[1])
	if err != nil || err2 != nil || startIndex > endIndex {
		log.Crit("invalid batch index range", "start", indices[0], "end", indices[1], "err", err, "err2", err2)
		return
	}

	db, err := database.InitDB(&database.Config{
		DriverName: "postgres",
		DSN:        os.Getenv("DB_DSN"),
		MaxOpenNum: 200,
		MaxIdleNum: 20,
	})
	if err != nil {
		log.Crit("failed to init db", "err", err)
	}
	defer func() {
		if deferErr := database.CloseDB(db); deferErr != nil {
			// Fix: log the close error itself, not the stale outer `err`.
			log.Error("failed to close db", "err", deferErr)
		}
	}()

	for i := startIndex; i <= endIndex; i++ {
		batchIndex := uint64(i)
		resultBytes, err := getBatchTask(db, batchIndex)
		if err != nil {
			// log.Crit exits the process, so the `continue` that used to
			// follow it was unreachable and has been removed.
			log.Crit("failed to get batch task", "batchIndex", batchIndex, "err", err)
		}
		outputFilename := fmt.Sprintf("batch_task_%d.json", batchIndex)
		if err = os.WriteFile(outputFilename, resultBytes, 0644); err != nil {
			log.Crit("failed to write output file", "filename", outputFilename, "err", err)
		}
	}
}
// getBatchTask assembles the proving task detail for the batch at
// batchIndex: it resolves the batch hash, loads every chunk belonging to
// that batch, pairs each chunk's stored proof with a ChunkInfo built from
// the chunk's DB row, and returns the resulting BatchTaskDetail as
// indented JSON bytes ready to be written to disk.
func getBatchTask(db *gorm.DB, batchIndex uint64) ([]byte, error) {
	batchHash, err := orm.NewBatch(db).GetBatchHashByIndex(context.Background(), batchIndex)
	if err != nil {
		err = fmt.Errorf("failed to get batch hash by index: %d err: %w ", batchIndex, err)
		return nil, err
	}
	chunks, err := orm.NewChunk(db).GetChunksByBatchHash(context.Background(), batchHash)
	if err != nil {
		err = fmt.Errorf("failed to get chunk proofs for batch task id: %s err: %w ", batchHash, err)
		return nil, err
	}
	var chunkProofs []*message.ChunkProof
	var chunkInfos []*message.ChunkInfo
	for _, chunk := range chunks {
		// Each chunk row stores its proof as raw JSON; decode it here.
		var proof message.ChunkProof
		if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
			return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, batchHash, chunk.Hash)
		}
		chunkProofs = append(chunkProofs, &proof)
		chunkInfo := message.ChunkInfo{
			ChainID:       534351, // sepolia
			PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
			PostStateRoot: common.HexToHash(chunk.StateRoot),
			WithdrawRoot:  common.HexToHash(chunk.WithdrawRoot),
			DataHash:      common.HexToHash(chunk.Hash),
			IsPadding:     false,
		}
		// Prefer the TxBytes captured inside the proof when present.
		if proof.ChunkInfo != nil {
			chunkInfo.TxBytes = proof.ChunkInfo.TxBytes
		}
		chunkInfos = append(chunkInfos, &chunkInfo)
	}
	taskDetail := message.BatchTaskDetail{
		ChunkInfos:  chunkInfos,
		ChunkProofs: chunkProofs,
	}
	chunkProofsBytes, err := json.MarshalIndent(taskDetail, "", " ")
	if err != nil {
		return nil, fmt.Errorf("failed to marshal chunk proofs, taskID:%s err:%w", batchHash, err)
	}
	return chunkProofsBytes, nil
}

View File

@@ -24,7 +24,7 @@ rollup_relayer: ## Builds the rollup_relayer bin
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
lint: mock_abi ## Lint the files - used for CI
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
clean: ## Empty out the bin folder

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,103 @@
package main
import (
"context"
"encoding/hex"
"os"
"strconv"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/database"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv1"
"scroll-tech/rollup/internal/orm"
)
// main dumps codecv1 batch metadata — batch hash, data hash, blob data
// proof and the raw blob — for the batch index given on the command line,
// reading rollup state from the database at the DB_DSN env var.
func main() {
	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
	glogger.Verbosity(log.LvlInfo)
	log.Root().SetHandler(glogger)

	if len(os.Args) < 2 {
		log.Crit("no batch index provided")
		return
	}
	batchIndexStr := os.Args[1]
	batchIndexInt, err := strconv.Atoi(batchIndexStr)
	// Index must be > 0: the code below loads batchIndex-1 as the parent.
	if err != nil || batchIndexInt <= 0 {
		log.Crit("invalid batch index", "indexStr", batchIndexStr, "err", err)
		return
	}
	batchIndex := uint64(batchIndexInt)

	db, err := database.InitDB(&database.Config{
		DriverName: "postgres",
		DSN:        os.Getenv("DB_DSN"),
		MaxOpenNum: 200,
		MaxIdleNum: 20,
	})
	if err != nil {
		log.Crit("failed to init db", "err", err)
	}
	defer func() {
		if deferErr := database.CloseDB(db); deferErr != nil {
			// Fix: log the close error itself, not the stale outer `err`.
			log.Error("failed to close db", "err", deferErr)
		}
	}()

	l2BlockOrm := orm.NewL2Block(db)
	chunkOrm := orm.NewChunk(db)
	batchOrm := orm.NewBatch(db)

	dbBatch, err := batchOrm.GetBatchByIndex(context.Background(), batchIndex)
	if err != nil {
		log.Crit("failed to get batch", "index", batchIndex, "err", err)
		return
	}
	dbParentBatch, err := batchOrm.GetBatchByIndex(context.Background(), batchIndex-1)
	if err != nil {
		log.Crit("failed to get batch", "index", batchIndex-1, "err", err)
		return
	}
	dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
	if err != nil {
		log.Crit("failed to fetch chunks", "err", err)
		return
	}
	// Fix: guard against an empty chunk range — dbChunks[0] below would
	// otherwise panic with an index-out-of-range.
	if len(dbChunks) == 0 {
		log.Crit("no chunks found for batch", "index", batchIndex)
		return
	}

	chunks := make([]*encoding.Chunk, len(dbChunks))
	for i, c := range dbChunks {
		blocks, err := l2BlockOrm.GetL2BlocksInRange(context.Background(), c.StartBlockNumber, c.EndBlockNumber)
		if err != nil {
			log.Crit("failed to fetch blocks", "err", err)
			return
		}
		chunks[i] = &encoding.Chunk{Blocks: blocks}
	}

	batch := &encoding.Batch{
		Index:                      dbBatch.Index,
		TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
		ParentBatchHash:            common.HexToHash(dbParentBatch.Hash),
		Chunks:                     chunks,
	}
	daBatch, err := codecv1.NewDABatch(batch)
	if err != nil {
		log.Crit("failed to create DA batch", "err", err)
		return
	}
	blobDataProof, err := daBatch.BlobDataProof()
	if err != nil {
		log.Crit("failed to get blob data proof", "err", err)
		return
	}
	log.Info("batchMeta", "batchHash", daBatch.Hash().Hex(), "batchDataHash", daBatch.DataHash.Hex(), "blobDataProof", hex.EncodeToString(blobDataProof), "blobData", hex.EncodeToString(daBatch.Blob()[:]))
}

View File

@@ -578,7 +578,7 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error
return err
}
log.Info("finalizeBatch in layer1", "with proof", withProof, "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String())
log.Info("finalizeBatch in layer1", "with proof", withProof, "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash)
// record and sync with db, @todo handle db error
if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupFinalizing); err != nil {