Mirror of https://github.com/scroll-tech/scroll.git

Compare commits: v4.1.62 ... batch-task (1 commit, a79cbbe5a1)
.github/scripts/bump_version_dot_go.mjs (vendored, 37 changed lines)
@@ -1,37 +0,0 @@
import { URL } from "url";
import { readFileSync, writeFileSync } from "fs";

const versionFilePath = new URL(
  "../../common/version/version.go",
  import.meta.url
).pathname;

const versionFileContent = readFileSync(versionFilePath, { encoding: "utf-8" });

const currentVersion = versionFileContent.match(
  /var tag = "(?<version>v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+))"/
);

try {
  parseInt(currentVersion.groups.major);
  parseInt(currentVersion.groups.minor);
  parseInt(currentVersion.groups.patch);
} catch (err) {
  console.error(new Error("Failed to parse version in version.go file"));
  throw err;
}

// prettier-ignore
const newVersion = `v${currentVersion.groups.major}.${currentVersion.groups.minor}.${parseInt(currentVersion.groups.patch) + 1}`;

console.log(
  `Bump version from ${currentVersion.groups.version} to ${newVersion}`
);

writeFileSync(
  versionFilePath,
  versionFileContent.replace(
    `var tag = "${currentVersion.groups.version}"`,
    `var tag = "${newVersion}"`
  )
);

.github/workflows/bump_version.yml (vendored, 55 changed lines)
@@ -1,55 +0,0 @@
name: Bump Version

on:
  pull_request:
    branches: [develop]
    types:
      - opened
      - reopened
      - synchronize
      - ready_for_review

jobs:
  try-to-bump:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: check diff
        id: check_diff
        run: |
          set -euo pipefail

          # fetch develop branch so that we can diff against later
          git fetch origin develop

          echo 'checking verion changes in diff...'

          # check if version changed in version.go
          # note: the grep will fail if use \d instead of [0-9]
          git diff HEAD..origin/develop --text --no-ext-diff --unified=0 --no-prefix common/version/version.go | grep -E '^\+var tag = "v[0-9]+\.[0-9]+\.[0-9]+"$' && true

          exit_code=$?

          # auto bump if version is not bumped manually
          echo '> require auto version bump?'

          if [ $exit_code -eq 0 ]; then
            echo '> no, already bumped'
            echo "result=no-bump" >> "$GITHUB_OUTPUT"
          else
            echo '> yes'
            echo "result=bump" >> "$GITHUB_OUTPUT"
          fi
      - name: Install Node.js 16
        if: steps.check_diff.outputs.result == 'bump'
        uses: actions/setup-node@v3
        with:
          node-version: 16
      - name: bump version in common/version/version.go
        if: steps.check_diff.outputs.result == 'bump'
        run: node .github/scripts/bump_version_dot_go.mjs
      - uses: stefanzweifel/git-auto-commit-action@3ea6ae190baf489ba007f7c92608f33ce20ef04a
        if: steps.check_diff.outputs.result == 'bump'
        with:
          commit_message: "chore: auto version bump [bot]"

@@ -110,7 +110,7 @@ func action(ctx *cli.Context) error {

    go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)

    go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)
    go utils.Loop(subCtx, 10*time.Second, l2relayer.ProcessCommittedBatches)

    // Finish start all rollup relayer functions.
    log.Info("Start rollup-relayer successfully")

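Note: each relayer stage above is launched through `utils.Loop`, which repeatedly calls a no-argument function on a fixed period until its context is cancelled; this hunk only changes the ProcessCommittedBatches period (15s on one side of the comparison, 10s on the other). The helper itself is not shown in this diff, so the following is only a hedged sketch of its assumed shape:

```go
package utils

import (
    "context"
    "time"
)

// Loop calls f once every period until ctx is cancelled.
// Sketch only: the real scroll-tech common/utils implementation may differ.
func Loop(ctx context.Context, period time.Duration, f func()) {
    ticker := time.NewTicker(period)
    defer ticker.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-ticker.C:
            f()
        }
    }
}
```
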
@@ -320,7 +320,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
func (r *Layer2Relayer) ProcessPendingBatches() {
    // get pending batches from database in ascending order by their index.
    pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 1)
    pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 10)
    if err != nil {
        log.Error("Failed to fetch pending L2 batches", "err", err)
        return

@@ -5,6 +5,7 @@ import (
    "math/big"
    "testing"

    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
    "github.com/scroll-tech/go-ethereum/common"
    gethTypes "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/stretchr/testify/assert"
@@ -12,7 +13,6 @@ import (
    "scroll-tech/common/database"
    "scroll-tech/common/types"
    "scroll-tech/common/types/message"
    "scroll-tech/common/utils"

    "scroll-tech/bridge/internal/config"
    "scroll-tech/bridge/internal/controller/relayer"

@@ -91,22 +91,20 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
    assert.NotEmpty(t, batch.CommitTxHash)
    assert.Equal(t, types.RollupCommitting, types.RollupStatus(batch.RollupStatus))

    success := utils.TryTimes(30, func() bool {
        var receipt *gethTypes.Receipt
        receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
        return err == nil && receipt.Status == 1
    })
    assert.True(t, success)
    assert.NoError(t, err)
    commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(batch.CommitTxHash))
    assert.NoError(t, err)
    commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
    assert.NoError(t, err)
    assert.Equal(t, len(commitTxReceipt.Logs), 1)

    // fetch rollup events
    success = utils.TryTimes(30, func() bool {
        err = l1Watcher.FetchContractEvent()
        assert.NoError(t, err)
        var statuses []types.RollupStatus
        statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
        return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
    })
    assert.True(t, success)
    err = l1Watcher.FetchContractEvent()
    assert.NoError(t, err)
    statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
    assert.NoError(t, err)
    assert.Equal(t, 1, len(statuses))
    assert.Equal(t, types.RollupCommitted, statuses[0])

    // add dummy proof
    proof := &message.BatchProof{
@@ -120,7 +118,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
    // process committed batch and check status
    l2Relayer.ProcessCommittedBatches()

    statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
    statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
    assert.NoError(t, err)
    assert.Equal(t, 1, len(statuses))
    assert.Equal(t, types.RollupFinalizing, statuses[0])
@@ -130,20 +128,17 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
    assert.NotNil(t, batch)
    assert.NotEmpty(t, batch.FinalizeTxHash)

    success = utils.TryTimes(30, func() bool {
        var receipt *gethTypes.Receipt
        receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
        return err == nil && receipt.Status == 1
    })
    assert.True(t, success)
    finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(batch.FinalizeTxHash))
    assert.NoError(t, err)
    finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
    assert.NoError(t, err)
    assert.Equal(t, len(finalizeTxReceipt.Logs), 1)

    // fetch rollup events
    success = utils.TryTimes(30, func() bool {
        err = l1Watcher.FetchContractEvent()
        assert.NoError(t, err)
        var statuses []types.RollupStatus
        statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
        return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
    })
    assert.True(t, success)
    err = l1Watcher.FetchContractEvent()
    assert.NoError(t, err)
    statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
    assert.NoError(t, err)
    assert.Equal(t, 1, len(statuses))
    assert.Equal(t, types.RollupFinalized, statuses[0])
}

common/libzkp/impl/Cargo.lock (generated, 4 changed lines)
@@ -2755,7 +2755,7 @@ dependencies = [
[[package]]
name = "prover"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.18#fcf4bf3137ad37becdeb5360b23ac978405c6b2c"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.16#bd901762c4744936586f4a30e27a4b65cea3bb71"
dependencies = [
 "aggregator",
 "anyhow",
@@ -4040,7 +4040,7 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "types"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.18#fcf4bf3137ad37becdeb5360b23ac978405c6b2c"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.16#bd901762c4744936586f4a30e27a4b65cea3bb71"
dependencies = [
 "base64 0.13.1",
 "blake2",

@@ -20,8 +20,8 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }

[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.18" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.18" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.16" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.16" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }

log = "0.4"

@@ -250,14 +250,12 @@ type ChunkInfo struct {

// ChunkProof includes the proof info that are required for chunk verification and rollup.
type ChunkProof struct {
    StorageTrace []byte `json:"storage_trace"`
    Protocol []byte `json:"protocol"`
    Proof []byte `json:"proof"`
    Instances []byte `json:"instances"`
    Vk []byte `json:"vk"`
    // cross-reference between cooridinator computation and prover compution
    ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"`
    GitVersion string `json:"git_version,omitempty"`
    StorageTrace []byte `json:"storage_trace"`
    Protocol []byte `json:"protocol"`
    Proof []byte `json:"proof"`
    Instances []byte `json:"instances"`
    Vk []byte `json:"vk"`
    ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"` // cross-reference between cooridinator computation and prover compution
}

// BatchProof includes the proof info that are required for batch verification and rollup.
@@ -265,8 +263,6 @@ type BatchProof struct {
    Proof []byte `json:"proof"`
    Instances []byte `json:"instances"`
    Vk []byte `json:"vk"`
    // cross-reference between cooridinator computation and prover compution
    GitVersion string `json:"git_version,omitempty"`
}

// SanityCheck checks whether an BatchProof is in a legal format

@@ -108,7 +108,7 @@ func TestProofDetailHash(t *testing.T) {
    }
    hash, err := proofDetail.Hash()
    assert.NoError(t, err)
    expectedHash := "d3b57cb84b0da8043373eeb3612806fb7248d6d1b6e089846ccf3ccce2d9f31c"
    expectedHash := "72a00232c1fcb100b1b67e6d12cd449e5d2d890e3a66e50f4c23499d4990766f"
    assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}

@@ -6,7 +6,7 @@ import (
    "strings"
)

var tag = "v4.1.62"
var tag = "v4.1.46"

var commit = func() string {
    if info, ok := debug.ReadBuildInfo(); ok {

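Note: the `commit` variable above is truncated by the hunk boundary; it derives the build's VCS revision from `runtime/debug.ReadBuildInfo`. The sketch below shows that pattern in a self-contained form; the exact truncation and the way `Version` is assembled in common/version/version.go are assumptions here:

```go
package version

import "runtime/debug"

var tag = "v4.1.46"

// commit reads the VCS revision that the Go toolchain embeds at build time.
// Sketch only; the real implementation may format or truncate it differently.
var commit = func() string {
    if info, ok := debug.ReadBuildInfo(); ok {
        for _, setting := range info.Settings {
            if setting.Key == "vcs.revision" && len(setting.Value) >= 8 {
                return setting.Value[:8]
            }
        }
    }
    return "unknown"
}()

// Version combines the tag and the commit, e.g. "v4.1.46-abc12345" (assumed format).
var Version = tag + "-" + commit
```
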
@@ -1,130 +0,0 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import {AccessControlEnumerable} from "@openzeppelin/contracts/access/AccessControlEnumerable.sol";
import {EnumerableSet} from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol";

// solhint-disable no-empty-blocks

contract ScrollOwner is AccessControlEnumerable {
    using EnumerableSet for EnumerableSet.Bytes32Set;

    /*************
     * Variables *
     *************/

    /// @notice Mapping from target address to selector to the list of accessible roles.
    mapping(address => mapping(bytes4 => EnumerableSet.Bytes32Set)) private targetAccess;

    /**********************
     * Function Modifiers *
     **********************/

    modifier hasAccess(
        address _target,
        bytes4 _selector,
        bytes32 _role
    ) {
        // admin has access to all methods
        require(_role == DEFAULT_ADMIN_ROLE || targetAccess[_target][_selector].contains(_role), "no access");
        _;
    }

    /***************
     * Constructor *
     ***************/

    constructor() {
        _grantRole(DEFAULT_ADMIN_ROLE, msg.sender);
    }

    /*************************
     * Public View Functions *
     *************************/

    /// @notice Return a list of roles which has access to the function.
    /// @param _target The address of target contract.
    /// @param _selector The function selector to query.
    /// @return _roles The list of roles.
    function callableRoles(address _target, bytes4 _selector) external view returns (bytes32[] memory _roles) {
        EnumerableSet.Bytes32Set storage _lists = targetAccess[_target][_selector];
        _roles = new bytes32[](_lists.length());
        for (uint256 i = 0; i < _roles.length; i++) {
            _roles[i] = _lists.at(i);
        }
    }

    /*****************************
     * Public Mutating Functions *
     *****************************/

    /// @notice Perform a function call from arbitrary role.
    /// @param _target The address of target contract.
    /// @param _value The value passing to target contract.
    /// @param _data The calldata passing to target contract.
    /// @param _role The expected role of the caller.
    function execute(
        address _target,
        uint256 _value,
        bytes calldata _data,
        bytes32 _role
    ) public payable onlyRole(_role) hasAccess(_target, bytes4(_data[0:4]), _role) {
        _execute(_target, _value, _data);
    }

    // allow others to send ether to this contract.
    receive() external payable {}

    /************************
     * Restricted Functions *
     ************************/

    /// @notice Update the access to target contract.
    /// @param _target The address of target contract.
    /// @param _selectors The list of function selectors to update.
    /// @param _role The role to change.
    /// @param _status True if we are going to add the role, otherwise remove the role.
    function updateAccess(
        address _target,
        bytes4[] memory _selectors,
        bytes32 _role,
        bool _status
    ) external onlyRole(DEFAULT_ADMIN_ROLE) {
        if (_status) {
            for (uint256 i = 0; i < _selectors.length; i++) {
                targetAccess[_target][_selectors[i]].add(_role);
            }
        } else {
            for (uint256 i = 0; i < _selectors.length; i++) {
                targetAccess[_target][_selectors[i]].remove(_role);
            }
        }
    }

    /**********************
     * Internal Functions *
     **********************/

    /// @dev Internal function to call contract. If the call reverted, the error will be popped up.
    /// @param _target The address of target contract.
    /// @param _value The value passing to target contract.
    /// @param _data The calldata passing to target contract.
    function _execute(
        address _target,
        uint256 _value,
        bytes calldata _data
    ) internal {
        // solhint-disable-next-line avoid-low-level-calls
        (bool success, ) = address(_target).call{value: _value}(_data);
        if (!success) {
            // solhint-disable-next-line no-inline-assembly
            assembly {
                let ptr := mload(0x40)
                let size := returndatasize()
                returndatacopy(ptr, 0, size)
                revert(ptr, size)
            }
        }
    }
}

@@ -1,87 +0,0 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";

import {ScrollOwner} from "../misc/ScrollOwner.sol";

contract ScrollOwnerTest is DSTestPlus {
    event Call();

    ScrollOwner private owner;

    function setUp() public {
        owner = new ScrollOwner();
    }

    function testUpdateAccess() external {
        // not admin, evert
        hevm.startPrank(address(1));
        hevm.expectRevert(
            "AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x0000000000000000000000000000000000000000000000000000000000000000"
        );
        owner.updateAccess(address(0), new bytes4[](0), bytes32(0), true);
        hevm.stopPrank();

        bytes4[] memory _selectors;
        bytes32[] memory _roles;

        // add access then remove access
        _roles = owner.callableRoles(address(this), ScrollOwnerTest.revertOnCall.selector);
        assertEq(0, _roles.length);
        _selectors = new bytes4[](1);
        _selectors[0] = ScrollOwnerTest.revertOnCall.selector;
        owner.updateAccess(address(this), _selectors, bytes32(uint256(1)), true);
        _roles = owner.callableRoles(address(this), ScrollOwnerTest.revertOnCall.selector);
        assertEq(1, _roles.length);
        assertEq(_roles[0], bytes32(uint256(1)));
        owner.updateAccess(address(this), _selectors, bytes32(uint256(1)), false);
        _roles = owner.callableRoles(address(this), ScrollOwnerTest.revertOnCall.selector);
        assertEq(0, _roles.length);
    }

    function testAdminExecute() external {
        // call with revert
        hevm.expectRevert("Called");
        owner.execute(address(this), 0, abi.encodeWithSelector(ScrollOwnerTest.revertOnCall.selector), bytes32(0));

        // call with emit
        hevm.expectEmit(false, false, false, true);
        emit Call();
        owner.execute(address(this), 0, abi.encodeWithSelector(ScrollOwnerTest.emitOnCall.selector), bytes32(0));
    }

    function testExecute(bytes32 _role) external {
        hevm.assume(_role != bytes32(0));

        bytes4[] memory _selectors = new bytes4[](2);
        _selectors[0] = ScrollOwnerTest.revertOnCall.selector;
        _selectors[1] = ScrollOwnerTest.emitOnCall.selector;

        owner.grantRole(_role, address(this));

        // no access, revert
        hevm.expectRevert("no access");
        owner.execute(address(this), 0, abi.encodeWithSelector(ScrollOwnerTest.revertOnCall.selector), _role);

        owner.updateAccess(address(this), _selectors, _role, true);

        // call with revert
        hevm.expectRevert("Called");
        owner.execute(address(this), 0, abi.encodeWithSelector(ScrollOwnerTest.revertOnCall.selector), _role);

        // call with emit
        hevm.expectEmit(false, false, false, true);
        emit Call();
        owner.execute(address(this), 0, abi.encodeWithSelector(ScrollOwnerTest.emitOnCall.selector), _role);
    }

    function revertOnCall() external pure {
        revert("Called");
    }

    function emitOnCall() external {
        emit Call();
    }
}

@@ -71,11 +71,6 @@ func action(ctx *cli.Context) error {

    apiSrv := apiServer(ctx, cfg, db, registry)

    log.Info(
        "coordinator start successfully",
        "version", version.Version,
    )

    // Catch CTRL-C to ensure a graceful shutdown.
    interrupt := make(chan os.Signal, 1)
    signal.Notify(interrupt, os.Interrupt)

@@ -81,12 +81,11 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
    }
    // Reset prover manager config for manager test cases.
    cfg.ProverManager = &coordinatorConfig.ProverManager{
        ProversPerSession: 1,
        Verifier: &coordinatorConfig.VerifierConfig{MockMode: true},
        BatchCollectionTimeSec: 60,
        ChunkCollectionTimeSec: 60,
        SessionAttempts: 10,
        MaxVerifierWorkers: 4,
        ProversPerSession: 1,
        Verifier: &coordinatorConfig.VerifierConfig{MockMode: true},
        CollectionTimeSec: 60,
        SessionAttempts: 10,
        MaxVerifierWorkers: 4,
    }
    cfg.DB.DSN = base.DBImg.Endpoint()
    cfg.L2.ChainID = 111

@@ -2,8 +2,7 @@
  "prover_manager": {
    "provers_per_session": 1,
    "session_attempts": 5,
    "batch_collection_time_sec": 180,
    "chunk_collection_time_sec": 180,
    "collection_time_sec": 180,
    "verifier": {
      "mock_mode": true,
      "params_path": "",

@@ -17,10 +17,8 @@ type ProverManager struct {
    SessionAttempts uint8 `json:"session_attempts"`
    // Zk verifier config.
    Verifier *VerifierConfig `json:"verifier"`
    // BatchCollectionTimeSec batch Proof collection time (in seconds).
    BatchCollectionTimeSec int `json:"batch_collection_time_sec"`
    // ChunkCollectionTimeSec chunk Proof collection time (in seconds).
    ChunkCollectionTimeSec int `json:"chunk_collection_time_sec"`
    // Proof collection time (in seconds).
    CollectionTimeSec int `json:"collection_time_sec"`
    // Max number of workers in verifier worker pool
    MaxVerifierWorkers int `json:"max_verifier_workers"`
}

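Note: this hunk collapses the separate batch and chunk proof-collection timeouts into a single `collection_time_sec`. A minimal, hedged sketch of decoding the consolidated shape (field names and JSON keys are taken from the hunks above; the surrounding coordinator config and the exact integer types of some fields are simplified assumptions):

```go
package main

import (
    "encoding/json"
    "fmt"
)

// VerifierConfig mirrors only the keys visible in this diff.
type VerifierConfig struct {
    MockMode   bool   `json:"mock_mode"`
    ParamsPath string `json:"params_path"`
}

// ProverManager is reduced to the fields shown in the hunks above.
type ProverManager struct {
    ProversPerSession  uint8           `json:"provers_per_session"`
    SessionAttempts    uint8           `json:"session_attempts"`
    Verifier           *VerifierConfig `json:"verifier"`
    CollectionTimeSec  int             `json:"collection_time_sec"`
    MaxVerifierWorkers int             `json:"max_verifier_workers"`
}

func main() {
    raw := `{"provers_per_session": 1, "session_attempts": 5, "collection_time_sec": 180,
             "max_verifier_workers": 4, "verifier": {"mock_mode": true, "params_path": ""}}`
    var pm ProverManager
    if err := json.Unmarshal([]byte(raw), &pm); err != nil {
        panic(err)
    }
    fmt.Println(pm.CollectionTimeSec) // prints 180
}
```
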
@@ -15,8 +15,7 @@ func TestConfig(t *testing.T) {
    "prover_manager": {
        "provers_per_session": 1,
        "session_attempts": 5,
        "batch_collection_time_sec": 180,
        "chunk_collection_time_sec": 180,
        "collection_time_sec": 180,
        "verifier": {
            "mock_mode": true,
            "params_path": "",

@@ -64,7 +64,7 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
        proofMsg.BatchProof = &tmpBatchProof
    }

    if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg, spp); err != nil {
    if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg); err != nil {
        nerr := fmt.Errorf("handle zk proof failure, err:%w", err)
        coodinatorType.RenderJSON(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr, nil)
        return

@@ -29,10 +29,8 @@ type Collector struct {
    chunkOrm *orm.Chunk
    batchOrm *orm.Batch

    timeoutBatchCheckerRunTotal prometheus.Counter
    batchProverTaskTimeoutTotal prometheus.Counter
    timeoutChunkCheckerRunTotal prometheus.Counter
    chunkProverTaskTimeoutTotal prometheus.Counter
    timeoutCheckerRunTotal prometheus.Counter
    proverTaskTimeoutTotal prometheus.Counter
}

// NewCollector create a collector to cron collect the data to send to prover
@@ -46,26 +44,17 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
        chunkOrm: orm.NewChunk(db),
        batchOrm: orm.NewBatch(db),

        timeoutBatchCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
            Name: "coordinator_batch_timeout_checker_run_total",
            Help: "Total number of batch timeout checker run.",
        timeoutCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
            Name: "coordinator_timeout_checker_run_total",
            Help: "Total number of timeout checker run.",
        }),
        batchProverTaskTimeoutTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
            Name: "coordinator_batch_prover_task_timeout_total",
            Help: "Total number of batch timeout prover task.",
        }),
        timeoutChunkCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
            Name: "coordinator_chunk_timeout_checker_run_total",
            Help: "Total number of chunk timeout checker run.",
        }),
        chunkProverTaskTimeoutTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
            Name: "coordinator_chunk_prover_task_timeout_total",
            Help: "Total number of chunk timeout prover task.",
        proverTaskTimeoutTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
            Name: "coordinator_prover_task_timeout_total",
            Help: "Total number of timeout prover task.",
        }),
    }

    go c.timeoutBatchProofTask()
    go c.timeoutChunkProofTask()
    go c.timeoutProofTask()

    log.Info("Start coordinator successfully.")

@@ -80,10 +69,10 @@ func (c *Collector) Stop() {
// timeoutTask cron check the send task is timeout. if timeout reached, restore the
// chunk/batch task to unassigned. then the batch/chunk collector can retry it.

func (c *Collector) timeoutBatchProofTask() {
func (c *Collector) timeoutProofTask() {
    defer func() {
        if err := recover(); err != nil {
            nerr := fmt.Errorf("timeout batch proof task panic error:%v", err)
            nerr := fmt.Errorf("timeout proof task panic error:%v", err)
            log.Warn(nerr.Error())
        }
    }()
@@ -92,14 +81,52 @@
    for {
        select {
        case <-ticker.C:
            c.timeoutBatchCheckerRunTotal.Inc()
            timeout := time.Duration(c.cfg.ProverManager.BatchCollectionTimeSec) * time.Second
            assignedProverTasks, err := c.proverTaskOrm.GetTimeoutAssignedProverTasks(c.ctx, 10, message.ProofTypeBatch, timeout)
            c.timeoutCheckerRunTotal.Inc()
            timeout := time.Duration(c.cfg.ProverManager.CollectionTimeSec) * time.Second
            assignedProverTasks, err := c.proverTaskOrm.GetTimeoutAssignedProverTasks(c.ctx, 10, timeout)
            if err != nil {
                log.Error("get unassigned session info failure", "error", err)
                break
            }
            c.check(assignedProverTasks, c.batchProverTaskTimeoutTotal)

            // here not update the block batch proving status failed, because the collector loop will check
            // the attempt times. if reach the times, the collector will set the block batch proving status.
            for _, assignedProverTask := range assignedProverTasks {
                c.proverTaskTimeoutTotal.Inc()
                log.Warn("proof task have reach the timeout", "task id", assignedProverTask.TaskID,
                    "prover public key", assignedProverTask.ProverPublicKey, "prover name", assignedProverTask.ProverName, "task type", assignedProverTask.TaskType)
                err = c.db.Transaction(func(tx *gorm.DB) error {
                    // update prover task proving status as ProverProofInvalid
                    if err = c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, message.ProofType(assignedProverTask.TaskType),
                        assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverProofInvalid, tx); err != nil {
                        log.Error("update prover task proving status failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
                        return err
                    }

                    // update prover task failure type
                    if err = c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, message.ProofType(assignedProverTask.TaskType),
                        assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverTaskFailureTypeTimeout, tx); err != nil {
                        log.Error("update prover task failure type failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
                        return err
                    }

                    // update the task to unassigned, let collector restart it
                    if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeChunk {
                        if err = c.chunkOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
                            log.Error("update chunk proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
                        }
                    }
                    if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeBatch {
                        if err = c.batchOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
                            log.Error("update batch proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
                        }
                    }
                    return nil
                })
                if err != nil {
                    log.Error("check task proof is timeout failure", "error", err)
                }
            }
        case <-c.ctx.Done():
            if c.ctx.Err() != nil {
                log.Error("manager context canceled with error", "error", c.ctx.Err())
@@ -111,81 +138,3 @@ func (c *Collector) timeoutBatchProofTask() {
            }
        }
    }
}

func (c *Collector) timeoutChunkProofTask() {
    defer func() {
        if err := recover(); err != nil {
            nerr := fmt.Errorf("timeout proof chunk task panic error:%v", err)
            log.Warn(nerr.Error())
        }
    }()

    ticker := time.NewTicker(time.Second * 2)
    for {
        select {
        case <-ticker.C:
            c.timeoutChunkCheckerRunTotal.Inc()
            timeout := time.Duration(c.cfg.ProverManager.ChunkCollectionTimeSec) * time.Second
            assignedProverTasks, err := c.proverTaskOrm.GetTimeoutAssignedProverTasks(c.ctx, 10, message.ProofTypeChunk, timeout)
            if err != nil {
                log.Error("get unassigned session info failure", "error", err)
                break
            }
            c.check(assignedProverTasks, c.chunkProverTaskTimeoutTotal)

        case <-c.ctx.Done():
            if c.ctx.Err() != nil {
                log.Error("manager context canceled with error", "error", c.ctx.Err())
            }
            return
        case <-c.stopTimeoutChan:
            log.Info("the coordinator run loop exit")
            return
        }
    }
}

func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout prometheus.Counter) {
    // here not update the block batch proving status failed, because the collector loop will check
    // the attempt times. if reach the times, the collector will set the block batch proving status.
    for _, assignedProverTask := range assignedProverTasks {
        if c.proverTaskOrm.TaskTimeoutMoreThanOnce(c.ctx, assignedProverTask.TaskID) {
            log.Warn("Task timeout more than once", "hash", assignedProverTask.TaskID)
        }

        timeout.Inc()
        log.Warn("proof task have reach the timeout", "task id", assignedProverTask.TaskID,
            "prover public key", assignedProverTask.ProverPublicKey, "prover name", assignedProverTask.ProverName, "task type", assignedProverTask.TaskType)
        err := c.db.Transaction(func(tx *gorm.DB) error {
            // update prover task proving status as ProverProofInvalid
            if err := c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, message.ProofType(assignedProverTask.TaskType),
                assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverProofInvalid, tx); err != nil {
                log.Error("update prover task proving status failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
                return err
            }

            // update prover task failure type
            if err := c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, message.ProofType(assignedProverTask.TaskType),
                assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverTaskFailureTypeTimeout, tx); err != nil {
                log.Error("update prover task failure type failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
                return err
            }

            // update the task to unassigned, let collector restart it
            if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeChunk {
                if err := c.chunkOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
                    log.Error("update chunk proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
                }
            }
            if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeBatch {
                if err := c.batchOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
                    log.Error("update batch proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
                }
            }
            return nil
        })
        if err != nil {
            log.Error("check task proof is timeout failure", "error", err)
        }
    }
}

@@ -121,18 +121,14 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, reg pro
// HandleZkProof handle a ZkProof submitted from a prover.
// For now only proving/verifying error will lead to setting status as skipped.
// db/unmarshal errors will not because they are errors on the business logic side.
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) error {
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.ProofMsg) error {
    m.proofReceivedTotal.Inc()
    pk := ctx.GetString(coordinatorType.PublicKey)
    if len(pk) == 0 {
        return fmt.Errorf("get public key from context failed")
    }
    pv := ctx.GetString(coordinatorType.ProverVersion)
    if len(pk) == 0 {
        return fmt.Errorf("get ProverVersion from context failed")
    }

    proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndProver(ctx, proofMsg.ID, pk, pv)
    proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, proofMsg.ID, pk)
    if proverTask == nil || err != nil {
        log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
        return ErrValidatorFailureProverTaskEmpty
@@ -144,7 +140,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
    log.Info("handling zk proof", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
        "prover pk", pk, "prove type", proverTask.TaskType, "proof time", proofTimeSec)

    if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter); err != nil {
    if err = m.validator(ctx, proverTask, pk, proofMsg); err != nil {
        return err
    }

@@ -161,7 +157,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P

    if verifyErr != nil || !success {
        m.verifierFailureTotal.WithLabelValues(proverVersion).Inc()
        m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
        m.proofFailure(ctx, proofMsg.ID, pk, proofMsg)

        log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
            "prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
@@ -205,13 +201,8 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
    return nil
}

func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) (err error) {
    defer func() {
        if err != nil {
            m.validateFailureTotal.Inc()
        }
    }()

func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
    m.validateFailureTotal.Inc()
    // Ensure this prover is eligible to participate in the prover task.
    if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid {
        m.validateFailureProverTaskSubmitTwice.Inc()
@@ -219,12 +210,8 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
        // TODO: Defend invalid proof resubmissions by one of the following two methods:
        // (i) slash the prover for each submission of invalid proof
        // (ii) set the maximum failure retry times
        log.Warn(
            "cannot submit valid proof for a prover task twice",
            "proof type", proverTask.TaskType, "hash", proofMsg.ID,
            "prover name", proverTask.ProverName, "prover version", proverTask.ProverVersion,
            "prover pk", proverTask.ProverPublicKey,
        )
        log.Warn("the prover task cannot submit twice", "hash", proofMsg.ID, "prover pk", proverTask.ProverPublicKey,
            "prover name", proverTask.ProverName, "proof type", proverTask.TaskType)
        return ErrValidatorFailureProverTaskCannotSubmitTwice
    }

@@ -232,12 +219,14 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
    proofTimeSec := uint64(proofTime.Seconds())

    if proofMsg.Status != message.StatusOk {
        m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
        m.validateFailureProverTaskStatusNotOk.Inc()
        log.Info("proof generated by prover failed",
            "prove type", proofMsg.Type, "proof id", proofMsg.ID,
            "prover name", proverTask.ProverName, "prover version", proverTask.ProverVersion,
            "prover pk", "failure type", proofParameter.FailureType, "failure message", proofParameter.FailureMsg)
        log.Info("proof generated by prover failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
            "prover pk", pk, "prove type", proofMsg.Type, "error", proofMsg.Error)

        if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsg.Type, proofMsg.ID, pk, types.ProverProofInvalid); updateErr != nil {
            log.Error("proof generated by prover failed update prover task proving status failure", "proof id", proofMsg.ID,
                "prover name", proverTask.ProverName, "prover pk", pk, "prove type", proofMsg.Type, "error", proofMsg.Error)
        }
        return ErrValidatorFailureProofMsgStatusNotOk
    }

@@ -265,14 +254,14 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
    return nil
}

//func (m *ProofReceiverLogic) proofFailure(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
// log.Info("proof failure update proof status", "hash", hash, "public key", pubKey,
// "proof type", proofMsg.Type.String(), "status", types.ProvingTaskFailed.String())
//
// if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskFailed, 0); err != nil {
// log.Error("failed to updated proof status ProvingTaskFailed", "hash", hash, "pubKey", pubKey, "error", err)
// }
//}
func (m *ProofReceiverLogic) proofFailure(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
    log.Info("proof failure update proof status", "hash", hash, "public key", pubKey,
        "proof type", proofMsg.Type.String(), "status", types.ProvingTaskFailed.String())

    if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskFailed, 0); err != nil {
        log.Error("failed to updated proof status ProvingTaskFailed", "hash", hash, "pubKey", pubKey, "error", err)
    }
}

func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
    log.Info("proof recover update proof status", "hash", hash, "public key", pubKey,

@@ -113,18 +113,16 @@ func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, hashes []string
    return proverTasks, nil
}

// GetProverTaskByTaskIDAndProver get prover task taskID and public key
func (o *ProverTask) GetProverTaskByTaskIDAndProver(ctx context.Context, taskID, proverPublicKey, proverVersion string) (*ProverTask, error) {
// GetProverTaskByTaskIDAndPubKey get prover task taskID and public key
func (o *ProverTask) GetProverTaskByTaskIDAndPubKey(ctx context.Context, taskID, proverPublicKey string) (*ProverTask, error) {
    db := o.db.WithContext(ctx)
    db = db.Model(&ProverTask{})
    db = db.Where("task_id", taskID)
    db = db.Where("prover_public_key", proverPublicKey)
    db = db.Where("prover_version", proverVersion)
    db = db.Where("task_id", taskID).Where("prover_public_key", proverPublicKey)

    var proverTask ProverTask
    err := db.First(&proverTask).Error
    if err != nil {
        return nil, fmt.Errorf("ProverTask.GetProverTaskByTaskIDAndProver err:%w, taskID:%s, pubkey:%s, prover_version:%s", err, taskID, proverPublicKey, proverVersion)
        return nil, fmt.Errorf("ProverTask.GetProverTaskByTaskIDAndPubKey err:%w, taskID:%s, pubukey:%s", err, taskID, proverPublicKey)
    }
    return &proverTask, nil
}
@@ -144,11 +142,10 @@ func (o *ProverTask) GetProvingStatusByTaskID(ctx context.Context, taskID string
}

// GetTimeoutAssignedProverTasks get the timeout and assigned proving_status prover task
func (o *ProverTask) GetTimeoutAssignedProverTasks(ctx context.Context, limit int, taskType message.ProofType, timeout time.Duration) ([]ProverTask, error) {
func (o *ProverTask) GetTimeoutAssignedProverTasks(ctx context.Context, limit int, timeout time.Duration) ([]ProverTask, error) {
    db := o.db.WithContext(ctx)
    db = db.Model(&ProverTask{})
    db = db.Where("proving_status", int(types.ProverAssigned))
    db = db.Where("task_type", int(taskType))
    db = db.Where("assigned_at < ?", utils.NowUTC().Add(-timeout))
    db = db.Limit(limit)

@@ -160,25 +157,6 @@ func (o *ProverTask) GetTimeoutAssignedProverTasks(ctx context.Context, limit in
    return proverTasks, nil
}

// TaskTimeoutMoreThanOnce get the timeout twice task. a temp design
func (o *ProverTask) TaskTimeoutMoreThanOnce(ctx context.Context, taskID string) bool {
    db := o.db.WithContext(ctx)
    db = db.Model(&ProverTask{})
    db = db.Where("task_id", taskID)
    db = db.Where("proving_status", int(types.ProverProofInvalid))

    var count int64
    if err := db.Count(&count).Error; err != nil {
        return true
    }

    if count >= 1 {
        return true
    }

    return false
}

// SetProverTask updates or inserts a ProverTask record.
func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask, dbTX ...*gorm.DB) error {
    db := o.db.WithContext(ctx)
@@ -188,8 +166,8 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask,

    db = db.Model(&ProverTask{})
    db = db.Clauses(clause.OnConflict{
        Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}, {Name: "prover_version"}},
        DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
        Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
        DoUpdates: clause.AssignmentColumns([]string{"prover_version", "proving_status", "failure_type", "assigned_at"}),
    })

    if err := db.Create(&proverTask).Error; err != nil {

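Note: dropping `prover_version` from the conflict key and adding it to the updated columns means that re-assigning the same (task_type, task_id, prover_public_key) with a newer prover version now updates the existing row instead of inserting a second one. A hedged, self-contained sketch of that GORM upsert pattern (model reduced to the columns visible in the diff; SQLite used only to make the example runnable):

```go
package main

import (
    "fmt"

    "gorm.io/driver/sqlite"
    "gorm.io/gorm"
    "gorm.io/gorm/clause"
)

// ProverTask is reduced for illustration; the real ORM model has more columns.
type ProverTask struct {
    TaskType        int    `gorm:"uniqueIndex:uk_task"`
    TaskID          string `gorm:"uniqueIndex:uk_task"`
    ProverPublicKey string `gorm:"uniqueIndex:uk_task"`
    ProverVersion   string
    ProvingStatus   int
}

func main() {
    db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{})
    if err != nil {
        panic(err)
    }
    if err := db.AutoMigrate(&ProverTask{}); err != nil {
        panic(err)
    }

    upsert := func(t *ProverTask) error {
        return db.Clauses(clause.OnConflict{
            // Conflict key no longer includes prover_version ...
            Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
            // ... so a newer prover_version overwrites the existing assignment.
            DoUpdates: clause.AssignmentColumns([]string{"prover_version", "proving_status"}),
        }).Create(t).Error
    }

    _ = upsert(&ProverTask{TaskType: 1, TaskID: "task-1", ProverPublicKey: "pk", ProverVersion: "v1.0.0"})
    _ = upsert(&ProverTask{TaskType: 1, TaskID: "task-1", ProverPublicKey: "pk", ProverVersion: "v1.1.0"})

    var count int64
    db.Model(&ProverTask{}).Count(&count)
    fmt.Println(count) // prints 1: the second call updated the row rather than duplicating it
}
```
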
@@ -2,10 +2,8 @@ package types

// SubmitProofParameter the SubmitProof api request parameter
type SubmitProofParameter struct {
    TaskID string `form:"task_id" json:"task_id" binding:"required"`
    TaskType int `form:"task_type" json:"task_type" binding:"required"`
    Status int `form:"status" json:"status"`
    Proof string `form:"proof" json:"proof"`
    FailureType int `form:"failure_type" json:"failure_type"`
    FailureMsg string `form:"failure_msg" json:"failure_msg"`
    TaskID string `form:"task_id" json:"task_id" binding:"required"`
    TaskType int `form:"task_type" json:"task_type" binding:"required"`
    Status int `form:"status" json:"status"`
    Proof string `form:"proof" json:"proof"`
}

@@ -76,12 +76,11 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
        ChainID: 111,
    },
    ProverManager: &config.ProverManager{
        ProversPerSession: proversPerSession,
        Verifier: &config.VerifierConfig{MockMode: true},
        BatchCollectionTimeSec: 10,
        ChunkCollectionTimeSec: 10,
        MaxVerifierWorkers: 10,
        SessionAttempts: 5,
        ProversPerSession: proversPerSession,
        Verifier: &config.VerifierConfig{MockMode: true},
        CollectionTimeSec: 10,
        MaxVerifierWorkers: 10,
        SessionAttempts: 5,
    },
    Auth: &config.Auth{
        ChallengeExpireDurationSec: tokenTimeout,
@@ -317,7 +316,7 @@ func testInvalidProof(t *testing.T) {
    assert.NoError(t, err)
    batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
    assert.NoError(t, err)
    if chunkProofStatus == types.ProvingTaskUnassigned && batchProofStatus == types.ProvingTaskUnassigned {
    if chunkProofStatus == types.ProvingTaskFailed && batchProofStatus == types.ProvingTaskFailed {
        return
    }
case <-tickStop:
@@ -439,7 +438,7 @@ func testTimeoutProof(t *testing.T) {
    assert.Equal(t, batchProofStatus, types.ProvingTaskAssigned)

    // wait coordinator to reset the prover task proving status
    time.Sleep(time.Duration(conf.ProverManager.BatchCollectionTimeSec*2) * time.Second)
    time.Sleep(time.Duration(conf.ProverManager.CollectionTimeSec*2) * time.Second)

    // create second mock prover, that will send valid proof.
    chunkProver2 := newMockProver(t, "prover_test"+strconv.Itoa(2), coordinatorURL, message.ProofTypeChunk)

@@ -26,7 +26,7 @@ create table prover_task
    updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deleted_at TIMESTAMP(0) DEFAULT NULL,

    CONSTRAINT uk_tasktype_taskid_publickey_version UNIQUE (task_type, task_id, prover_public_key, prover_version)
    CONSTRAINT uk_tasktype_taskid_publickey UNIQUE (task_type, task_id, prover_public_key)
);

comment

go.work.sum (16 changed lines)
@@ -193,7 +193,7 @@ github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7 h1:6IrxszG5G+O7zhtkWxq6+unVvnrm1fqV2Pe+T95DUzw=
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7/go.mod h1:gFnFS95y8HstDP6P9pPwzrxOOC5TRDkwbM+ao15ChAI=
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI=
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 h1:DuBDHVjgGMPki7bAyh91+3cF1Vh34sAEdH8JQgbc2R0=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
@@ -238,7 +238,7 @@ github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf h1:Yt+4K30SdjOkRoRRm3v
github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7 h1:kgvzE5wLsLa7XKfV85VZl40QXaMCaeFtHpPwJ8fhotY=
github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7/go.mod h1:yRkwfj0CBpOGre+TwBsqPV0IH0Pk73e4PXJOeNDboGs=
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7 h1:tYwu/z8Y0NkkzGEh3z21mSWggMg4LwLRFucLS7TjARg=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d h1:W1n4DvpzZGOISgp7wWNtraLcHtnmnTwBlJidqtMIuwQ=
@@ -277,7 +277,7 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILD
github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8=
github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732 h1:AB7YjNrzlVHsYz06zCULVV2zYCEft82P86dSmtwxKL0=
github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732/go.mod h1:o/XfIXWi4/GqbQirfRm5uTbXMG5NpqxkxblnbZ+QM9I=
github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b/go.mod h1:CDncRYVRSDqwakm282WEkjfaAj1hxU/v5RXxk5nXOiI=
github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b h1:vMT47RYsrftsHSTQhqXwC3BYflo38OLC3Y4LtXtLyU0=
github.com/getkin/kin-openapi v0.61.0 h1:6awGqF5nG5zkVpMsAih1QH4VgzS8phTxECUWIFo7zko=
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM=
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
@@ -301,7 +301,6 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk=
@@ -374,7 +373,7 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg=
github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U=
github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
@@ -465,14 +464,13 @@ github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jsternberg/zap-logfmt v1.0.0 h1:0Dz2s/eturmdUS34GM82JwNEdQ9hPoJgqptcEKcbpzY=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 h1:PJr+ZMXIecYc1Ey2zucXdR73SMBtgjPgwa31099IMv0=
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef h1:2jNeR4YUziVtswNP9sEFAI913cVrzH85T+8Q6LpYbT0=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM=
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c h1:AqsttAyEyIEsNz5WLRwuRwjiT5CMDUfLk6cFJDVPebs=
github.com/kataras/blocks v0.0.7 h1:cF3RDY/vxnSRezc7vLFlQFTYXG/yAr1o7WImJuZbzC4=
github.com/kataras/blocks v0.0.7/go.mod h1:UJIU97CluDo0f+zEjbnbkeMRlvYORtmc1304EeyXf4I=
github.com/kataras/golog v0.1.7 h1:0TY5tHn5L5DlRIikepcaRR/6oInIr9AiWsxzt0vvlBE=
@@ -711,7 +709,7 @@ go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4A
go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc=
go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -782,7 +780,7 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=

@@ -94,8 +94,8 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask,

    db = db.Model(&ProverTask{})
    db = db.Clauses(clause.OnConflict{
        Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}, {Name: "prover_version"}},
        DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
        Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
        DoUpdates: clause.AssignmentColumns([]string{"prover_version", "proving_status", "failure_type", "assigned_at"}),
    })

    if err := db.Create(&proverTask).Error; err != nil {

@@ -52,11 +52,9 @@ func action(ctx *cli.Context) error {
    r.Start()

    defer r.Stop()
    log.Info(
        "prover start successfully",
    log.Info("prover start successfully",
        "name", cfg.ProverName, "type", cfg.Core.ProofType,
        "publickey", r.PublicKey(), "version", version.Version,
    )
        "publickey", r.PublicKey(), "version", version.Version)

    // Catch CTRL-C to ensure a graceful shutdown.
    interrupt := make(chan os.Signal, 1)

@@ -13,7 +13,6 @@ import (
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/stretchr/testify/assert"

    scrollTypes "scroll-tech/common/types"
    "scroll-tech/common/types/message"

    "scroll-tech/prover/config"
@@ -21,67 +20,15 @@
)

var (
    paramsPath = flag.String("params", "/assets/test_params", "params dir")
    proofDumpPath = flag.String("dump", "/assets/proof_data", "the path proofs dump to")
    tracePath1 = flag.String("trace1", "/assets/traces/1_transfer.json", "chunk trace 1")
    tracePath2 = flag.String("trace2", "/assets/traces/10_transfer.json", "chunk trace 2")
    paramsPath = flag.String("params", "/assets/test_params", "params dir")
    proofDumpPath = flag.String("dump", "/assets/proof_data", "the path proofs dump to")
    batchTaskDetailPath = flag.String("batch-task-detail", "/assets/traces/full_proof_1.json", "batch-task-detail")
)

func TestFFI(t *testing.T) {
    as := assert.New(t)

    chunkProverConfig := &config.ProverCoreConfig{
        DumpDir: *proofDumpPath,
        ParamsPath: *paramsPath,
        ProofType: message.ProofTypeChunk,
    }
    chunkProverCore, err := core.NewProverCore(chunkProverConfig)
    as.NoError(err)
    t.Log("Constructed chunk prover")

    chunkTrace1 := readChunkTrace(*tracePath1, as)
    chunkTrace2 := readChunkTrace(*tracePath2, as)
    t.Log("Loaded chunk traces")

    chunkInfo1, err := chunkProverCore.TracesToChunkInfo(chunkTrace1)
    as.NoError(err)
    chunkInfo2, err := chunkProverCore.TracesToChunkInfo(chunkTrace2)
    as.NoError(err)
    t.Log("Converted to chunk infos")

    wrappedBlock1 := &scrollTypes.WrappedBlock{
        Header: chunkTrace1[0].Header,
        Transactions: chunkTrace1[0].Transactions,
        WithdrawRoot: chunkTrace1[0].WithdrawTrieRoot,
    }
    chunk1 := &scrollTypes.Chunk{Blocks: []*scrollTypes.WrappedBlock{wrappedBlock1}}
    chunkHash1, err := chunk1.Hash(0)
    as.NoError(err)
    as.Equal(chunkInfo1.PostStateRoot, wrappedBlock1.Header.Root)
    as.Equal(chunkInfo1.WithdrawRoot, wrappedBlock1.WithdrawRoot)
    as.Equal(chunkInfo1.DataHash, chunkHash1)
    t.Log("Successful to check chunk info 1")

    wrappedBlock2 := &scrollTypes.WrappedBlock{
        Header: chunkTrace2[0].Header,
        Transactions: chunkTrace2[0].Transactions,
        WithdrawRoot: chunkTrace2[0].WithdrawTrieRoot,
    }
    chunk2 := &scrollTypes.Chunk{Blocks: []*scrollTypes.WrappedBlock{wrappedBlock2}}
    chunkHash2, err := chunk2.Hash(chunk1.NumL1Messages(0))
    as.NoError(err)
    as.Equal(chunkInfo2.PostStateRoot, wrappedBlock2.Header.Root)
    as.Equal(chunkInfo2.WithdrawRoot, wrappedBlock2.WithdrawRoot)
    as.Equal(chunkInfo2.DataHash, chunkHash2)
    t.Log("Successful to check chunk info 2")

    chunkProof1, err := chunkProverCore.ProveChunk("chunk_proof1", chunkTrace1)
    as.NoError(err)
    t.Log("Generated and dumped chunk proof 1")

    chunkProof2, err := chunkProverCore.ProveChunk("chunk_proof2", chunkTrace2)
    as.NoError(err)
    t.Log("Generated and dumped chunk proof 2")
    batchTaskDetail := readBatchTaskDetail(*batchTaskDetailPath, as)

    batchProverConfig := &config.ProverCoreConfig{
        DumpDir: *proofDumpPath,
@@ -91,13 +38,23 @@ func TestFFI(t *testing.T) {
    batchProverCore, err := core.NewProverCore(batchProverConfig)
    as.NoError(err)

    chunkInfos := []*message.ChunkInfo{chunkInfo1, chunkInfo2}
    chunkProofs := []*message.ChunkProof{chunkProof1, chunkProof2}
    _, err = batchProverCore.ProveBatch("batch_proof", chunkInfos, chunkProofs)
    _, err = batchProverCore.ProveBatch("batch_proof", batchTaskDetail.ChunkInfos, batchTaskDetail.ChunkProofs)
    as.NoError(err)
    t.Log("Generated and dumped batch proof")
}

func readBatchTaskDetail(filePat string, as *assert.Assertions) *message.BatchTaskDetail {
    f, err := os.Open(filePat)
    as.NoError(err)
    byt, err := io.ReadAll(f)
    as.NoError(err)

    d := &message.BatchTaskDetail{}
    as.NoError(json.Unmarshal(byt, d))

    return d
}

func readChunkTrace(filePat string, as *assert.Assertions) []*types.BlockTrace {
    f, err := os.Open(filePat)
    as.NoError(err)