Compare commits

16 Commits

Author SHA1 Message Date
colin
41b07bd05a feat(rollup-relayer): add calldata log in commitBatch failed tx (#760) 2023-08-09 17:28:07 +08:00
maskpp
90dc0911d3 feat: add prover-stats-api docker file (#758) 2023-08-09 17:10:38 +08:00
HAOYUatHZ
3f775ae7bc feat(coordinator): add ProverVersion to ProverTask (#756) 2023-08-09 15:43:29 +08:00
Steven
674b801005 fix: upgrade libzkp to use scroll-prover v0.5.10 (#754) 2023-08-08 21:08:32 +08:00
georgehao
c5b80937ce fix(coordinator): fix get task exceed the attempt times (#753)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-08 12:50:27 +08:00
Xi Lin
ea3e08ab2a fix(contracts): initialize maxGasLimit by vm.env (#752) 2023-08-08 11:53:30 +08:00
Steven
0360f44ff6 fix: upgrade libzkp to use scroll-prover v0.5.8 (#750) 2023-08-07 23:17:19 +08:00
Xinran
1b57982368 fix(prover): fix ZK_VERSION in Makefile (#751) 2023-08-07 22:51:25 +08:00
Steven
b09c2bbecb fix: upgrade to use scroll-prover v0.5.7 (#749) 2023-08-07 20:51:24 +08:00
HAOYUatHZ
7d2a516be1 bump version (#747) 2023-08-07 16:30:56 +08:00
Péter Garamvölgyi
ee55fe3d51 fix: do not update batch rollup_status to FinalizationFailed on tx error (#745) 2023-08-07 16:18:57 +08:00
Steven
09d7764dcb fix: upgrade libzkp to use scroll-prover v0.5.6 (#744) 2023-08-07 12:26:19 +08:00
Xi Lin
4cd199b3b3 test(contracts): add unit tests when num txs < num L1 msgs (#742)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-08-07 05:13:04 +02:00
Péter Garamvölgyi
ced64e8563 refactor: remove debug log (#743) 2023-08-06 21:56:43 +02:00
Péter Garamvölgyi
336d76e0dc fix: Consider skipped messages in block.numTransaction encoding (#741) 2023-08-06 21:45:58 +02:00
Péter Garamvölgyi
a0ca0e6295 feat: commit batch extra logs (#740) 2023-08-06 21:06:35 +02:00
22 changed files with 147 additions and 58 deletions

View File

@@ -377,7 +377,14 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send commitBatch tx to layer1 ", "err", err)
log.Error(
"Failed to send commitBatch tx to layer1",
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(calldata),
"err", err,
)
}
return
}
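
The commitBatch failure log above now records the batch index, batch hash, rollup contract address, and the full calldata as a hex string (#760), so a failed transaction can be inspected or replayed offline. A minimal sketch of reading that hex back, assuming go-ethereum's common package (the same one the relayer uses for Bytes2Hex); the logged value below is a placeholder, not real calldata:

package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// "calldata" value copied from the log line (hex without 0x prefix, as produced
	// by common.Bytes2Hex); placeholder bytes here.
	logged := "aabbccdd" + strings.Repeat("00", 32) // selector + 32 zero bytes (placeholder)
	calldata := common.Hex2Bytes(logged)            // inverse of common.Bytes2Hex
	if len(calldata) >= 4 {
		fmt.Printf("selector: 0x%x, payload length: %d bytes\n", calldata[:4], len(calldata)-4)
	}
}
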
@@ -424,7 +431,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
return
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash)
success := false
var parentBatchStateRoot string
if batch.Index > 0 {
@@ -438,24 +444,14 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
parentBatchStateRoot = parentBatch.StateRoot
}
defer func() {
// TODO: need to revisit this and have a more fine-grained error handling
if !success {
log.Info("Failed to upload the proof, change rollup status to RollupFinalizeFailed", "hash", hash)
if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizeFailed); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
}
}
}()
aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, hash)
if err != nil {
log.Warn("get verified proof by hash failed", "hash", hash, "err", err)
log.Error("get verified proof by hash failed", "hash", hash, "err", err)
return
}
if err = aggProof.SanityCheck(); err != nil {
log.Warn("agg_proof sanity check fails", "hash", hash, "error", err)
log.Error("agg_proof sanity check fails", "hash", hash, "error", err)
return
}
@@ -478,8 +474,18 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
finalizeTxHash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("finalizeBatchWithProof in layer1 failed",
"index", batch.Index, "hash", batch.Hash, "err", err)
// This can happen normally if we try to finalize 2 or more
// batches around the same time. The 2nd tx might fail since
// the client does not see the 1st tx's updates at this point.
// TODO: add more fine-grained error handling
log.Error(
"finalizeBatchWithProof in layer1 failed",
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(data),
"err", err,
)
}
return
}
@@ -489,11 +495,10 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
// record and sync with db, @todo handle db error
err = r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed",
log.Error("UpdateFinalizeTxHashAndRollupStatus failed",
"index", batch.Index, "batch hash", batch.Hash,
"tx hash", finalizeTxHash.String(), "err", err)
}
success = true
r.processingFinalization.Store(txID, hash)
case types.ProvingTaskFailed:
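
Together with the test change below, this implements #745: the success flag and its deferred UpdateRollupStatus(..., RollupFinalizeFailed) call are gone, so a missing proof, a failed proof sanity check, or a transient transaction error now only logs (at Error rather than Warn level) and returns, leaving the batch's rollup status unchanged so finalization can be retried on a later pass.
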

View File

@@ -90,10 +90,9 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizeFailed, statuses[0])
// no valid proof, rollup status remains the same
assert.Equal(t, types.RollupCommitted, statuses[0])
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
assert.NoError(t, err)
proof := &message.BatchProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}

View File

@@ -0,0 +1,31 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.19 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
# Support mainland environment.
#ENV GOPROXY="https://goproxy.cn,direct"
RUN go mod download -x
# Build prover-stats-api
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/prover-stats-api/cmd/ && go build -v -p 4 -o /bin/prover-stats-api
# Pull prover-stats-api into a second stage deploy alpine container \
FROM alpine:latest
COPY --from=builder /bin/prover-stats-api /bin/
ENTRYPOINT ["prover-stats-api"]

View File

@@ -32,7 +32,7 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
dependencies = [
"ark-std",
"env_logger 0.10.0",
@@ -432,7 +432,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
dependencies = [
"eth-types",
"ethers-core",
@@ -1048,7 +1048,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
dependencies = [
"ethers-core",
"ethers-signers",
@@ -1225,7 +1225,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
dependencies = [
"eth-types",
"geth-utils",
@@ -1438,7 +1438,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
dependencies = [
"digest 0.7.6",
"eth-types",
@@ -1478,7 +1478,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
dependencies = [
"env_logger 0.9.3",
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
@@ -2076,7 +2076,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
dependencies = [
"env_logger 0.9.3",
"eth-types",
@@ -2263,7 +2263,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
dependencies = [
"eth-types",
"ethers-core",
@@ -2278,7 +2278,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
dependencies = [
"bus-mapping",
"eth-types",
@@ -2754,7 +2754,7 @@ dependencies = [
[[package]]
name = "prover"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.3#337089ac40bac756d88b9ae30a3be1f82538b216"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.10#cb8f71475e6aa9fc78a24a832369fecb1c7d2201"
dependencies = [
"aggregator",
"anyhow",
@@ -4039,7 +4039,7 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "types"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.3#337089ac40bac756d88b9ae30a3be1f82538b216"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.10#cb8f71475e6aa9fc78a24a832369fecb1c7d2201"
dependencies = [
"base64 0.13.1",
"blake2",
@@ -4490,7 +4490,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
dependencies = [
"array-init",
"bus-mapping",

View File

@@ -20,8 +20,8 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }
[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.3" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.3" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.10" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.10" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
log = "0.4"

View File

@@ -169,7 +169,7 @@ func TestBatchHeaderEncode(t *testing.T) {
assert.NotNil(t, batchHeader)
bytes = batchHeader.Encode()
assert.Equal(t, 121, len(bytes))
assert.Equal(t, "010000000000000001000000000000000b000000000000000b457a9e90e8e51ba2de2f66c6b589540b88cf594dac7fa7d04b99cdcfecf24e384136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f9498500000000000000000000000000000000000000000000000000000000000003ff", common.Bytes2Hex(bytes))
assert.Equal(t, "010000000000000001000000000000000b000000000000000b34f419ce7e882295bdb5aec6cce56ffa788a5fed4744d7fbd77e4acbf409f1ca4136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f9498500000000000000000000000000000000000000000000000000000000000003ff", common.Bytes2Hex(bytes))
}
func TestBatchHeaderHash(t *testing.T) {
@@ -230,7 +230,7 @@ func TestBatchHeaderHash(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, batchHeader)
hash = batchHeader.Hash()
assert.Equal(t, "0ec9547c6645d5f0c1254e121f49e93f54525cfda5bfb2236440fb3470f48902", common.Bytes2Hex(hash.Bytes()))
assert.Equal(t, "1c3007880f0eafe74572ede7d164ff1ee5376e9ac9bff6f7fb837b2630cddc9a", common.Bytes2Hex(hash.Bytes()))
}
func TestBatchHeaderDecode(t *testing.T) {

View File

@@ -36,6 +36,17 @@ func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
return *lastQueueIndex - totalL1MessagePoppedBefore + 1
}
// NumL2Transactions returns the number of L2 transactions in this block.
func (w *WrappedBlock) NumL2Transactions() uint64 {
var count uint64
for _, txData := range w.Transactions {
if txData.Type != types.L1MessageTxType {
count++
}
}
return count
}
// Encode encodes the WrappedBlock into RollupV2 BlockContext Encoding.
func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
bytes := make([]byte, 60)
@@ -43,20 +54,25 @@ func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error)
if !w.Header.Number.IsUint64() {
return nil, errors.New("block number is not uint64")
}
if len(w.Transactions) > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}
// note: numL1Messages includes skipped messages
numL1Messages := w.NumL1Messages(totalL1MessagePoppedBefore)
if numL1Messages > math.MaxUint16 {
return nil, errors.New("number of L1 messages exceeds max uint16")
}
// note: numTransactions includes skipped messages
numL2Transactions := w.NumL2Transactions()
numTransactions := numL1Messages + numL2Transactions
if numTransactions > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}
binary.BigEndian.PutUint64(bytes[0:], w.Header.Number.Uint64())
binary.BigEndian.PutUint64(bytes[8:], w.Header.Time)
// TODO: [16:47] Currently, baseFee is 0, because we disable EIP-1559.
binary.BigEndian.PutUint64(bytes[48:], w.Header.GasLimit)
binary.BigEndian.PutUint16(bytes[56:], uint16(len(w.Transactions)))
binary.BigEndian.PutUint16(bytes[56:], uint16(numTransactions))
binary.BigEndian.PutUint16(bytes[58:], uint16(numL1Messages))
return bytes, nil
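
For reference, the offsets used in Encode above imply the following 60-byte BlockContext layout. The decode helper is a sketch written for this comparison (blockContext and decodeBlockContext are illustrative names, not repository code):

package codec

import (
	"encoding/binary"
	"fmt"
)

// blockContext mirrors the byte layout produced by WrappedBlock.Encode.
type blockContext struct {
	Number    uint64 // bytes [0:8)   block number
	Timestamp uint64 // bytes [8:16)  block timestamp
	// bytes [16:48) hold the 32-byte base fee, currently always zero (EIP-1559 disabled)
	GasLimit        uint64 // bytes [48:56) block gas limit
	NumTransactions uint16 // bytes [56:58) L1 messages (incl. skipped) + L2 transactions
	NumL1Messages   uint16 // bytes [58:60) L1 messages, including skipped ones
}

func decodeBlockContext(b []byte) (*blockContext, error) {
	if len(b) != 60 {
		return nil, fmt.Errorf("invalid BlockContext length: %d", len(b))
	}
	return &blockContext{
		Number:          binary.BigEndian.Uint64(b[0:8]),
		Timestamp:       binary.BigEndian.Uint64(b[8:16]),
		GasLimit:        binary.BigEndian.Uint64(b[48:56]),
		NumTransactions: binary.BigEndian.Uint16(b[56:58]),
		NumL1Messages:   binary.BigEndian.Uint16(b[58:60]),
	}, nil
}
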

View File

@@ -65,9 +65,10 @@ func TestChunkEncode(t *testing.T) {
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
assert.Equal(t, 97, len(bytes))
assert.Equal(t, "01000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000b00000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
assert.Equal(t, "01000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a1200000c000b00000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
// Test case 5: when the chunk contains two blocks each with 1 L1MsgTx
// TODO: revise this test, we cannot reuse the same L1MsgTx twice
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,
@@ -78,7 +79,7 @@ func TestChunkEncode(t *testing.T) {
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
assert.Equal(t, 193, len(bytes))
assert.Equal(t, "02000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000b000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e105808080808000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
assert.Equal(t, "02000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a1200000c000b000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000001000000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e105808080808000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
}
func TestChunkHash(t *testing.T) {
@@ -133,5 +134,5 @@ func TestChunkHash(t *testing.T) {
}
hash, err = chunk.Hash(0)
assert.NoError(t, err)
assert.Equal(t, "0x42967825696a129e7a83f082097aca982747480956dcaa448c9296e795c9a91a", hash.Hex())
assert.Equal(t, "0x2eb7dd63bf8fc29a0f8c10d16c2ae6f9da446907c79d50f5c164d30dc8526b60", hash.Hex())
}

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.1.14"
var tag = "v4.1.25"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -22,6 +22,7 @@ contract InitializeL1BridgeContracts is Script {
uint256 CHAIN_ID_L2 = vm.envUint("CHAIN_ID_L2");
uint256 MAX_L2_TX_IN_CHUNK = vm.envUint("MAX_L2_TX_IN_CHUNK");
uint256 MAX_L1_MESSAGE_GAS_LIMIT = vm.envUint("MAX_L1_MESSAGE_GAS_LIMIT");
address L1_ROLLUP_OPERATOR_ADDR = vm.envAddress("L1_ROLLUP_OPERATOR_ADDR");
address L1_FEE_VAULT_ADDR = vm.envAddress("L1_FEE_VAULT_ADDR");
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
@@ -84,7 +85,7 @@ contract InitializeL1BridgeContracts is Script {
L1_SCROLL_CHAIN_PROXY_ADDR,
L1_ENFORCED_TX_GATEWAY_PROXY_ADDR,
L2_GAS_PRICE_ORACLE_PROXY_ADDR,
10000000
MAX_L1_MESSAGE_GAS_LIMIT
);
// initialize L1ScrollMessenger

View File

@@ -492,6 +492,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
// concatenate l2 transaction hashes
uint256 _numTransactionsInBlock = ChunkCodec.numTransactions(blockPtr);
require(_numTransactionsInBlock >= _numL1MessagesInBlock, "num txs less than num L1 msgs");
for (uint256 j = _numL1MessagesInBlock; j < _numTransactionsInBlock; j++) {
bytes32 txHash;
(txHash, l2TxPtr) = ChunkCodec.loadL2TxHash(l2TxPtr);

View File

@@ -106,6 +106,17 @@ contract ScrollChainTest is DSTestPlus {
hevm.expectRevert("invalid chunk length");
rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
// num txs less than num L1 msgs, revert
chunk0 = new bytes(1 + 60);
bytes memory bitmap = new bytes(32);
chunk0[0] = bytes1(uint8(1)); // one block in this chunk
chunk0[58] = bytes1(uint8(1)); // numTransactions = 1
chunk0[60] = bytes1(uint8(3)); // numL1Messages = 3
bitmap[31] = bytes1(uint8(7));
chunks[0] = chunk0;
hevm.expectRevert("num txs less than num L1 msgs");
rollup.commitBatch(0, batchHeader0, chunks, bitmap);
// incomplete l2 transaction data, revert
chunk0 = new bytes(1 + 60 + 1);
chunk0[0] = bytes1(uint8(1)); // one block in this chunk
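
In the new revert case above, the chunk is one byte of block count followed by a single 60-byte BlockContext, so chunk offsets 57-58 and 59-60 hold the big-endian numTransactions and numL1Messages fields (BlockContext offsets 56-57 and 58-59); writing 1 and 3 into their low bytes yields numTransactions = 1 < numL1Messages = 3, and the skip bitmap with its low three bits set (value 7) covers the three L1 messages, so commitBatch hits the new "num txs less than num L1 msgs" check added to ScrollChain above.
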

View File

@@ -50,6 +50,11 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("get prover name from contex failed")
}
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return nil, fmt.Errorf("get prover version from contex failed")
}
batchTasks, err := bp.batchOrm.UpdateUnassignedBatchReturning(ctx, 1)
if err != nil {
return nil, fmt.Errorf("failed to get unassigned batch proving tasks, error:%w", err)
@@ -64,7 +69,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
batchTask := batchTasks[0]
log.Info("start batch proof generation session", "id", batchTask.Hash)
log.Info("start batch proof generation session", "id", batchTask.Hash, "public key", publicKey, "prover name", proverName)
if !bp.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
return nil, fmt.Errorf("the batch task id:%s check attempts have reach the maximum", batchTask.Hash)
@@ -75,6 +80,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
ProverPublicKey: publicKey.(string),
TaskType: int16(message.ProofTypeBatch),
ProverName: proverName.(string),
ProverVersion: proverVersion.(string),
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/databased/db.go

View File

@@ -50,6 +50,11 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("get prover name from contex failed")
}
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return nil, fmt.Errorf("get prover version from contex failed")
}
// load and send chunk tasks
chunkTasks, err := cp.chunkOrm.UpdateUnassignedChunkReturning(ctx, getTaskParameter.ProverHeight, 1)
if err != nil {
@@ -66,7 +71,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
chunkTask := chunkTasks[0]
log.Info("start chunk generation session", "id", chunkTask.Hash)
log.Info("start chunk generation session", "id", chunkTask.Hash, "public key", publicKey, "prover name", proverName)
if !cp.checkAttemptsExceeded(chunkTask.Hash, message.ProofTypeChunk) {
return nil, fmt.Errorf("chunk proof hash id:%s check attempts have reach the maximum", chunkTask.Hash)
@@ -77,6 +82,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
ProverPublicKey: publicKey.(string),
TaskType: int16(message.ProofTypeChunk),
ProverName: proverName.(string),
ProverVersion: proverVersion.(string),
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/databased/db.go

View File

@@ -50,7 +50,7 @@ func (b *BaseProverTask) checkAttemptsExceeded(hash string, taskType message.Pro
if len(proverTasks) >= int(b.cfg.ProverManager.SessionAttempts) {
coordinatorSessionsTimeoutTotalCounter.Inc(1)
log.Warn("proof generation prover task %s ended because reach the max attempts", hash)
log.Warn("proof generation prover task reach the max attempts", "hash", hash)
transErr := b.db.Transaction(func(tx *gorm.DB) error {
switch message.ProofType(proverTasks[0].TaskType) {

View File

@@ -294,6 +294,7 @@ func (o *Batch) UpdateUnassignedBatchReturning(ctx context.Context, limit int) (
var batches []*Batch
db = db.Model(&batches).Clauses(clause.Returning{})
db = db.Where("index = (?)", subQueryDB)
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
if err := db.Update("proving_status", types.ProvingTaskAssigned).Error; err != nil {
return nil, fmt.Errorf("Batch.UpdateUnassignedBatchReturning error: %w", err)
}

View File

@@ -364,6 +364,7 @@ func (o *Chunk) UpdateUnassignedChunkReturning(ctx context.Context, height, limi
var chunks []*Chunk
db = db.Model(&chunks).Clauses(clause.Returning{})
db = db.Where("index = (?)", subQueryDB)
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
if err := db.Update("proving_status", types.ProvingTaskAssigned).Error; err != nil {
return nil, fmt.Errorf("Chunk.UpdateUnassignedBatchReturning error: %w", err)
}
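
Both UpdateUnassignedBatchReturning (above) and UpdateUnassignedChunkReturning now add proving_status = ProvingTaskUnassigned to the WHERE clause of the UPDATE ... RETURNING, so only rows that are still unassigned can be flipped to ProvingTaskAssigned and handed out; a task whose status has already moved on is no longer returned again, which is the guard behind the "fix get task exceed the attempt times" change (#753).
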

View File

@@ -22,6 +22,7 @@ type ProverTask struct {
// prover
ProverPublicKey string `json:"prover_public_key" gorm:"column:prover_public_key"`
ProverName string `json:"prover_name" gorm:"column:prover_name"`
ProverVersion string `json:"prover_version" gorm:"column:prover_version"`
// task
TaskID string `json:"task_id" gorm:"column:task_id"`
@@ -136,7 +137,7 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask,
db = db.Model(&ProverTask{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
DoUpdates: clause.AssignmentColumns([]string{"prover_version", "proving_status", "failure_type", "assigned_at"}),
})
if err := db.Create(&proverTask).Error; err != nil {
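
The prover_version column added here (and in the migration and prover-stats-api model below) is also included in the upsert's DoUpdates list, so when a task is re-assigned to the same prover the stored version is refreshed along with proving_status, failure_type, and assigned_at.
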

View File

@@ -8,6 +8,7 @@ create table prover_task
-- prover
prover_public_key VARCHAR NOT NULL,
prover_name VARCHAR NOT NULL,
prover_version VARCHAR NOT NULL,
-- task
task_id VARCHAR NOT NULL,

View File

@@ -1,4 +1,8 @@
.PHONY: clean build test
.PHONY: clean build test docker
IMAGE_NAME=prover-stats-api
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
build:
GOBIN=$(PWD)/build/bin go build -o $(PWD)/build/bin/prover-stats-api ./cmd
@@ -14,3 +18,6 @@ test:
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/prover-stats-api.Dockerfile
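
The new docker target builds scrolltech/prover-stats-api:latest using the repository root as the build context and the prover-stats-api.Dockerfile added earlier in this comparison; DOCKER_BUILDKIT=1 is needed because that Dockerfile uses BuildKit --mount bind and cache mounts.
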

View File

@@ -20,6 +20,7 @@ type ProverTask struct {
TaskID string `json:"task_id" gorm:"column:task_id"`
ProverPublicKey string `json:"prover_public_key" gorm:"column:prover_public_key"`
ProverName string `json:"prover_name" gorm:"column:prover_name"`
ProverVersion string `json:"prover_version" gorm:"column:prover_version"`
TaskType int16 `json:"task_type" gorm:"column:task_type;default:0"`
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:0"`
FailureType int16 `json:"failure_type" gorm:"column:failure_type;default:0"`
@@ -94,7 +95,7 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask,
db = db.Model(&ProverTask{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
DoUpdates: clause.AssignmentColumns([]string{"prover_version", "proving_status", "failure_type", "assigned_at"}),
})
if err := db.Create(&proverTask).Error; err != nil {

View File

@@ -1,21 +1,21 @@
.PHONY: lint docker clean prover mock-prover
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
HALO2_GPU_VERSION=$(shell ./print_halo2gpu_version.sh | sed -n '2p')
ifeq (${HALO2_GPU_VERSION},)
# use halo2_proofs with CPU
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
else
# use halo2_gpu
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_GPU_VERSION}
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_GPU_VERSION}
endif
libzkp:
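
The change to the prover Makefile is whitespace-only: it cleans up the ZKEVM_VERSION, HALO2_VERSION, and ZK_VERSION assignments so that ZK_VERSION is composed correctly (#751). As a worked example against the Cargo.lock above, the first line matching "scroll-prover" is the v0.5.10 pin, so ZKEVM_VERSION would resolve to the short commit cb8f714.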