Compare commits


27 Commits

Author SHA1 Message Date
Steven
e8f5251ae2 fix: upgrade libzkp to use scroll-prover v0.5.12 (#769)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-10 20:10:30 +08:00
HAOYUatHZ
d10438ae0f ci(github): temp fix gacts/install-geth-tools (#770) 2023-08-10 19:28:31 +08:00
maskpp
a7ce900aa7 ci(github): add prover_stats_api docker build action (#768)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-10 17:21:27 +08:00
maskpp
49ec0b8fa8 fix(integration test): fix TestCoordinatorProverInteraction (#765)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-08-10 17:20:25 +08:00
georgehao
244e5e915a fix(coordinator): fix update proving_status bug (#759)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-10 17:02:53 +08:00
HAOYUatHZ
dc53d6d022 ci(github): speed up docker builds (#766) 2023-08-10 14:59:51 +08:00
HAOYUatHZ
6dc89a9c0b fix coordinator version in docker (#764) 2023-08-10 00:24:54 +08:00
Steven
9e450a7b0f fix: upgrade libzkp to use scroll-prover v0.5.11 (#761) 2023-08-09 22:48:18 +08:00
HAOYUatHZ
af76593c1d feat(coordinator): reject old-version prover (#757) 2023-08-09 21:25:46 +08:00
colin
a2ae697f79 fix(relayer): increase ProcessCommittedBatches loop time (#762) 2023-08-09 19:59:13 +08:00
colin
41b07bd05a feat(rollup-relayer): add calldata log in commitBatch failed tx (#760) 2023-08-09 17:28:07 +08:00
maskpp
90dc0911d3 feat: add prover-stats-api docker file (#758) 2023-08-09 17:10:38 +08:00
HAOYUatHZ
3f775ae7bc feat(coordinator): add ProverVersion to ProverTask (#756) 2023-08-09 15:43:29 +08:00
Steven
674b801005 fix: upgrade libzkp to use scroll-prover v0.5.10 (#754) 2023-08-08 21:08:32 +08:00
georgehao
c5b80937ce fix(coordinator): fix get task exceed the attempt times (#753)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-08 12:50:27 +08:00
Xi Lin
ea3e08ab2a fix(contracts): initialize maxGasLimit by vm.env (#752) 2023-08-08 11:53:30 +08:00
Steven
0360f44ff6 fix: upgrade libzkp to use scroll-prover v0.5.8 (#750) 2023-08-07 23:17:19 +08:00
Xinran
1b57982368 fix(prover): fix ZK_VERSION in Makefile (#751) 2023-08-07 22:51:25 +08:00
Steven
b09c2bbecb fix: upgrade to use scroll-prover v0.5.7 (#749) 2023-08-07 20:51:24 +08:00
HAOYUatHZ
7d2a516be1 bump version (#747) 2023-08-07 16:30:56 +08:00
Péter Garamvölgyi
ee55fe3d51 fix: do not update batch rollup_status to FinalizationFailed on tx error (#745) 2023-08-07 16:18:57 +08:00
Steven
09d7764dcb fix: upgrade libzkp to use scroll-prover v0.5.6 (#744) 2023-08-07 12:26:19 +08:00
Xi Lin
4cd199b3b3 test(contracts): add unit tests when num txs < num L1 msgs (#742)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-08-07 05:13:04 +02:00
Péter Garamvölgyi
ced64e8563 refactor: remove debug log (#743) 2023-08-06 21:56:43 +02:00
Péter Garamvölgyi
336d76e0dc fix: Consider skipped messages in block.numTransaction encoding (#741) 2023-08-06 21:45:58 +02:00
Péter Garamvölgyi
a0ca0e6295 feat: commit batch extra logs (#740) 2023-08-06 21:06:35 +02:00
HAOYUatHZ
9f73554b31 fix(libzkp): pin ethers-core version (#739)
Co-authored-by: Your Name <you@example.com>
2023-08-07 01:03:27 +08:00
41 changed files with 522 additions and 257 deletions

View File

@@ -43,6 +43,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Lint
working-directory: 'bridge'
run: |
@@ -92,6 +94,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker
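Note: the same two-line "with: version: 1.10.19" pin recurs in each workflow diff below. Per #770 this is a temporary fix: gacts/install-geth-tools@v1 is pinned to an explicit Geth tools release, presumably instead of resolving whatever release is latest.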

View File

@@ -88,6 +88,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -104,6 +104,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -81,6 +81,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -6,7 +6,7 @@ on:
- v**
jobs:
build-and-push:
event_watcher:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -27,6 +27,18 @@ jobs:
tags: scrolltech/event-watcher:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
gas_oracle:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push gas_oracle docker
uses: docker/build-push-action@v2
with:
@@ -36,6 +48,18 @@ jobs:
tags: scrolltech/gas-oracle:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
msg_relayer:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push msg_relayer docker
uses: docker/build-push-action@v2
with:
@@ -45,6 +69,18 @@ jobs:
tags: scrolltech/msg-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
rollup_relayer:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push rollup_relayer docker
uses: docker/build-push-action@v2
with:
@@ -54,6 +90,18 @@ jobs:
tags: scrolltech/rollup-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
bridgehistoryapi-cross-msg-fetcher:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push bridgehistoryapi-cross-msg-fetcher docker
uses: docker/build-push-action@v2
with:
@@ -63,6 +111,18 @@ jobs:
tags: scrolltech/bridgehistoryapi-cross-msg-fetcher:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
bridgehistoryapi-server:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push bridgehistoryapi-server docker
uses: docker/build-push-action@v2
with:
@@ -72,6 +132,18 @@ jobs:
tags: scrolltech/bridgehistoryapi-server:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
coordinator:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push coordinator docker
uses: docker/build-push-action@v2
with:
@@ -81,3 +153,24 @@ jobs:
tags: scrolltech/coordinator:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
prover-stats-api:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push prover-stats-api docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/prover-stats-api.Dockerfile
push: true
tags: scrolltech/prover-stats-api:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}

View File

@@ -33,6 +33,8 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -110,7 +110,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
go utils.Loop(subCtx, 60*time.Second, l2relayer.ProcessCommittedBatches)
// Finish starting all rollup relayer functions.
log.Info("Start rollup-relayer successfully")

View File

@@ -374,20 +374,16 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
// send transaction
txID := batch.Hash + "-commit"
log.Info(
"Sending commitBatch",
"batch.Index", batch.Index,
"batch.Hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(calldata),
"parentBatch.BatchHeader", common.Bytes2Hex(parentBatch.BatchHeader),
"batch.BatchHeader", common.Bytes2Hex(batch.BatchHeader),
)
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send commitBatch tx to layer1 ", "err", err)
}
log.Error(
"Failed to send commitBatch tx to layer1",
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(calldata),
"err", err,
)
return
}
@@ -433,7 +429,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
return
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash)
success := false
var parentBatchStateRoot string
if batch.Index > 0 {
@@ -447,24 +442,14 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
parentBatchStateRoot = parentBatch.StateRoot
}
defer func() {
// TODO: need to revisit this and have a more fine-grained error handling
if !success {
log.Info("Failed to upload the proof, change rollup status to RollupFinalizeFailed", "hash", hash)
if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizeFailed); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
}
}
}()
aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, hash)
if err != nil {
log.Warn("get verified proof by hash failed", "hash", hash, "err", err)
log.Error("get verified proof by hash failed", "hash", hash, "err", err)
return
}
if err = aggProof.SanityCheck(); err != nil {
log.Warn("agg_proof sanity check fails", "hash", hash, "error", err)
log.Error("agg_proof sanity check fails", "hash", hash, "error", err)
return
}
@@ -487,8 +472,18 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
finalizeTxHash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("finalizeBatchWithProof in layer1 failed",
"index", batch.Index, "hash", batch.Hash, "err", err)
// This can happen normally if we try to finalize 2 or more
// batches around the same time. The 2nd tx might fail since
// the client does not see the 1st tx's updates at this point.
// TODO: add more fine-grained error handling
log.Error(
"finalizeBatchWithProof in layer1 failed",
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(data),
"err", err,
)
}
return
}
@@ -498,11 +493,10 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
// record and sync with db, @todo handle db error
err = r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed",
log.Error("UpdateFinalizeTxHashAndRollupStatus failed",
"index", batch.Index, "batch hash", batch.Hash,
"tx hash", finalizeTxHash.String(), "err", err)
}
success = true
r.processingFinalization.Store(txID, hash)
case types.ProvingTaskFailed:

View File

@@ -90,10 +90,9 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizeFailed, statuses[0])
// no valid proof, rollup status remains the same
assert.Equal(t, types.RollupCommitted, statuses[0])
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
assert.NoError(t, err)
proof := &message.BatchProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}

View File

@@ -37,7 +37,7 @@ COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd && mv internal/logic/verifier/lib /bin/
RUN cd ./coordinator && make coordinator_skip_libzkp && mv ./build/bin/coordinator /bin/coordinator && mv internal/logic/verifier/lib /bin/
# Pull coordinator into a second stage deploy alpine container
FROM ubuntu:20.04
@@ -46,6 +46,6 @@ ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/li
RUN mkdir -p /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/coordinator /bin/
RUN /bin/coordinator --version
ENTRYPOINT ["/bin/coordinator"]

View File

@@ -0,0 +1,31 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.19 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
# Support the mainland China network environment.
#ENV GOPROXY="https://goproxy.cn,direct"
RUN go mod download -x
# Build prover-stats-api
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/prover-stats-api/cmd/ && go build -v -p 4 -o /bin/prover-stats-api
# Pull prover-stats-api into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/prover-stats-api /bin/
ENTRYPOINT ["prover-stats-api"]

View File

@@ -32,7 +32,7 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
dependencies = [
"ark-std",
"env_logger 0.10.0",
@@ -432,7 +432,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
dependencies = [
"eth-types",
"ethers-core",
@@ -630,9 +630,12 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e"
[[package]]
name = "convert_case"
version = "0.5.0"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb4a24b1aaf0fd0ce8b45161144d6f42cd91677fd5940fd431183eb023b3a2b8"
checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca"
dependencies = [
"unicode-segmentation",
]
[[package]]
name = "core-foundation-sys"
@@ -1045,7 +1048,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
dependencies = [
"ethers-core",
"ethers-signers",
@@ -1138,13 +1141,12 @@ dependencies = [
[[package]]
name = "ethers-core"
version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ebdd63c828f58aa067f40f9adcbea5e114fb1f90144b3a1e2858e0c9b1ff4e8"
source = "git+https://github.com/scroll-tech/ethers-rs.git?branch=v0.17.0#739ec9a0df8daf536937739c87e85612bd73212f"
dependencies = [
"arrayvec",
"bytes",
"chrono",
"convert_case 0.5.0",
"convert_case 0.6.0",
"elliptic-curve",
"ethabi",
"fastrlp",
@@ -1223,7 +1225,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
dependencies = [
"eth-types",
"geth-utils",
@@ -1436,7 +1438,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
dependencies = [
"digest 0.7.6",
"eth-types",
@@ -1476,7 +1478,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
dependencies = [
"env_logger 0.9.3",
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
@@ -2074,7 +2076,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
dependencies = [
"env_logger 0.9.3",
"eth-types",
@@ -2261,7 +2263,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
dependencies = [
"eth-types",
"ethers-core",
@@ -2276,7 +2278,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
dependencies = [
"bus-mapping",
"eth-types",
@@ -2752,7 +2754,7 @@ dependencies = [
[[package]]
name = "prover"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.3#337089ac40bac756d88b9ae30a3be1f82538b216"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.12#ef945901bee26c4b79be8af60e259443392368fa"
dependencies = [
"aggregator",
"anyhow",
@@ -4037,7 +4039,7 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "types"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.3#337089ac40bac756d88b9ae30a3be1f82538b216"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.12#ef945901bee26c4b79be8af60e259443392368fa"
dependencies = [
"base64 0.13.1",
"blake2",
@@ -4082,6 +4084,12 @@ dependencies = [
"tinyvec",
]
[[package]]
name = "unicode-segmentation"
version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36"
[[package]]
name = "unicode-xid"
version = "0.2.4"
@@ -4482,7 +4490,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
dependencies = [
"array-init",
"bus-mapping",

View File

@@ -7,6 +7,8 @@ edition = "2021"
[lib]
crate-type = ["cdylib"]
[patch.crates-io]
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v0.17.0" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
@@ -18,8 +20,8 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }
[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.3" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.3" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.12" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.12" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
log = "0.4"

View File

@@ -169,7 +169,7 @@ func TestBatchHeaderEncode(t *testing.T) {
assert.NotNil(t, batchHeader)
bytes = batchHeader.Encode()
assert.Equal(t, 121, len(bytes))
assert.Equal(t, "010000000000000001000000000000000b000000000000000b457a9e90e8e51ba2de2f66c6b589540b88cf594dac7fa7d04b99cdcfecf24e384136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f9498500000000000000000000000000000000000000000000000000000000000003ff", common.Bytes2Hex(bytes))
assert.Equal(t, "010000000000000001000000000000000b000000000000000b34f419ce7e882295bdb5aec6cce56ffa788a5fed4744d7fbd77e4acbf409f1ca4136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f9498500000000000000000000000000000000000000000000000000000000000003ff", common.Bytes2Hex(bytes))
}
func TestBatchHeaderHash(t *testing.T) {
@@ -230,7 +230,7 @@ func TestBatchHeaderHash(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, batchHeader)
hash = batchHeader.Hash()
assert.Equal(t, "0ec9547c6645d5f0c1254e121f49e93f54525cfda5bfb2236440fb3470f48902", common.Bytes2Hex(hash.Bytes()))
assert.Equal(t, "1c3007880f0eafe74572ede7d164ff1ee5376e9ac9bff6f7fb837b2630cddc9a", common.Bytes2Hex(hash.Bytes()))
}
func TestBatchHeaderDecode(t *testing.T) {

View File

@@ -36,6 +36,17 @@ func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
return *lastQueueIndex - totalL1MessagePoppedBefore + 1
}
// NumL2Transactions returns the number of L2 transactions in this block.
func (w *WrappedBlock) NumL2Transactions() uint64 {
var count uint64
for _, txData := range w.Transactions {
if txData.Type != types.L1MessageTxType {
count++
}
}
return count
}
// Encode encodes the WrappedBlock into RollupV2 BlockContext Encoding.
func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
bytes := make([]byte, 60)
@@ -43,20 +54,25 @@ func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error)
if !w.Header.Number.IsUint64() {
return nil, errors.New("block number is not uint64")
}
if len(w.Transactions) > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}
// note: numL1Messages includes skipped messages
numL1Messages := w.NumL1Messages(totalL1MessagePoppedBefore)
if numL1Messages > math.MaxUint16 {
return nil, errors.New("number of L1 messages exceeds max uint16")
}
// note: numTransactions includes skipped messages
numL2Transactions := w.NumL2Transactions()
numTransactions := numL1Messages + numL2Transactions
if numTransactions > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}
binary.BigEndian.PutUint64(bytes[0:], w.Header.Number.Uint64())
binary.BigEndian.PutUint64(bytes[8:], w.Header.Time)
// TODO: [16:47] Currently, baseFee is 0, because we disable EIP-1559.
binary.BigEndian.PutUint64(bytes[48:], w.Header.GasLimit)
binary.BigEndian.PutUint16(bytes[56:], uint16(len(w.Transactions)))
binary.BigEndian.PutUint16(bytes[56:], uint16(numTransactions))
binary.BigEndian.PutUint16(bytes[58:], uint16(numL1Messages))
return bytes, nil
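Putting the hunk together, the 60-byte BlockContext layout is: bytes [0:8] block number, [8:16] timestamp, [16:48] base fee (currently zero), [48:56] gas limit, [56:58] numTransactions, and [58:60] numL1Messages. A worked example in Go, with counts read off the chunk-test fixtures below:

// wrappedBlock2 in the chunk test: 11 L1 messages popped (queue indices 0..10),
// only 1 of them present in w.Transactions, plus 1 L2 transaction.
numL1Messages := uint64(11)                          // queue-index delta; skipped messages included
numL2Transactions := uint64(1)                       // w.NumL2Transactions() filters out L1MessageTxType
numTransactions := numL1Messages + numL2Transactions // 12 = 0x000c, encoded at bytes[56:58]
// The old code encoded len(w.Transactions) = 2 = 0x0002, silently dropping the
// 10 skipped messages — hence 0002000b becoming 000c000b in the test's hex string.

This is also why the expected hex strings and hashes change in the batch-header and chunk tests around this hunk: the numTransactions field in each block context now accounts for skipped L1 messages.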

View File

@@ -65,9 +65,10 @@ func TestChunkEncode(t *testing.T) {
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
assert.Equal(t, 97, len(bytes))
assert.Equal(t, "01000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000b00000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
assert.Equal(t, "01000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a1200000c000b00000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
// Test case 5: when the chunk contains two blocks each with 1 L1MsgTx
// TODO: revise this test, we cannot reuse the same L1MsgTx twice
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,
@@ -78,7 +79,7 @@ func TestChunkEncode(t *testing.T) {
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
assert.Equal(t, 193, len(bytes))
assert.Equal(t, "02000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000b000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e105808080808000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
assert.Equal(t, "02000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a1200000c000b000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000001000000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e105808080808000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
}
func TestChunkHash(t *testing.T) {
@@ -133,5 +134,5 @@ func TestChunkHash(t *testing.T) {
}
hash, err = chunk.Hash(0)
assert.NoError(t, err)
assert.Equal(t, "0x42967825696a129e7a83f082097aca982747480956dcaa448c9296e795c9a91a", hash.Hex())
assert.Equal(t, "0x2eb7dd63bf8fc29a0f8c10d16c2ae6f9da446907c79d50f5c164d30dc8526b60", hash.Hex())
}

View File

@@ -126,7 +126,7 @@ const (
ProvingTaskUnassigned
// ProvingTaskAssigned : proving_task is assigned to be proved
ProvingTaskAssigned
// ProvingTaskProved : proof has been returned by prover
// ProvingTaskProved DEPRECATED: proof has been returned by prover
ProvingTaskProved
// ProvingTaskVerified : proof is valid
ProvingTaskVerified

View File

@@ -20,12 +20,17 @@ var (
MessageRelayerApp MockAppName = "message-relayer-test"
// RollupRelayerApp the name of mock rollup-relayer app.
RollupRelayerApp MockAppName = "rollup-relayer-test"
// CoordinatorApp the name of mock coordinator app.
CoordinatorApp MockAppName = "coordinator-test"
// DBCliApp the name of mock database app.
DBCliApp MockAppName = "db_cli-test"
// ProverApp the name of mock prover app.
ProverApp MockAppName = "prover-test"
// CoordinatorApp the name of mock coordinator app.
CoordinatorApp MockAppName = "coordinator-test"
// ChunkProverApp the name of mock chunk prover app.
ChunkProverApp MockAppName = "chunkProver-test"
// BatchProverApp the name of mock batch prover app.
BatchProverApp MockAppName = "batchProver-test"
)
// RegisterSimulation register initializer function for integration-test.

View File

@@ -3,9 +3,10 @@ package version
import (
"fmt"
"runtime/debug"
"strings"
)
var tag = "v4.1.13"
var tag = "v4.1.31"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -19,11 +20,29 @@ var commit = func() string {
}
}
}
return ""
// Set default value for integration test.
return "000000"
}()
// ZkVersion is commit-id of common/libzkp/impl/cargo.lock/scroll-prover
var ZkVersion string
// ZkVersion is the commit-id of scroll-prover and halo2 from common/libzkp/impl/cargo.lock, concatenated by a "-".
// The default `000000-000000` is set for integration tests, and is overwritten by the coordinator's & prover's actual builds (see their Makefiles).
var ZkVersion = "000000-000000"
// Version denote the version of scroll protocol, including the l2geth, relayer, coordinator, prover, contracts and etc.
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZkVersion)
// CheckScrollProverVersion checks the "scroll-prover" version; if it differs from the local one, it returns false.
func CheckScrollProverVersion(proverVersion string) bool {
// note that the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so the split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {
return false
}
local := strings.Split(Version, "-")
if len(local) != 4 {
return false
}
// compare the `scroll_prover` version
return remote[2] == local[2]
}
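Since Version is built as tag-commit-ZkVersion and ZkVersion is itself scroll_prover-halo2, a full version string has four dash-separated fields, and only field [2] — the scroll-prover commit — must match. Illustrative calls with hypothetical commit IDs:

// With version.Version == "v4.1.31-abc1234-def5678-9abcdef" (hypothetical):
CheckScrollProverVersion("v4.1.30-0000000-def5678-1111111") // true:  same scroll-prover commit
CheckScrollProverVersion("v4.1.31-abc1234-fffffff-9abcdef") // false: different scroll-prover commit
CheckScrollProverVersion("v1.0.0")                          // false: not four fields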

View File

@@ -22,6 +22,7 @@ contract InitializeL1BridgeContracts is Script {
uint256 CHAIN_ID_L2 = vm.envUint("CHAIN_ID_L2");
uint256 MAX_L2_TX_IN_CHUNK = vm.envUint("MAX_L2_TX_IN_CHUNK");
uint256 MAX_L1_MESSAGE_GAS_LIMIT = vm.envUint("MAX_L1_MESSAGE_GAS_LIMIT");
address L1_ROLLUP_OPERATOR_ADDR = vm.envAddress("L1_ROLLUP_OPERATOR_ADDR");
address L1_FEE_VAULT_ADDR = vm.envAddress("L1_FEE_VAULT_ADDR");
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
@@ -84,7 +85,7 @@ contract InitializeL1BridgeContracts is Script {
L1_SCROLL_CHAIN_PROXY_ADDR,
L1_ENFORCED_TX_GATEWAY_PROXY_ADDR,
L2_GAS_PRICE_ORACLE_PROXY_ADDR,
10000000
MAX_L1_MESSAGE_GAS_LIMIT
);
// initialize L1ScrollMessenger

View File

@@ -492,6 +492,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
// concatenate l2 transaction hashes
uint256 _numTransactionsInBlock = ChunkCodec.numTransactions(blockPtr);
require(_numTransactionsInBlock >= _numL1MessagesInBlock, "num txs less than num L1 msgs");
for (uint256 j = _numL1MessagesInBlock; j < _numTransactionsInBlock; j++) {
bytes32 txHash;
(txHash, l2TxPtr) = ChunkCodec.loadL2TxHash(l2TxPtr);

View File

@@ -106,6 +106,17 @@ contract ScrollChainTest is DSTestPlus {
hevm.expectRevert("invalid chunk length");
rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
// num txs less than num L1 msgs, revert
chunk0 = new bytes(1 + 60);
bytes memory bitmap = new bytes(32);
chunk0[0] = bytes1(uint8(1)); // one block in this chunk
chunk0[58] = bytes1(uint8(1)); // numTransactions = 1
chunk0[60] = bytes1(uint8(3)); // numL1Messages = 3
bitmap[31] = bytes1(uint8(7));
chunks[0] = chunk0;
hevm.expectRevert("num txs less than num L1 msgs");
rollup.commitBatch(0, batchHeader0, chunks, bitmap);
// incomplete l2 transaction data, revert
chunk0 = new bytes(1 + 60 + 1);
chunk0[0] = bytes1(uint8(1)); // one block in this chunk
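The magic indices in the new test mirror the Go encoding above: a chunk is one numBlocks byte followed by 60-byte block contexts, so the first context's big-endian uint16 numTransactions occupies chunk bytes [57:59] and numL1Messages bytes [59:61]. Writing the low bytes at offsets 58 and 60 yields numTransactions = 1 < numL1Messages = 3, and bitmap value 7 (binary 111) marks all three L1 messages as skipped, so the new require fires.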

View File

@@ -1,4 +1,4 @@
.PHONY: lint docker clean coordinator mock_coordinator
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator
IMAGE_NAME=coordinator
IMAGE_VERSION=latest
@@ -25,6 +25,9 @@ libzkp:
coordinator: libzkp ## Builds the Coordinator instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
coordinator_skip_libzkp:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
mock_coordinator: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator ./cmd

View File

@@ -13,6 +13,7 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
@@ -42,12 +43,20 @@ func NewBatchProverTask(cfg *config.Config, db *gorm.DB) *BatchProverTask {
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return nil, fmt.Errorf("get public key from contex failed")
return nil, fmt.Errorf("get public key from context failed")
}
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return nil, fmt.Errorf("get prover name from contex failed")
return nil, fmt.Errorf("get prover name from context failed")
}
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return nil, fmt.Errorf("get prover version from context failed")
}
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", proverVersion.(string), version.Version)
}
batchTasks, err := bp.batchOrm.UpdateUnassignedBatchReturning(ctx, 1)
@@ -64,7 +73,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
batchTask := batchTasks[0]
log.Info("start batch proof generation session", "id", batchTask.Hash)
log.Info("start batch proof generation session", "id", batchTask.Hash, "public key", publicKey, "prover name", proverName)
if !bp.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
return nil, fmt.Errorf("the batch task id:%s check attempts have reach the maximum", batchTask.Hash)
@@ -75,6 +84,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
ProverPublicKey: publicKey.(string),
TaskType: int16(message.ProofTypeBatch),
ProverName: proverName.(string),
ProverVersion: proverVersion.(string),
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// on why UTC time is used here, see scroll/common/databased/db.go

View File

@@ -13,6 +13,7 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
@@ -42,12 +43,20 @@ func NewChunkProverTask(cfg *config.Config, db *gorm.DB) *ChunkProverTask {
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return nil, fmt.Errorf("get public key from contex failed")
return nil, fmt.Errorf("get public key from context failed")
}
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return nil, fmt.Errorf("get prover name from contex failed")
return nil, fmt.Errorf("get prover name from context failed")
}
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return nil, fmt.Errorf("get prover version from context failed")
}
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
// load and send chunk tasks
@@ -66,7 +75,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
chunkTask := chunkTasks[0]
log.Info("start chunk generation session", "id", chunkTask.Hash)
log.Info("start chunk generation session", "id", chunkTask.Hash, "public key", publicKey, "prover name", proverName)
if !cp.checkAttemptsExceeded(chunkTask.Hash, message.ProofTypeChunk) {
return nil, fmt.Errorf("chunk proof hash id:%s check attempts have reach the maximum", chunkTask.Hash)
@@ -77,6 +86,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
ProverPublicKey: publicKey.(string),
TaskType: int16(message.ProofTypeChunk),
ProverName: proverName.(string),
ProverVersion: proverVersion.(string),
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// on why UTC time is used here, see scroll/common/databased/db.go

View File

@@ -50,7 +50,7 @@ func (b *BaseProverTask) checkAttemptsExceeded(hash string, taskType message.Pro
if len(proverTasks) >= int(b.cfg.ProverManager.SessionAttempts) {
coordinatorSessionsTimeoutTotalCounter.Inc(1)
log.Warn("proof generation prover task %s ended because reach the max attempts", hash)
log.Warn("proof generation prover task reach the max attempts", "hash", hash)
transErr := b.db.Transaction(func(tx *gorm.DB) error {
switch message.ProofType(proverTasks[0].TaskType) {

View File

@@ -2,6 +2,7 @@ package submitproof
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
@@ -34,8 +35,12 @@ var (
ErrValidatorFailureProofMsgStatusNotOk = errors.New("validator failure proof msg status not ok")
// ErrValidatorFailureProverTaskEmpty get none prover task
ErrValidatorFailureProverTaskEmpty = errors.New("validator failure get none prover task for the proof")
// ErrValidatorFailureProverInfoHasProofValid proof is vaild
ErrValidatorFailureProverInfoHasProofValid = errors.New("validator failure prover task info has proof valid")
// ErrValidatorFailureProverTaskCannotSubmitTwice a prover task cannot submit a proof twice
ErrValidatorFailureProverTaskCannotSubmitTwice = errors.New("validator failure prove task cannot submit proof twice")
// ErrValidatorFailureProofTimeout the submitted proof timed out
ErrValidatorFailureProofTimeout = errors.New("validator failure submit proof timeout")
// ErrValidatorFailureTaskHaveVerifiedSuccess the chunk/batch has already been proved and verified successfully
ErrValidatorFailureTaskHaveVerifiedSuccess = errors.New("validator failure chunk/batch have proved and verified success")
)
// ProofReceiverLogic the proof receiver logic
@@ -74,7 +79,7 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB) *ProofR
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.ProofMsg) error {
pk := ctx.GetString(coordinatorType.PublicKey)
if len(pk) == 0 {
return fmt.Errorf("get public key from contex failed")
return fmt.Errorf("get public key from context failed")
}
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, proofMsg.ID, pk)
@@ -83,47 +88,15 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
return ErrValidatorFailureProverTaskEmpty
}
if err = m.validator(proverTask, pk, proofMsg); err != nil {
if errors.Is(err, ErrValidatorFailureProofMsgStatusNotOk) {
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
}
return err
}
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
// store proof content
var storeProofErr error
switch proofMsg.Type {
case message.ProofTypeChunk:
storeProofErr = m.db.Transaction(func(tx *gorm.DB) error {
if dbErr := m.chunkOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.ChunkProof, proofTimeSec, tx); dbErr != nil {
return fmt.Errorf("failed to store chunk proof into db, err:%w", dbErr)
}
if dbErr := m.chunkOrm.UpdateProvingStatus(ctx, proofMsg.ID, types.ProvingTaskProved, tx); dbErr != nil {
return fmt.Errorf("failed to update chunk task status as proved, error:%w", dbErr)
}
return nil
})
case message.ProofTypeBatch:
storeProofErr = m.db.Transaction(func(tx *gorm.DB) error {
if dbErr := m.batchOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.BatchProof, proofTimeSec, tx); dbErr != nil {
return fmt.Errorf("failed to store batch proof into db, error:%w", dbErr)
}
if dbErr := m.batchOrm.UpdateProvingStatus(ctx, proofMsg.ID, types.ProvingTaskProved, tx); dbErr != nil {
return fmt.Errorf("failed to update batch task status as proved, error:%w", dbErr)
}
return nil
})
}
if storeProofErr != nil {
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
log.Error("failed to store basic proof into db", "error", storeProofErr)
return storeProofErr
}
log.Info("handling zk proof", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proverTask.TaskType, "proof time", proofTimeSec)
coordinatorProofsReceivedTotalCounter.Inc(1)
if err = m.validator(ctx, proverTask, pk, proofMsg); err != nil {
return err
}
var success bool
var verifyErr error
@@ -134,7 +107,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
}
if verifyErr != nil || !success {
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg)
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
@@ -149,8 +122,10 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec)
if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg); err != nil {
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg.Type)
coordinatorProofsReceivedTotalCounter.Inc(1)
if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg, proofTimeSec); err != nil {
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
return err
}
@@ -178,49 +153,78 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
return nil
}
func (m *ProofReceiverLogic) validator(proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
// Ensure this prover is eligible to participate in the prover task.
if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
// TODO: Defend invalid proof resubmissions by one of the following two methods:
// (i) slash the prover for each submission of invalid proof
// (ii) set the maximum failure retry times
log.Warn("prover has already submitted valid proof in proof session", "prover name", proverTask.ProverName,
"prover pk", proverTask.ProverPublicKey, "proof type", proverTask.TaskType, "proof id", proofMsg.ProofDetail.ID)
return ErrValidatorFailureProverInfoHasProofValid
log.Warn("the prover task cannot submit twice", "hash", proofMsg.ID, "prover pk", proverTask.ProverPublicKey,
"prover name", proverTask.ProverName, "proof type", proverTask.TaskType)
return ErrValidatorFailureProverTaskCannotSubmitTwice
}
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
log.Info("handling zk proof", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proverTask.TaskType, "proof time", proofTimeSec)
if proofMsg.Status != message.StatusOk {
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
log.Info("proof generated by prover failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", proofMsg.Error)
"prover pk", pk, "prove type", proofMsg.Type, "error", proofMsg.Error)
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsg.Type, proofMsg.ID, pk, types.ProverProofInvalid); updateErr != nil {
log.Error("proof generated by prover failed update prover task proving status failure", "proof id", proofMsg.ID,
"prover name", proverTask.ProverName, "prover pk", pk, "prove type", proofMsg.Type, "error", proofMsg.Error)
}
return ErrValidatorFailureProofMsgStatusNotOk
}
// if the prover task FailureType is SessionInfoFailureTimeout, the submitted proof timed out; skip it
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
log.Info("submitted proof has timed out, skip this proof", "hash", proofMsg.ID, "proof type", proverTask.TaskType,
"prover name", proverTask.ProverName, "prover public key", pk, "proof time", proofTimeSec)
return ErrValidatorFailureProofTimeout
}
// store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, pk, proofMsg); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "public key", pk,
"prover name", proverTask.ProverName, "error", updateTaskProofErr)
}
// if the batch/chunk has already been proved and verified successfully, skip this submitted proof
if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
log.Info("the task has already been proved and verified successfully, skip this proof", "hash", proofMsg.ID,
"proof type", proverTask.TaskType, "prover name", proverTask.ProverName, "prover public key", pk)
return ErrValidatorFailureTaskHaveVerifiedSuccess
}
return nil
}
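Read as a whole, the reworked validator now short-circuits in a fixed order; a summary of the hunk above (no new behavior):

// 1. prover already submitted a valid proof for this task -> ErrValidatorFailureProverTaskCannotSubmitTwice (anti-DoS)
// 2. proofMsg.Status != StatusOk -> mark the prover task ProverProofInvalid, return ErrValidatorFailureProofMsgStatusNotOk
// 3. prover task already marked timed out -> ErrValidatorFailureProofTimeout
// 4. best effort: persist the submitted proof bytes onto the prover task row
// 5. task already proved and verified by another prover -> ErrValidatorFailureTaskHaveVerifiedSuccess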
func (m *ProofReceiverLogic) proofFailure(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskFailed); err != nil {
func (m *ProofReceiverLogic) proofFailure(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
log.Info("proof failure update proof status", "hash", hash, "public key", pubKey,
"proof type", proofMsg.Type.String(), "status", types.ProvingTaskFailed.String())
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskFailed, 0); err != nil {
log.Error("failed to updated proof status ProvingTaskFailed", "hash", hash, "pubKey", pubKey, "error", err)
}
coordinatorSessionsFailedTotalCounter.Inc(1)
}
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskUnassigned); err != nil {
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
log.Info("proof recover update proof status", "hash", hash, "public key", pubKey,
"proof type", proofMsg.Type.String(), "status", types.ProvingTaskUnassigned.String())
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskUnassigned, 0); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", hash, "pubKey", pubKey, "error", err)
}
}
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) error {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg.Type, types.ProvingTaskVerified); err != nil {
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg, proofTimeSec uint64) error {
log.Info("proof close task update proof status", "hash", hash, "public key", pubKey,
"proof type", proofMsg.Type.String(), "status", types.ProvingTaskVerified.String())
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskVerified, proofTimeSec); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "pubKey", pubKey, "error", err)
return err
}
@@ -228,13 +232,7 @@ func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pu
}
// UpdateProofStatus update the chunk/batch task and session info status
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string, proverPublicKey string, proofMsgType message.ProofType, status types.ProvingStatus) error {
// if the prover task failure type is SessionInfoFailureTimeout,
// just skip update the status because the proof result come too late.
if m.checkIsTimeoutFailure(ctx, hash, proverPublicKey) {
return nil
}
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string, proverPublicKey string, proofMsg *message.ProofMsg, status types.ProvingStatus, proofTimeSec uint64) error {
var proverTaskStatus types.ProverProveStatus
switch status {
case types.ProvingTaskFailed, types.ProvingTaskUnassigned:
@@ -244,16 +242,31 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
}
err := m.db.Transaction(func(tx *gorm.DB) error {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsgType, hash, proverPublicKey, proverTaskStatus, tx); updateErr != nil {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsg.Type, hash, proverPublicKey, proverTaskStatus, tx); updateErr != nil {
return updateErr
}
// if the batch already has a verified proof, the failed status must not overwrite its proving status
if status == types.ProvingTaskFailed && m.checkIsTaskSuccess(ctx, hash, proofMsgType) {
if status == types.ProvingTaskFailed && m.checkIsTaskSuccess(ctx, hash, proofMsg.Type) {
log.Info("update proof status ProvingTaskFailed skip because other prover have prove success", "hash", hash, "public key", proverPublicKey)
return nil
}
switch proofMsgType {
if status == types.ProvingTaskVerified {
var storeProofErr error
switch proofMsg.Type {
case message.ProofTypeChunk:
storeProofErr = m.chunkOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.ChunkProof, proofTimeSec, tx)
case message.ProofTypeBatch:
storeProofErr = m.batchOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.BatchProof, proofTimeSec, tx)
}
if storeProofErr != nil {
log.Error("failed to store chunk/batch proof into db", "hash", hash, "public key", proverPublicKey, "error", storeProofErr)
return storeProofErr
}
}
switch proofMsg.Type {
case message.ProofTypeChunk:
if err := m.chunkOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", hash, "error", err)
@@ -272,7 +285,7 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
return err
}
if status == types.ProvingTaskVerified && proofMsgType == message.ProofTypeChunk {
if status == types.ProvingTaskVerified && proofMsg.Type == message.ProofTypeChunk {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, hash); checkReadyErr != nil {
log.Error("failed to check are all chunk proofs ready", "error", checkReadyErr)
return checkReadyErr
@@ -302,14 +315,19 @@ func (m *ProofReceiverLogic) checkIsTaskSuccess(ctx context.Context, hash string
return provingStatus == types.ProvingTaskVerified
}
func (m *ProofReceiverLogic) checkIsTimeoutFailure(ctx context.Context, hash, proverPublicKey string) bool {
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, hash, proverPublicKey)
if err != nil {
return false
func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, pk string, proofMsg *message.ProofMsg) error {
// store the proof to prover task
var proofBytes []byte
var marshalErr error
switch proofMsg.Type {
case message.ProofTypeChunk:
proofBytes, marshalErr = json.Marshal(proofMsg.ChunkProof)
case message.ProofTypeBatch:
proofBytes, marshalErr = json.Marshal(proofMsg.BatchProof)
}
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
return true
if len(proofBytes) == 0 || marshalErr != nil {
return fmt.Errorf("updateProverTaskProof marshal proof error:%w", marshalErr)
}
return false
return m.proverTaskOrm.UpdateProverTaskProof(ctx, proofMsg.Type, proofMsg.ID, pk, proofBytes)
}

View File

@@ -294,6 +294,7 @@ func (o *Batch) UpdateUnassignedBatchReturning(ctx context.Context, limit int) (
var batches []*Batch
db = db.Model(&batches).Clauses(clause.Returning{})
db = db.Where("index = (?)", subQueryDB)
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
if err := db.Update("proving_status", types.ProvingTaskAssigned).Error; err != nil {
return nil, fmt.Errorf("Batch.UpdateUnassignedBatchReturning error: %w", err)
}

View File

@@ -364,6 +364,7 @@ func (o *Chunk) UpdateUnassignedChunkReturning(ctx context.Context, height, limi
var chunks []*Chunk
db = db.Model(&chunks).Clauses(clause.Returning{})
db = db.Where("index = (?)", subQueryDB)
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
if err := db.Update("proving_status", types.ProvingTaskAssigned).Error; err != nil {
return nil, fmt.Errorf("Chunk.UpdateUnassignedBatchReturning error: %w", err)
}
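In both ORMs the added WHERE clause makes claiming a task effectively compare-and-swap: the UPDATE ... RETURNING flips proving_status to ProvingTaskAssigned only if the row is still ProvingTaskUnassigned, so a task that was concurrently assigned, failed, or verified can no longer be re-claimed — which appears to be the update-proving_status bug #759 targets.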

View File

@@ -22,6 +22,7 @@ type ProverTask struct {
// prover
ProverPublicKey string `json:"prover_public_key" gorm:"column:prover_public_key"`
ProverName string `json:"prover_name" gorm:"column:prover_name"`
ProverVersion string `json:"prover_version" gorm:"column:prover_version"`
// task
TaskID string `json:"task_id" gorm:"column:task_id"`
@@ -111,6 +112,20 @@ func (o *ProverTask) GetProverTaskByTaskIDAndPubKey(ctx context.Context, taskID,
return &proverTask, nil
}
// GetProvingStatusByTaskID retrieves the proving status of a prover task
func (o *ProverTask) GetProvingStatusByTaskID(ctx context.Context, taskID string) (types.ProverProveStatus, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Select("proving_status")
db = db.Where("task_id = ?", taskID)
var proverTask ProverTask
if err := db.Find(&proverTask).Error; err != nil {
return types.ProverProofInvalid, fmt.Errorf("ProverTask.GetProvingStatusByTaskID error: %w, taskID: %v", err, taskID)
}
return types.ProverProveStatus(proverTask.ProvingStatus), nil
}
// GetAssignedProverTasks get the assigned prover task
func (o *ProverTask) GetAssignedProverTasks(ctx context.Context, limit int) ([]ProverTask, error) {
db := o.db.WithContext(ctx)
@@ -136,7 +151,7 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask,
db = db.Model(&ProverTask{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
DoUpdates: clause.AssignmentColumns([]string{"prover_version", "proving_status", "failure_type", "assigned_at"}),
})
if err := db.Create(&proverTask).Error; err != nil {
@@ -145,6 +160,19 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask,
return nil
}
// UpdateProverTaskProof update the prover task's proof
func (o *ProverTask) UpdateProverTaskProof(ctx context.Context, proofType message.ProofType, taskID string, pk string, proof []byte) error {
db := o.db
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", int(proofType), taskID, pk)
if err := db.Update("proof", proof).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskProof error: %w, proof type: %v, taskID: %v, prover public key: %v", err, proofType.String(), taskID, pk)
}
return nil
}
// UpdateProverTaskProvingStatus updates the proving_status of a specific ProverTask record.
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.ProverProveStatus, dbTX ...*gorm.DB) error {
db := o.db

View File

@@ -23,6 +23,7 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
@@ -37,10 +38,11 @@ var (
base *docker.App
db *gorm.DB
l2BlockOrm *orm.L2Block
chunkOrm *orm.Chunk
batchOrm *orm.Batch
db *gorm.DB
l2BlockOrm *orm.L2Block
chunkOrm *orm.Chunk
batchOrm *orm.Batch
proverTaskOrm *orm.ProverTask
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
@@ -107,6 +109,8 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
}
func setEnv(t *testing.T) {
version.Version = "v1.2.3-aaa-bbb-ccc"
base = docker.NewDockerApp()
base.RunDBImage(t)
@@ -127,6 +131,7 @@ func setEnv(t *testing.T) {
batchOrm = orm.NewBatch(db)
chunkOrm = orm.NewChunk(db)
l2BlockOrm = orm.NewL2Block(db)
proverTaskOrm = orm.NewProverTask(db)
templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
@@ -362,8 +367,12 @@ func testProofGeneratedFailed(t *testing.T) {
tickStop = time.Tick(time.Minute)
)
var chunkProofStatus types.ProvingStatus
var batchProofStatus types.ProvingStatus
var (
chunkProofStatus types.ProvingStatus
batchProofStatus types.ProvingStatus
chunkProverTaskProvingStatus types.ProverProveStatus
batchProverTaskProvingStatus types.ProverProveStatus
)
for {
select {
@@ -372,7 +381,15 @@ func testProofGeneratedFailed(t *testing.T) {
assert.NoError(t, err)
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskFailed && batchProofStatus == types.ProvingTaskFailed {
if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned {
return
}
chunkProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
batchProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProverTaskProvingStatus == types.ProverProofInvalid && batchProverTaskProvingStatus == types.ProverProofInvalid {
return
}
case <-tickStop:


@@ -14,6 +14,7 @@ import (
ctypes "scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/types"
@@ -78,7 +79,7 @@ func (r *mockProver) login(t *testing.T, challengeString string) string {
Identity: &message.Identity{
Challenge: challengeString,
ProverName: "test",
ProverVersion: "v1.0.0",
ProverVersion: version.Version,
},
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))
@@ -162,11 +163,16 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) *t
}
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) {
proofMsgStatus := message.StatusOk
if proofStatus == generatedFailed {
proofMsgStatus = message.StatusProofError
}
proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: proverTaskSchema.TaskID,
Type: message.ProofType(proverTaskSchema.TaskType),
Status: message.RespStatus(proofStatus),
Status: proofMsgStatus,
ChunkProof: &message.ChunkProof{},
BatchProof: &message.BatchProof{},
},
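The point of this change: the mock previously cast its internal proofStatus enum directly to message.RespStatus, so the generatedFailed case was not reported to the coordinator as message.StatusProofError; mapping it explicitly lets testProofGeneratedFailed actually exercise the proof-failure path.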


@@ -8,6 +8,7 @@ create table prover_task
-- prover
prover_public_key VARCHAR NOT NULL,
prover_name VARCHAR NOT NULL,
prover_version VARCHAR NOT NULL,
-- task
task_id VARCHAR NOT NULL,


@@ -1,4 +1,8 @@
.PHONY: clean build test
.PHONY: clean build test docker
IMAGE_NAME=prover-stats-api
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
build:
GOBIN=$(PWD)/build/bin go build -o $(PWD)/build/bin/prover-stats-api ./cmd
@@ -14,3 +18,6 @@ test:
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/prover-stats-api.Dockerfile
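Running `make docker` from this directory now builds scrolltech/prover-stats-api:latest with the repository root as the build context (REPO_ROOT_DIR=./..), matching the dockerfile added under build/dockerfiles; DOCKER_BUILDKIT=1 opts the build into BuildKit.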


@@ -20,6 +20,7 @@ type ProverTask struct {
TaskID string `json:"task_id" gorm:"column:task_id"`
ProverPublicKey string `json:"prover_public_key" gorm:"column:prover_public_key"`
ProverName string `json:"prover_name" gorm:"column:prover_name"`
ProverVersion string `json:"prover_version" gorm:"column:prover_version"`
TaskType int16 `json:"task_type" gorm:"column:task_type;default:0"`
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:0"`
FailureType int16 `json:"failure_type" gorm:"column:failure_type;default:0"`
@@ -94,7 +95,7 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask,
db = db.Model(&ProverTask{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
DoUpdates: clause.AssignmentColumns([]string{"prover_version", "proving_status", "failure_type", "assigned_at"}),
})
if err := db.Create(&proverTask).Error; err != nil {


@@ -1,21 +1,21 @@
.PHONY: lint docker clean prover mock-prover
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
HALO2_GPU_VERSION=$(shell ./print_halo2gpu_version.sh | sed -n '2p')
ifeq (${HALO2_GPU_VERSION},)
# use halo2_proofs with CPU
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
else
# use halo2_gpu
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_GPU_VERSION}
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_GPU_VERSION}
endif
libzkp:
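For orientation: ZK_VERSION concatenates the 7-character commit hash of the pinned scroll-prover with that of the halo2 backend, substituting the halo2_gpu hash when print_halo2gpu_version.sh reports one, so a prover build can be tied to the exact circuit and backend revisions it links against.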


@@ -31,7 +31,8 @@ func init() {
}
// Register `prover-test` apps for the integration test.
utils.RegisterSimulation(app, utils.ProverApp)
utils.RegisterSimulation(app, utils.ChunkProverApp)
utils.RegisterSimulation(app, utils.BatchProverApp)
}
func action(ctx *cli.Context) error {


@@ -6,11 +6,21 @@ import (
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
"scroll-tech/common/version"
)
func TestRunProver(t *testing.T) {
prover := cmd.NewCmd("prover-test", "--version")
func TestRunChunkProver(t *testing.T) {
prover := cmd.NewCmd(string(utils.ChunkProverApp), "--version")
defer prover.WaitExit()
// wait result
prover.ExpectWithTimeout(t, true, time.Second*3, fmt.Sprintf("prover version %s", version.Version))
prover.RunApp(nil)
}
func TestRunBatchProver(t *testing.T) {
prover := cmd.NewCmd(string(utils.BatchProverApp), "--version")
defer prover.WaitExit()
// wait result


@@ -4,15 +4,12 @@ import (
"encoding/json"
"fmt"
"os"
"sync"
"testing"
"time"
"github.com/google/uuid"
"github.com/scroll-tech/go-ethereum/rpc"
"golang.org/x/sync/errgroup"
proverConfig "scroll-tech/prover/config"
"scroll-tech/prover/config"
"scroll-tech/common/cmd"
"scroll-tech/common/docker"
@@ -31,7 +28,7 @@ func getIndex() int {
// ProverApp is a prover-test client manager.
type ProverApp struct {
Config *proverConfig.Config
Config *config.Config
base *docker.App
@@ -41,23 +38,30 @@ type ProverApp struct {
index int
name string
args []string
docker.AppAPI
}
// NewProverApp returns a new proverApp manager.
func NewProverApp(base *docker.App, file string, httpURL string, proofType message.ProofType) *ProverApp {
uuid := uuid.New().String()
proverFile := fmt.Sprintf("/tmp/%s_%d_prover-config.json", uuid, base.Timestamp)
func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, httpURL string) *ProverApp {
var proofType message.ProofType
switch mockName {
case utils.ChunkProverApp:
proofType = message.ProofTypeChunk
case utils.BatchProverApp:
proofType = message.ProofTypeBatch
default:
return nil
}
name := string(mockName)
proverFile := fmt.Sprintf("/tmp/%d_%s-config.json", base.Timestamp, name)
proverApp := &ProverApp{
base: base,
originFile: file,
proverFile: proverFile,
bboltDB: fmt.Sprintf("/tmp/%s_%d_bbolt_db", uuid, base.Timestamp),
bboltDB: fmt.Sprintf("/tmp/%d_%s_bbolt_db", base.Timestamp, name),
index: getIndex(),
name: string(utils.ProverApp),
args: []string{"--log.debug", "--config", proverFile},
name: name,
AppAPI: cmd.NewCmd(name, []string{"--log.debug", "--config", proverFile}...),
}
if err := proverApp.MockConfig(true, httpURL, proofType); err != nil {
panic(err)
@@ -66,18 +70,10 @@ func NewProverApp(base *docker.App, file string, httpURL string, proofType messa
}
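Callers now select the proof type through the mock app name (utils.ChunkProverApp or utils.BatchProverApp) instead of passing a message.ProofType, and an unrecognized name returns nil; the integration-test setup later in this diff shows the two resulting constructor calls.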
// RunApp runs the prover-test child process.
func (r *ProverApp) RunApp(t *testing.T, args ...string) {
r.AppAPI = cmd.NewCmd(r.name, append(r.args, args...)...)
func (r *ProverApp) RunApp(t *testing.T) {
r.AppAPI.RunApp(func() bool { return r.AppAPI.WaitResult(t, time.Second*40, "prover start successfully") })
}
// RunAppWithExpectedResult runs the prover-test child process with multiple parameters,
// and checks for a specific expected result in the output.
func (r *ProverApp) RunAppWithExpectedResult(t *testing.T, expectedResult string, args ...string) {
r.AppAPI = cmd.NewCmd(r.name, append(r.args, args...)...)
r.AppAPI.RunApp(func() bool { return r.AppAPI.WaitResult(t, time.Second*40, expectedResult) })
}
// Free stops and releases prover-test.
func (r *ProverApp) Free() {
if !utils.IsNil(r.AppAPI) {
@@ -90,7 +86,7 @@ func (r *ProverApp) Free() {
// MockConfig creates a new prover config.
func (r *ProverApp) MockConfig(store bool, httpURL string, proofType message.ProofType) error {
cfg, err := proverConfig.NewConfig(r.originFile)
cfg, err := config.NewConfig(r.originFile)
if err != nil {
return err
}
@@ -123,47 +119,3 @@ func (r *ProverApp) MockConfig(store bool, httpURL string, proofType message.Pro
}
return os.WriteFile(r.proverFile, data, 0600)
}
// ProverApps proverApp list.
type ProverApps []*ProverApp
// RunApps starts all the proverApps.
func (r ProverApps) RunApps(t *testing.T, args ...string) {
var eg errgroup.Group
for i := range r {
i := i
eg.Go(func() error {
r[i].RunApp(t, args...)
return nil
})
}
_ = eg.Wait()
}
// Free releases proverApps.
func (r ProverApps) Free() {
var wg sync.WaitGroup
wg.Add(len(r))
for i := range r {
i := i
go func() {
r[i].Free()
wg.Done()
}()
}
wg.Wait()
}
// WaitExit wait proverApps stopped.
func (r ProverApps) WaitExit() {
var wg sync.WaitGroup
wg.Add(len(r))
for i := range r {
i := i
go func() {
r[i].WaitExit()
wg.Done()
}()
}
wg.Wait()
}
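With each ProverApp now owning a long-lived AppAPI created in NewProverApp, the ProverApps slice helpers (the errgroup-based RunApps and the WaitGroup-based Free and WaitExit) are removed, apparently because the integration tests drive the two named apps directly.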


@@ -4,12 +4,10 @@ go 1.19
require (
github.com/go-resty/resty/v2 v2.7.0
github.com/google/uuid v1.3.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
go.etcd.io/bbolt v1.3.7
golang.org/x/sync v0.3.0
)
require (
@@ -21,6 +19,7 @@ require (
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.3 // indirect
github.com/huin/goupnp v1.0.3 // indirect
@@ -45,6 +44,7 @@ require (
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect


@@ -22,8 +22,8 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/common/version"
bcmd "scroll-tech/bridge/cmd"
)
@@ -40,8 +40,8 @@ func TestMain(m *testing.M) {
base = docker.NewDockerApp()
bridgeApp = bcmd.NewBridgeApp(base, "../../bridge/conf/config.json")
coordinatorApp = capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json")
chunkProverApp = rapp.NewProverApp(base, "../../prover/config.json", coordinatorApp.HTTPEndpoint(), message.ProofTypeChunk)
batchProverApp = rapp.NewProverApp(base, "../../prover/config.json", coordinatorApp.HTTPEndpoint(), message.ProofTypeBatch)
chunkProverApp = rapp.NewProverApp(base, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
batchProverApp = rapp.NewProverApp(base, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
m.Run()
bridgeApp.Free()
coordinatorApp.Free()
@@ -112,17 +112,21 @@ func TestCoordinatorProverInteraction(t *testing.T) {
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
assert.NoError(t, err)
t.Log(version.Version)
// Run coordinator app.
coordinatorApp.RunApp(t)
// Run prover app.
chunkProverApp.RunAppWithExpectedResult(t, "proof submitted successfully") // chunk prover login -> get task -> submit proof.
batchProverApp.RunAppWithExpectedResult(t, "proof submitted successfully") // batch prover login -> get task -> submit proof.
chunkProverApp.ExpectWithTimeout(t, true, time.Second*40, "proof submitted successfully") // chunk prover login -> get task -> submit proof.
batchProverApp.ExpectWithTimeout(t, true, time.Second*40, "proof submitted successfully") // batch prover login -> get task -> submit proof.
// All tasks have been proven, so the coordinator will not return any task.
chunkProverApp.ExpectWithTimeout(t, false, 60*time.Second, "get empty prover task")
batchProverApp.ExpectWithTimeout(t, false, 60*time.Second, "get empty prover task")
chunkProverApp.ExpectWithTimeout(t, true, 60*time.Second, "get empty prover task")
batchProverApp.ExpectWithTimeout(t, true, 60*time.Second, "get empty prover task")
chunkProverApp.RunApp(t)
batchProverApp.RunApp(t)
// Free apps.
chunkProverApp.WaitExit()
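Note the flipped expectations above: the boolean passed to ExpectWithTimeout changes from false to true, so the test now requires that "get empty prover task" does appear within 60 seconds once the lone chunk and batch tasks are proven, where it previously asserted the log's absence; the expectations are also registered before RunApp launches the provers.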
@@ -139,10 +143,12 @@ func TestProverReLogin(t *testing.T) {
// Run coordinator app.
coordinatorApp.RunApp(t) // login timeout: 1 sec
chunkProverApp.RunApp(t)
batchProverApp.RunApp(t)
// Run prover app.
chunkProverApp.RunAppWithExpectedResult(t, "re-login success") // chunk prover login.
batchProverApp.RunAppWithExpectedResult(t, "re-login success") // batch prover login.
chunkProverApp.WaitResult(t, time.Second*40, "re-login success") // chunk prover login.
batchProverApp.WaitResult(t, time.Second*40, "re-login success") // batch prover login.
// Free apps.
chunkProverApp.WaitExit()