Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-08 21:48:11 -05:00
refactor(coordinator): simplify logic post-Euclid (#1652)
.github/workflows/prover.yml (vendored, 99 lines deleted)

@@ -1,99 +0,0 @@
-name: Prover
-
-on:
-  push:
-    branches:
-      - main
-      - staging
-      - develop
-      - alpha
-    paths:
-      - 'prover/**'
-      - '.github/workflows/prover.yml'
-  pull_request:
-    types:
-      - opened
-      - reopened
-      - synchronize
-      - ready_for_review
-    paths:
-      - 'prover/**'
-      - '.github/workflows/prover.yml'
-
-defaults:
-  run:
-    working-directory: 'prover'
-
-jobs:
-  skip_check:
-    runs-on: ubuntu-latest
-    outputs:
-      should_skip: ${{ steps.skip_check.outputs.should_skip }}
-    steps:
-      - id: skip_check
-        uses: fkirc/skip-duplicate-actions@v5
-        with:
-          cancel_others: 'true'
-          concurrent_skipping: 'same_content_newer'
-          paths_ignore: '["**/README.md"]'
-
-  fmt:
-    needs: [skip_check]
-    if: |
-      github.event.pull_request.draft == false &&
-      (github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
-    runs-on: ubuntu-latest
-    timeout-minutes: 15
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: nightly-2023-12-03
-          components: rustfmt
-      - name: Cargo cache
-        uses: Swatinem/rust-cache@v2
-        with:
-          workspaces: "prover -> target"
-      - name: Cargo check
-        run: cargo check --all-features
-      - name: Cargo fmt
-        run: cargo fmt --all -- --check
-
-  clippy:
-    needs: [skip_check, fmt]
-    if: |
-      github.event.pull_request.draft == false &&
-      (github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
-    runs-on: ubuntu-latest
-    timeout-minutes: 30
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: nightly-2023-12-03
-          components: clippy
-      - name: Cargo cache
-        uses: Swatinem/rust-cache@v2
-        with:
-          workspaces: "prover -> target"
-      - name: Run clippy
-        run: cargo clippy --all-features --all-targets -- -D warnings
-
-  compile:
-    needs: [skip_check, clippy]
-    if: |
-      github.event.pull_request.draft == false &&
-      (github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: nightly-2023-12-03
-      - name: Cache cargo
-        uses: Swatinem/rust-cache@v2
-        with:
-          workspaces: "prover -> target"
-      - name: Test
-        run: |
-          make prover
@@ -11,7 +11,7 @@ Please note that this project is released with a [Contributor Code of Conduct][c
 
 ## Contribute to Scroll
 
-Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. If you are looking to contribute to Scroll's Halo2 zkEVM circuits please refer to the [zkEVM circuits](https://github.com/scroll-tech/zkevm-circuits) repo. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.
+Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.
 
 ## Issues and PRs

@@ -26,12 +26,6 @@ pub unsafe extern "C" fn verify_chunk_proof(
 
 fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
     let fork_name_str = c_char_to_str(fork_name);
-    // Skip verification for darwinV2 as we can't host darwinV2 and euclid verifiers on the same
-    // binary.
-    if fork_name_str == "darwinV2" {
-        return true as c_char;
-    }
-
     let proof = c_char_to_vec(proof);
     let verifier = verifier::get_verifier(fork_name_str);

@@ -14,7 +14,6 @@ const (
 	EuclidFork   = "euclid"
 	EuclidV2Fork = "euclidV2"
-
 	EuclidForkNameForProver   = "euclidv1"
 	EuclidV2ForkNameForProver = "euclidv2"
 )
@@ -99,22 +98,22 @@ func (e *Byte48) UnmarshalJSON(input []byte) error {
 // BatchTaskDetail is a type containing BatchTask detail.
 type BatchTaskDetail struct {
-	// use one of the string of EuclidFork / EuclidV2Fork
-	ForkName        string       `json:"fork_name"`
-	ChunkInfos      []*ChunkInfo `json:"chunk_infos"`
-	ChunkProofs     []ChunkProof `json:"chunk_proofs"`
-	BatchHeader     interface{}  `json:"batch_header"`
-	BlobBytes       []byte       `json:"blob_bytes"`
-	KzgProof        Byte48       `json:"kzg_proof,omitempty"`
-	KzgCommitment   Byte48       `json:"kzg_commitment,omitempty"`
-	ChallengeDigest common.Hash  `json:"challenge_digest,omitempty"`
+	ForkName        string              `json:"fork_name"`
+	ChunkInfos      []*ChunkInfo        `json:"chunk_infos"`
+	ChunkProofs     []*OpenVMChunkProof `json:"chunk_proofs"`
+	BatchHeader     interface{}         `json:"batch_header"`
+	BlobBytes       []byte              `json:"blob_bytes"`
+	KzgProof        Byte48              `json:"kzg_proof,omitempty"`
+	KzgCommitment   Byte48              `json:"kzg_commitment,omitempty"`
+	ChallengeDigest common.Hash         `json:"challenge_digest,omitempty"`
 }
 
 // BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
 type BundleTaskDetail struct {
-	// use one of the string of EuclidFork / EuclidV2Fork
-	ForkName    string            `json:"fork_name"`
-	BatchProofs []BatchProof      `json:"batch_proofs"`
-	BundleInfo  *OpenVMBundleInfo `json:"bundle_info,omitempty"`
+	ForkName    string              `json:"fork_name"`
+	BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
+	BundleInfo  *OpenVMBundleInfo   `json:"bundle_info,omitempty"`
 }
 
 // ChunkInfo is for calculating pi_hash for chunk
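The struct changes above drop the `ChunkProof`/`BatchProof` interfaces in favor of the concrete OpenVM types. A minimal sketch of what building a task detail looks like after the change; the stand-in types below only mirror the fields visible in this diff (the real definitions live in `scroll-tech/common/types/message`, and the `vm_proof` JSON tag is an assumption):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-ins mirroring fields shown in the diff (illustration only).
type OpenVMChunkProof struct {
	VmProof json.RawMessage `json:"vm_proof,omitempty"` // tag assumed
}

type BatchTaskDetail struct {
	ForkName    string              `json:"fork_name"`
	ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
}

func main() {
	// No factory, no interface: callers populate the concrete type directly.
	detail := BatchTaskDetail{
		ForkName:    "euclidv2", // EuclidV2ForkNameForProver
		ChunkProofs: []*OpenVMChunkProof{{}},
	}
	buf, err := json.Marshal(&detail)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // {"fork_name":"euclidv2","chunk_proofs":[{}]}
}
```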
@@ -143,157 +142,6 @@ type BlockContextV2 struct {
 	NumL1Msgs uint16 `json:"num_l1_msgs"`
 }
 
-// SubCircuitRowUsage tracing info added in v0.11.0rc8
-type SubCircuitRowUsage struct {
-	Name      string `json:"name"`
-	RowNumber uint64 `json:"row_number"`
-}
-
-// ChunkProof
-type ChunkProof interface {
-	Proof() []byte
-}
-
-// NewChunkProof creates a new ChunkProof instance.
-func NewChunkProof(hardForkName string) ChunkProof {
-	switch hardForkName {
-	case EuclidFork, EuclidV2Fork:
-		return &OpenVMChunkProof{}
-	default:
-		return &Halo2ChunkProof{}
-	}
-}
-
-// Halo2ChunkProof includes the proof info that are required for chunk verification and rollup.
-type Halo2ChunkProof struct {
-	StorageTrace []byte `json:"storage_trace,omitempty"`
-	Protocol     []byte `json:"protocol"`
-	RawProof     []byte `json:"proof"`
-	Instances    []byte `json:"instances"`
-	Vk           []byte `json:"vk"`
-	// cross-reference between cooridinator computation and prover compution
-	ChunkInfo  *ChunkInfo           `json:"chunk_info,omitempty"`
-	GitVersion string               `json:"git_version,omitempty"`
-	RowUsages  []SubCircuitRowUsage `json:"row_usages,omitempty"`
-}
-
-// Proof returns the proof bytes of a ChunkProof
-func (ap *Halo2ChunkProof) Proof() []byte {
-	return ap.RawProof
-}
-
-// BatchProof
-type BatchProof interface {
-	SanityCheck() error
-	Proof() []byte
-}
-
-// NewBatchProof creates a new BatchProof instance.
-func NewBatchProof(hardForkName string) BatchProof {
-	switch hardForkName {
-	case EuclidFork, EuclidV2Fork:
-		return &OpenVMBatchProof{}
-	default:
-		return &Halo2BatchProof{}
-	}
-}
-
-// Halo2BatchProof includes the proof info that are required for batch verification and rollup.
-type Halo2BatchProof struct {
-	Protocol  []byte `json:"protocol"`
-	RawProof  []byte `json:"proof"`
-	Instances []byte `json:"instances"`
-	Vk        []byte `json:"vk"`
-	// cross-reference between cooridinator computation and prover compution
-	BatchHash  common.Hash `json:"batch_hash"`
-	GitVersion string      `json:"git_version,omitempty"`
-}
-
-// Proof returns the proof bytes of a BatchProof
-func (ap *Halo2BatchProof) Proof() []byte {
-	return ap.RawProof
-}
-
-// SanityCheck checks whether a BatchProof is in a legal format
-func (ap *Halo2BatchProof) SanityCheck() error {
-	if ap == nil {
-		return errors.New("agg_proof is nil")
-	}
-
-	if len(ap.RawProof) == 0 {
-		return errors.New("proof not ready")
-	}
-
-	if len(ap.RawProof)%32 != 0 {
-		return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.RawProof))
-	}
-
-	if len(ap.Instances) == 0 {
-		return errors.New("instance not ready")
-	}
-
-	if len(ap.Vk) == 0 {
-		return errors.New("vk not ready")
-	}
-
-	return nil
-}
-
-// BundleProof
-type BundleProof interface {
-	SanityCheck() error
-	Proof() []byte
-}
-
-// NewBundleProof creates a new BundleProof instance.
-func NewBundleProof(hardForkName string) BundleProof {
-	switch hardForkName {
-	case EuclidFork, EuclidV2Fork:
-		return &OpenVMBundleProof{}
-	default:
-		return &Halo2BundleProof{}
-	}
-}
-
-// BundleProof includes the proof info that are required for verification of a bundle of batch proofs.
-type Halo2BundleProof struct {
-	RawProof  []byte `json:"proof"`
-	Instances []byte `json:"instances"`
-	Vk        []byte `json:"vk"`
-	// cross-reference between cooridinator computation and prover compution
-	GitVersion string `json:"git_version,omitempty"`
-}
-
-// Proof returns the proof bytes of a BundleProof
-func (ap *Halo2BundleProof) Proof() []byte {
-	return ap.RawProof
-}
-
-// SanityCheck checks whether a BundleProof is in a legal format
-func (ap *Halo2BundleProof) SanityCheck() error {
-	if ap == nil {
-		return errors.New("agg_proof is nil")
-	}
-
-	if len(ap.RawProof) == 0 {
-		return errors.New("proof not ready")
-	}
-
-	if len(ap.RawProof)%32 != 0 {
-		return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.RawProof))
-	}
-
-	if len(ap.Instances) == 0 {
-		return errors.New("instance not ready")
-	}
-
-	if len(ap.Vk) == 0 {
-		return errors.New("vk not ready")
-	}
-
-	return nil
-}
-
 // Proof for flatten VM proof
 type OpenVMProof struct {
 	Proof []byte `json:"proofs"`
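Everything deleted above served one purpose: selecting a Halo2 or OpenVM implementation behind the `ChunkProof`/`BatchProof`/`BundleProof` interfaces at runtime. With the Halo2 path gone, callers unmarshal straight into the concrete type. A reduced sketch of the new pattern, under stand-in types (the `proofs` tag is taken from this diff; the `vm_proof` tag is assumed):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-ins for message.OpenVMProof / message.OpenVMChunkProof.
type OpenVMProof struct {
	Proof []byte `json:"proofs"`
}

type OpenVMChunkProof struct {
	VmProof *OpenVMProof `json:"vm_proof,omitempty"` // tag assumed
}

// decodeChunkProof replaces the old factory call
// `proof := message.NewChunkProof(hardForkName)` with a direct unmarshal.
func decodeChunkProof(raw []byte) (*OpenVMChunkProof, error) {
	var proof OpenVMChunkProof
	if err := json.Unmarshal(raw, &proof); err != nil {
		return nil, fmt.Errorf("failed to unmarshal chunk proof: %w", err)
	}
	return &proof, nil
}

func main() {
	proof, err := decodeChunkProof([]byte(`{"vm_proof":{"proofs":"aGk="}}`))
	fmt.Println(proof.VmProof != nil, err) // true <nil>
}
```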
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )
 
-var tag = "v4.5.9"
+var tag = "v4.5.10"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {

@@ -90,17 +90,9 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
 	cfg.ProverManager = &coordinatorConfig.ProverManager{
 		ProversPerSession: 1,
 		Verifier: &coordinatorConfig.VerifierConfig{
 			MockMode: true,
-			LowVersionCircuit: &coordinatorConfig.CircuitConfig{
-				ParamsPath:       "",
-				AssetsPath:       "",
-				ForkName:         "darwin",
-				MinProverVersion: "v4.4.57",
-			},
 			HighVersionCircuit: &coordinatorConfig.CircuitConfig{
 				ParamsPath:       "",
 				AssetsPath:       "",
-				ForkName:         "darwinV2",
+				ForkName:         "euclidV2",
 				MinProverVersion: "v4.4.89",
 			},
 		},

@@ -62,14 +62,14 @@ func action(ctx *cli.Context) error {
 		return fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", taskID)
 	}
 
-	var batchProofs []message.BatchProof
+	var batchProofs []*message.OpenVMBatchProof
 	for _, batch := range batches {
-		proof := message.NewBatchProof("darwinV2")
+		var proof message.OpenVMBatchProof
 		if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
 			log.Error("failed to unmarshal batch proof")
 			return fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, taskID, batch.Hash)
 		}
-		batchProofs = append(batchProofs, proof)
+		batchProofs = append(batchProofs, &proof)
 	}
 
 	taskDetail := message.BundleTaskDetail{
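One Go detail worth noting in the rewritten loop: `proof` is declared inside the loop body, so `&proof` takes the address of a fresh variable on every iteration and the appended pointers never alias each other. A self-contained sketch of that pattern (stand-in type only):

```go
package main

import "fmt"

type openVMBatchProof struct{ ID int } // stand-in for message.OpenVMBatchProof

func main() {
	var proofs []*openVMBatchProof
	for i := 1; i <= 3; i++ {
		var proof openVMBatchProof // fresh variable each iteration
		proof.ID = i
		proofs = append(proofs, &proof) // pointers do not alias
	}
	for _, p := range proofs {
		fmt.Println(p.ID) // 1, 2, 3
	}
}
```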
@@ -7,17 +7,9 @@
     "batch_collection_time_sec": 180,
     "chunk_collection_time_sec": 180,
     "verifier": {
       "mock_mode": true,
-      "low_version_circuit": {
-        "params_path": "params",
-        "assets_path": "assets",
-        "fork_name": "darwin",
-        "min_prover_version": "v4.4.43"
-      },
       "high_version_circuit": {
         "params_path": "params",
         "assets_path": "assets",
-        "fork_name": "darwinV2",
+        "fork_name": "euclidV2",
         "min_prover_version": "v4.4.45"
       }
     }

@@ -51,7 +51,6 @@ type Config struct {
 
 // CircuitConfig circuit items.
 type CircuitConfig struct {
 	ParamsPath       string `json:"params_path"`
 	AssetsPath       string `json:"assets_path"`
 	ForkName         string `json:"fork_name"`
 	MinProverVersion string `json:"min_prover_version"`

@@ -59,8 +58,6 @@ type CircuitConfig struct {
 
 // VerifierConfig load zk verifier config.
 type VerifierConfig struct {
 	MockMode           bool           `json:"mock_mode"`
-	LowVersionCircuit  *CircuitConfig `json:"low_version_circuit"`
 	HighVersionCircuit *CircuitConfig `json:"high_version_circuit"`
 }
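After the trim, `VerifierConfig` carries only `mock_mode` and a single `high_version_circuit` entry. A minimal decoding sketch using stand-in structs with the same JSON tags as above:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type circuitConfig struct {
	ParamsPath       string `json:"params_path"`
	AssetsPath       string `json:"assets_path"`
	ForkName         string `json:"fork_name"`
	MinProverVersion string `json:"min_prover_version"`
}

type verifierConfig struct {
	MockMode           bool           `json:"mock_mode"`
	HighVersionCircuit *circuitConfig `json:"high_version_circuit"`
}

func main() {
	raw := `{"mock_mode":true,"high_version_circuit":{"params_path":"params","assets_path":"assets","fork_name":"euclidV2","min_prover_version":"v4.4.45"}}`
	var cfg verifierConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.HighVersionCircuit.ForkName) // euclidV2
}
```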
@@ -15,15 +15,18 @@ func TestConfig(t *testing.T) {
 		"prover_manager": {
 			"provers_per_session": 1,
 			"session_attempts": 5,
 			"external_prover_threshold": 32,
 			"bundle_collection_time_sec": 180,
 			"batch_collection_time_sec": 180,
 			"chunk_collection_time_sec": 180,
 			"verifier": {
 				"mock_mode": true,
-				"params_path": "",
-				"agg_vk_path": ""
+				"high_version_circuit": {
+					"assets_path": "assets",
+					"fork_name": "euclidV2",
+					"min_prover_version": "v4.4.45"
+				}
 			},
-			"max_verifier_workers": 4,
-			"min_prover_version": "v1.0.0"
+			"max_verifier_workers": 4
 		},
 		"db": {
 			"driver_name": "postgres",

@@ -26,7 +26,7 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
 		panic("proof receiver new verifier failure")
 	}
 
-	log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap, "bundleVerifier", vf.BundleVkMap, "openVmVerifier", vf.OpenVMVkMap)
+	log.Info("verifier created", "openVmVerifier", vf.OpenVMVkMap)
 
 	Auth = NewAuthController(db, cfg, vf)
 	GetTask = NewGetTaskController(cfg, chainCfg, db, reg)

@@ -22,9 +22,6 @@ import (
 type LoginLogic struct {
 	cfg          *config.Config
 	challengeOrm *orm.Challenge
-	chunkVks     map[string]struct{}
-	batchVKs     map[string]struct{}
-	bundleVks    map[string]struct{}
 
 	openVmVks map[string]struct{}

@@ -34,28 +31,14 @@ type LoginLogic struct {
 // NewLoginLogic new a LoginLogic
 func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
 	proverVersionHardForkMap := make(map[string][]string)
-	if version.CheckScrollRepoVersion(cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
-		log.Error("config file error, low verifier min_prover_version should not more than high verifier min_prover_version",
-			"low verifier min_prover_version", cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion,
-			"high verifier min_prover_version", cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion)
-		panic("verifier config file error")
-	}
-
 	var highHardForks []string
-	highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
-	if cfg.ProverManager.Verifier.HighVersionCircuit.ForkName != message.EuclidFork && cfg.ProverManager.Verifier.HighVersionCircuit.ForkName != message.EuclidV2Fork {
-		highHardForks = append(highHardForks, cfg.ProverManager.Verifier.LowVersionCircuit.ForkName)
-	}
+	highHardForks = append(highHardForks, message.EuclidFork, message.EuclidV2Fork)
 	proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks
 
-	proverVersionHardForkMap[cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion] = []string{cfg.ProverManager.Verifier.LowVersionCircuit.ForkName}
-
 	return &LoginLogic{
 		cfg:          cfg,
-		chunkVks:     vf.ChunkVKMap,
-		batchVKs:     vf.BatchVKMap,
-		bundleVks:    vf.BundleVkMap,
 		openVmVks:    vf.OpenVMVkMap,
 		challengeOrm: orm.NewChallenge(db),
 		proverVersionHardForkMap: proverVersionHardForkMap,
@@ -75,49 +58,25 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
 		return errors.New("auth message verify failure")
 	}
 
-	// FIXME: for backward compatibility, set prover version as darwin prover version,
-	// change v4.4.56 to l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion after Euclid upgrade, including the log.
-	// hardcode the prover version because l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion is used in another check and should be set as v4.4.89 for darwinV2 provers.
-	if !version.CheckScrollRepoVersion(login.Message.ProverVersion, "v4.4.56") {
-		return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
-			"v4.4.56", login.Message.ProverVersion)
+	if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
+		return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion, login.Message.ProverVersion)
 	}
 
-	if len(login.Message.ProverTypes) > 0 {
-		vks := make(map[string]struct{})
-		for _, proverType := range login.Message.ProverTypes {
-			switch proverType {
-			case types.ProverTypeChunk:
-				for vk := range l.chunkVks {
-					vks[vk] = struct{}{}
-				}
-			case types.ProverTypeBatch:
-				for vk := range l.batchVKs {
-					vks[vk] = struct{}{}
-				}
-				for vk := range l.bundleVks {
-					vks[vk] = struct{}{}
-				}
-			case types.ProverTypeOpenVM:
-				for vk := range l.openVmVks {
-					vks[vk] = struct{}{}
-				}
-			default:
-				log.Error("invalid prover_type", "value", proverType, "prover name", login.Message.ProverName, "prover_version", login.Message.ProverVersion)
-			}
-		}
+	vks := make(map[string]struct{})
+	for vk := range l.openVmVks {
+		vks[vk] = struct{}{}
+	}
 
-		for _, vk := range login.Message.VKs {
-			if _, ok := vks[vk]; !ok {
-				log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
-					"prover_version", login.Message.ProverVersion, "message", login.Message)
-				if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
-					return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
-						version.Version, login.Message.ProverVersion)
-				}
-				// if the prover reports a same prover version
-				return errors.New("incompatible vk. please check your params files or config files")
-			}
-		}
+	for _, vk := range login.Message.VKs {
+		if _, ok := vks[vk]; !ok {
+			log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
+				"prover_version", login.Message.ProverVersion, "message", login.Message)
+			if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
+				return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
+					version.Version, login.Message.ProverVersion)
+			}
+			// if the prover reports a same prover version
+			return errors.New("incompatible vk. please check your params files or config files")
+		}
+	}
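The rewritten check collapses the per-prover-type switch into a single set-membership test against the OpenVM VK map. A reduced sketch of the same logic, under stand-in names:

```go
package main

import (
	"errors"
	"fmt"
)

// checkVKs accepts a login only if every reported VK is known to the verifier.
func checkVKs(openVmVks map[string]struct{}, reported []string) error {
	for _, vk := range reported {
		if _, ok := openVmVks[vk]; !ok {
			return errors.New("incompatible vk. please check your params files or config files")
		}
	}
	return nil
}

func main() {
	known := map[string]struct{}{"mock_vk": {}}
	fmt.Println(checkVKs(known, []string{"mock_vk"})) // <nil>
	fmt.Println(checkVKs(known, []string{"bad_vk"}))  // error
}
```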
@@ -142,12 +101,6 @@ func (l *LoginLogic) ProverHardForkName(login *types.LoginParameter) (string, er
 	}
 
 	proverVersion := proverVersionSplits[0]
 
-	// allowing darwin provers to login, because darwin provers can prove darwinV2 chunk tasks
-	if proverVersion == "v4.4.56" {
-		return "darwin", nil
-	}
-
 	if hardForkNames, ok := l.proverVersionHardForkMap[proverVersion]; ok {
 		return strings.Join(hardForkNames, ","), nil
 	}
@@ -197,34 +197,27 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
 		return nil, fmt.Errorf("no chunk found for batch task id:%s", task.TaskID)
 	}
 
-	var chunkProofs []message.ChunkProof
+	var chunkProofs []*message.OpenVMChunkProof
 	var chunkInfos []*message.ChunkInfo
 	for _, chunk := range chunks {
-		proof := message.NewChunkProof(hardForkName)
+		var proof message.OpenVMChunkProof
 		if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
 			return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, task.TaskID, chunk.Hash)
 		}
-		chunkProofs = append(chunkProofs, proof)
+		chunkProofs = append(chunkProofs, &proof)
 
 		chunkInfo := message.ChunkInfo{
-			ChainID:          bp.cfg.L2.ChainID,
-			PrevStateRoot:    common.HexToHash(chunk.ParentChunkStateRoot),
-			PostStateRoot:    common.HexToHash(chunk.StateRoot),
-			WithdrawRoot:     common.HexToHash(chunk.WithdrawRoot),
-			DataHash:         common.HexToHash(chunk.Hash),
-			PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
-			PostMsgQueueHash: common.HexToHash(chunk.PostL1MessageQueueHash),
-			IsPadding:        false,
-		}
-		if halo2Proof, ok := proof.(*message.Halo2ChunkProof); ok {
-			if halo2Proof.ChunkInfo != nil {
-				chunkInfo.TxBytes = halo2Proof.ChunkInfo.TxBytes
-			}
-		}
-		if openvmProof, ok := proof.(*message.OpenVMChunkProof); ok {
-			chunkInfo.InitialBlockNumber = openvmProof.MetaData.ChunkInfo.InitialBlockNumber
-			chunkInfo.BlockCtxs = openvmProof.MetaData.ChunkInfo.BlockCtxs
-			chunkInfo.TxDataLength = openvmProof.MetaData.ChunkInfo.TxDataLength
+			ChainID:            bp.cfg.L2.ChainID,
+			PrevStateRoot:      common.HexToHash(chunk.ParentChunkStateRoot),
+			PostStateRoot:      common.HexToHash(chunk.StateRoot),
+			WithdrawRoot:       common.HexToHash(chunk.WithdrawRoot),
+			DataHash:           common.HexToHash(chunk.Hash),
+			PrevMsgQueueHash:   common.HexToHash(chunk.PrevL1MessageQueueHash),
+			PostMsgQueueHash:   common.HexToHash(chunk.PostL1MessageQueueHash),
+			IsPadding:          false,
+			InitialBlockNumber: proof.MetaData.ChunkInfo.InitialBlockNumber,
+			BlockCtxs:          proof.MetaData.ChunkInfo.BlockCtxs,
+			TxDataLength:       proof.MetaData.ChunkInfo.TxDataLength,
 		}
 		chunkInfos = append(chunkInfos, &chunkInfo)
 	}
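With the interface gone, chunk metadata is read directly from `proof.MetaData.ChunkInfo` instead of through type assertions. A reduced sketch of that access pattern (stand-in types only; the real ones are in `scroll-tech/common/types/message`):

```go
package main

import "fmt"

type chunkInfo struct {
	InitialBlockNumber uint64
	TxDataLength       uint64
}

type openVMChunkProof struct {
	MetaData struct {
		ChunkInfo *chunkInfo
	}
}

func main() {
	var proof openVMChunkProof
	proof.MetaData.ChunkInfo = &chunkInfo{InitialBlockNumber: 42, TxDataLength: 7}

	// No type assertion: metadata is read straight off the concrete proof type.
	info := chunkInfo{
		InitialBlockNumber: proof.MetaData.ChunkInfo.InitialBlockNumber,
		TxDataLength:       proof.MetaData.ChunkInfo.TxDataLength,
	}
	fmt.Printf("%+v\n", info) // {InitialBlockNumber:42 TxDataLength:7}
}
```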
@@ -258,7 +251,7 @@ func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *or
 	}
 }
 
-func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []message.ChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
+func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []*message.OpenVMChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
 	taskDetail := &message.BatchTaskDetail{
 		ChunkInfos:  chunkInfos,
 		ChunkProofs: chunkProofs,

@@ -266,8 +259,9 @@ func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*
 	if hardForkName == message.EuclidV2Fork {
 		taskDetail.ForkName = message.EuclidV2ForkNameForProver
 	} else if hardForkName == message.EuclidFork {
 		taskDetail.ForkName = message.EuclidForkNameForProver
 	} else {
 		log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
 		return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
 	}
 
 	dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
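The same fork-name translation (coordinator-side name to prover-facing name) recurs in the batch, bundle, and chunk task builders below. A small helper capturing it could look like this; the constants are the ones defined earlier in this diff, while the helper itself is a hypothetical refactor, not part of the commit:

```go
package main

import "fmt"

const (
	EuclidFork   = "euclid"
	EuclidV2Fork = "euclidV2"

	EuclidForkNameForProver   = "euclidv1"
	EuclidV2ForkNameForProver = "euclidv2"
)

// proverForkName maps a coordinator hard-fork name to the name the prover expects.
func proverForkName(hardForkName string) (string, error) {
	switch hardForkName {
	case EuclidV2Fork:
		return EuclidV2ForkNameForProver, nil
	case EuclidFork:
		return EuclidForkNameForProver, nil
	default:
		return "", fmt.Errorf("unsupported hard fork name: %s", hardForkName)
	}
}

func main() {
	name, err := proverForkName("euclidV2")
	fmt.Println(name, err) // euclidv2 <nil>
}
```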
@@ -200,13 +200,13 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
 		return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
 	}
 
-	var batchProofs []message.BatchProof
+	var batchProofs []*message.OpenVMBatchProof
 	for _, batch := range batches {
-		proof := message.NewBatchProof(hardForkName)
+		var proof message.OpenVMBatchProof
 		if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
 			return nil, fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, task.TaskID, batch.Hash)
 		}
-		batchProofs = append(batchProofs, proof)
+		batchProofs = append(batchProofs, &proof)
 	}
 
 	taskDetail := message.BundleTaskDetail{

@@ -215,8 +215,9 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
 	if hardForkName == message.EuclidV2Fork {
 		taskDetail.ForkName = message.EuclidV2ForkNameForProver
 	} else if hardForkName == message.EuclidFork {
 		taskDetail.ForkName = message.EuclidForkNameForProver
 	} else {
 		log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
 		return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
 	}
 
 	taskDetail.BundleInfo = &message.OpenVMBundleInfo{

@@ -227,10 +228,7 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
 		NumBatches:    uint32(len(batches)),
 		PrevBatchHash: common.HexToHash(batches[0].ParentBatchHash),
 		BatchHash:     common.HexToHash(batches[len(batches)-1].Hash),
+		MsgQueueHash:  common.HexToHash(batches[len(batches)-1].PostL1MessageQueueHash),
 	}
 
-	if hardForkName == message.EuclidV2Fork {
-		taskDetail.BundleInfo.MsgQueueHash = common.HexToHash(batches[len(batches)-1].PostL1MessageQueueHash)
-	}
-
 	batchProofsBytes, err := json.Marshal(taskDetail)

@@ -195,8 +195,9 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
 	if hardForkName == message.EuclidV2Fork {
 		taskDetail.ForkName = message.EuclidV2ForkNameForProver
 	} else if hardForkName == message.EuclidFork {
 		taskDetail.ForkName = message.EuclidForkNameForProver
 	} else {
 		log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
 		return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
 	}
 
 	var err error

@@ -121,11 +121,6 @@ func (b *BaseProverTask) hardForkSanityCheck(ctx *gin.Context, taskCtx *proverTa
 		return "", getHardForkErr
 	}
 
-	// for backward compatibility, darwin chunk prover can still prove darwinV2 chunk tasks
-	if taskCtx.taskType == message.ProofTypeChunk && hardForkName == "darwinV2" && strings.HasPrefix(taskCtx.ProverVersion, "v4.4.56") {
-		return hardForkName, nil
-	}
-
 	if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
 		return "", fmt.Errorf("to be assigned prover task's hard-fork name is not the same as prover, proverName: %s, proverVersion: %s, proverSupportHardForkNames: %s, taskHardForkName: %v", taskCtx.ProverName, taskCtx.ProverVersion, taskCtx.HardForkNames, hardForkName)
 	}
@@ -171,19 +171,19 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
 	switch message.ProofType(proofParameter.TaskType) {
 	case message.ProofTypeChunk:
-		chunkProof := message.NewChunkProof(hardForkName)
+		chunkProof := &message.OpenVMChunkProof{}
 		if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &chunkProof); unmarshalErr != nil {
 			return unmarshalErr
 		}
 		success, verifyErr = m.verifier.VerifyChunkProof(chunkProof, hardForkName)
 	case message.ProofTypeBatch:
-		batchProof := message.NewBatchProof(hardForkName)
+		batchProof := &message.OpenVMBatchProof{}
 		if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &batchProof); unmarshalErr != nil {
 			return unmarshalErr
 		}
 		success, verifyErr = m.verifier.VerifyBatchProof(batchProof, hardForkName)
 	case message.ProofTypeBundle:
-		bundleProof := message.NewBundleProof(hardForkName)
+		bundleProof := &message.OpenVMBundleProof{}
 		if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &bundleProof); unmarshalErr != nil {
 			return unmarshalErr
 		}
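One subtlety in each branch above: `chunkProof` is already a pointer, so `json.Unmarshal(..., &chunkProof)` passes a pointer-to-pointer, which `encoding/json` handles by following the extra level of indirection. A self-contained sketch (stand-in type, field tag assumed):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type openVMChunkProof struct {
	Raw []byte `json:"proofs,omitempty"` // stand-in field
}

func main() {
	chunkProof := &openVMChunkProof{}
	// &chunkProof is a **openVMChunkProof, mirroring the code above;
	// encoding/json dereferences through it and fills the struct.
	if err := json.Unmarshal([]byte(`{}`), &chunkProof); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", chunkProof)
}
```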
@@ -10,31 +10,26 @@ import (
 // NewVerifier Sets up a mock verifier.
 func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
-	batchVKMap := map[string]struct{}{"mock_vk": {}}
-	chunkVKMap := map[string]struct{}{"mock_vk": {}}
-	return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
+	return &Verifier{cfg: cfg, OpenVMVkMap: map[string]struct{}{"mock_vk": {}}}, nil
 }
 
 // VerifyChunkProof return a mock verification result for a ChunkProof.
-func (v *Verifier) VerifyChunkProof(proof message.ChunkProof, forkName string) (bool, error) {
-	if string(proof.Proof()) == InvalidTestProof {
+func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName string) (bool, error) {
+	if proof.VmProof != nil && string(proof.VmProof.Proof) == InvalidTestProof {
 		return false, nil
 	}
 	return true, nil
 }
 
 // VerifyBatchProof return a mock verification result for a BatchProof.
-func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (bool, error) {
-	if string(proof.Proof()) == InvalidTestProof {
+func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName string) (bool, error) {
+	if proof.VmProof != nil && string(proof.VmProof.Proof) == InvalidTestProof {
 		return false, nil
 	}
 	return true, nil
 }
 
 // VerifyBundleProof return a mock verification result for a BundleProof.
-func (v *Verifier) VerifyBundleProof(proof message.BundleProof, forkName string) (bool, error) {
-	if string(proof.Proof()) == InvalidTestProof {
-		return false, nil
-	}
-	return true, nil
-}
+func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName string) (bool, error) {
+	return true, nil
+}

@@ -7,11 +7,8 @@ import (
 // InvalidTestProof invalid proof used in tests
 const InvalidTestProof = "this is a invalid proof"
 
-// Verifier represents a rust ffi to a halo2 verifier.
+// Verifier represents a rust ffi to a verifier.
 type Verifier struct {
 	cfg *config.VerifierConfig
-	ChunkVKMap  map[string]struct{}
-	BatchVKMap  map[string]struct{}
-	BundleVkMap map[string]struct{}
 	OpenVMVkMap map[string]struct{}
 }

@@ -30,14 +30,12 @@ import (
 // in `*config.CircuitConfig` being changed
 type rustCircuitConfig struct {
 	ForkName   string `json:"fork_name"`
 	ParamsPath string `json:"params_path"`
 	AssetsPath string `json:"assets_path"`
 }
 
 func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
 	return &rustCircuitConfig{
 		ForkName:   cfg.ForkName,
 		ParamsPath: cfg.ParamsPath,
 		AssetsPath: cfg.AssetsPath,
 	}
 }

@@ -46,13 +44,11 @@ func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
 // Define a brand new struct here is to eliminate side effects in case fields
 // in `*config.VerifierConfig` being changed
 type rustVerifierConfig struct {
-	LowVersionCircuit  *rustCircuitConfig `json:"low_version_circuit"`
 	HighVersionCircuit *rustCircuitConfig `json:"high_version_circuit"`
 }
 
 func newRustVerifierConfig(cfg *config.VerifierConfig) *rustVerifierConfig {
 	return &rustVerifierConfig{
-		LowVersionCircuit:  newRustCircuitConfig(cfg.LowVersionCircuit),
 		HighVersionCircuit: newRustCircuitConfig(cfg.HighVersionCircuit),
 	}
 }
@@ -65,19 +61,6 @@ type rustVkDump struct {
 
 // NewVerifier Sets up a rust ffi to call verify.
 func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
-	if cfg.MockMode {
-		chunkVKMap := map[string]struct{}{"mock_vk": {}}
-		batchVKMap := map[string]struct{}{"mock_vk": {}}
-		bundleVKMap := map[string]struct{}{"mock_vk": {}}
-		openVMVkMap := map[string]struct{}{"mock_vk": {}}
-		return &Verifier{
-			cfg:         cfg,
-			ChunkVKMap:  chunkVKMap,
-			BatchVKMap:  batchVKMap,
-			BundleVkMap: bundleVKMap,
-			OpenVMVkMap: openVMVkMap,
-		}, nil
-	}
 	verifierConfig := newRustVerifierConfig(cfg)
 	configBytes, err := json.Marshal(verifierConfig)
 	if err != nil {

@@ -93,16 +76,9 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
 	v := &Verifier{
 		cfg:         cfg,
-		ChunkVKMap:  make(map[string]struct{}),
-		BatchVKMap:  make(map[string]struct{}),
-		BundleVkMap: make(map[string]struct{}),
 		OpenVMVkMap: make(map[string]struct{}),
 	}
 
-	if err := v.loadLowVersionVKs(cfg); err != nil {
-		return nil, err
-	}
-
 	if err := v.loadOpenVMVks(message.EuclidFork); err != nil {
 		return nil, err
 	}

@@ -111,21 +87,11 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
 		return nil, err
 	}
 
-	v.loadDarwinVKs()
-
 	return v, nil
 }
 
-// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
-func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (bool, error) {
-	if v.cfg.MockMode {
-		log.Info("Mock mode, batch verifier disabled")
-		if string(proof.Proof()) == InvalidTestProof {
-			return false, nil
-		}
-		return true, nil
-	}
+// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Verifier.
+func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName string) (bool, error) {
 	buf, err := json.Marshal(proof)
 	if err != nil {
 		return false, err

@@ -143,16 +109,8 @@ func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (
 	return verified != 0, nil
 }
 
-// VerifyChunkProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
-func (v *Verifier) VerifyChunkProof(proof message.ChunkProof, forkName string) (bool, error) {
-	if v.cfg.MockMode {
-		log.Info("Mock mode, verifier disabled")
-		if string(proof.Proof()) == InvalidTestProof {
-			return false, nil
-		}
-		return true, nil
-	}
+// VerifyChunkProof Verify a ZkProof by marshaling it and sending it to the Verifier.
+func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName string) (bool, error) {
 	buf, err := json.Marshal(proof)
 	if err != nil {
 		return false, err

@@ -171,15 +129,7 @@ func (v *Verifier) VerifyChunkProof(proof message.ChunkProof, forkName string) (
 }
 
 // VerifyBundleProof Verify a ZkProof for a bundle of batches, by marshaling it and verifying it via the EVM verifier.
-func (v *Verifier) VerifyBundleProof(proof message.BundleProof, forkName string) (bool, error) {
-	if v.cfg.MockMode {
-		log.Info("Mock mode, verifier disabled")
-		if string(proof.Proof()) == InvalidTestProof {
-			return false, nil
-		}
-		return true, nil
-	}
+func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName string) (bool, error) {
 	buf, err := json.Marshal(proof)
 	if err != nil {
 		return false, err

@@ -209,32 +159,6 @@ func (v *Verifier) readVK(filePat string) (string, error) {
 	return base64.StdEncoding.EncodeToString(byt), nil
 }
 
-// load low version vks, current is darwin
-func (v *Verifier) loadLowVersionVKs(cfg *config.VerifierConfig) error {
-	bundleVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_bundle.vkey"))
-	if err != nil {
-		return err
-	}
-	batchVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_batch.vkey"))
-	if err != nil {
-		return err
-	}
-	chunkVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_chunk.vkey"))
-	if err != nil {
-		return err
-	}
-	v.BundleVkMap[bundleVK] = struct{}{}
-	v.BatchVKMap[batchVK] = struct{}{}
-	v.ChunkVKMap[chunkVK] = struct{}{}
-	return nil
-}
-
-func (v *Verifier) loadDarwinVKs() {
-	v.BundleVkMap["AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD5dsp1rEy7PSqiIFikkkOPqKokLW2mZSwCbtKdkfLQcvTxARUwHSe4iZe27PRJ5WWaLqtRV1+x6+pSVKtcPtaV4kE7v2YJRf0582hxiAF0IBaOoREdpyNfA2a9cvhWb2TMaPrUYP9EDQ7CUiW1FQzxbjGc95ua2htscnpU7d9S5stHWzKb7okkCG7bTIL9aG6qTQo2YXW7n3H3Ir47oVJB7IKrUzKGvI5Wmanh2zpZOJ9Qm4/wY24cT7cJz+Ux6wAg=="] = struct{}{}
-	v.BatchVKMap["AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD1DEjW4Kell67H07wazT5DdzrSh4+amh+cmosQHp9p9snFypyoBGt3UHtoJGQBZlywZWDS9ht5pnaEoGBdaKcQk+lFb+WxTiId0KOAa0mafTZTQw8yToy57Jple64qzlRu1dux30tZZGuerLN1CKzg5Xl2iOpMK+l87jCINwVp5cUtF/XrvhBbU7onKh3KBiy99iUqVyA3Y6iiIZhGKWBSuSA4bNgDYIoVkqjHpdL35aEShoRO6pNXt7rDzxFoPzH0JuPI54nE4OhVrzZXwtkAEosxVa/fszcE092FH+HhhtxZBYe/KEzwdISU9TOPdId3UF/UMYC0MiYOlqffVTgAg=="] = struct{}{}
-	v.ChunkVKMap["AAAAGQAAAATyWEABRbJ6hQQ5/zLX1gTasr7349minA9rSgMS6gDeHwZKqikRiO3md+pXjjxMHnKQtmXYgMXhJSvlmZ+Ws+cheuly2X1RuNQzcZuRImaKPR9LJsVZYsXfJbuqdKX8p0Gj8G83wMJOmTzNVUyUol0w0lTU+CEiTpHOnxBsTF3EWaW3s1u4ycOgWt1c9M6s7WmaBZLYgAWYCunO5CLCLApNGbCASeck/LuSoedEri5u6HccCKU2khG6zl6W07jvYSbDVLJktbjRiHv+/HQix+K14j8boo8Z/unhpwXCsPxkQA=="] = struct{}{}
-}
-
 func (v *Verifier) loadOpenVMVks(forkName string) error {
 	tempFile := path.Join(os.TempDir(), "openVmVk.json")
 	defer func() {
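`readVK`, which the commit keeps, base64-encodes the raw vkey file contents before using them as map keys. A self-contained sketch of an equivalent helper with the same signature as the one shown above (the file path in `main` is illustrative):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"os"
)

// readVK loads a verifying-key file and returns its contents base64-encoded,
// matching the shape of the helper retained in the verifier.
func readVK(filePat string) (string, error) {
	byt, err := os.ReadFile(filePat)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(byt), nil
}

func main() {
	vk, err := readVK("vk_chunk.vkey") // illustrative path
	fmt.Println(vk, err)
}
```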
@@ -29,17 +29,9 @@ func TestFFI(t *testing.T) {
 	as := assert.New(t)
 
 	cfg := &config.VerifierConfig{
 		MockMode: false,
-		LowVersionCircuit: &config.CircuitConfig{
-			ParamsPath:       *paramsPath,
-			AssetsPath:       *assetsPathLo,
-			ForkName:         "darwin",
-			MinProverVersion: "",
-		},
 		HighVersionCircuit: &config.CircuitConfig{
 			ParamsPath:       *paramsPath,
 			AssetsPath:       *assetsPathHi,
-			ForkName:         "darwinV2",
+			ForkName:         "euclidV2",
 			MinProverVersion: "",
 		},
 	}

@@ -48,43 +40,43 @@ func TestFFI(t *testing.T) {
 	as.NoError(err)
 
 	chunkProof1 := readChunkProof(*chunkProofPath1, as)
-	chunkOk1, err := v.VerifyChunkProof(chunkProof1, "darwinV2")
+	chunkOk1, err := v.VerifyChunkProof(chunkProof1, "euclidV2")
 	as.NoError(err)
 	as.True(chunkOk1)
 	t.Log("Verified chunk proof 1")
 
 	chunkProof2 := readChunkProof(*chunkProofPath2, as)
-	chunkOk2, err := v.VerifyChunkProof(chunkProof2, "darwinV2")
+	chunkOk2, err := v.VerifyChunkProof(chunkProof2, "euclidV2")
 	as.NoError(err)
 	as.True(chunkOk2)
 	t.Log("Verified chunk proof 2")
 
 	batchProof := readBatchProof(*batchProofPath, as)
-	batchOk, err := v.VerifyBatchProof(batchProof, "darwinV2")
+	batchOk, err := v.VerifyBatchProof(batchProof, "euclidV2")
 	as.NoError(err)
 	as.True(batchOk)
 	t.Log("Verified batch proof")
 }
 
-func readBatchProof(filePat string, as *assert.Assertions) types.BatchProof {
+func readBatchProof(filePat string, as *assert.Assertions) *types.OpenVMBatchProof {
 	f, err := os.Open(filePat)
 	as.NoError(err)
 	byt, err := io.ReadAll(f)
 	as.NoError(err)
 
-	proof := &types.Halo2BatchProof{}
+	proof := &types.OpenVMBatchProof{}
 	as.NoError(json.Unmarshal(byt, proof))
 
 	return proof
 }
 
-func readChunkProof(filePat string, as *assert.Assertions) types.ChunkProof {
+func readChunkProof(filePat string, as *assert.Assertions) *types.OpenVMChunkProof {
 	f, err := os.Open(filePat)
 	as.NoError(err)
 	byt, err := io.ReadAll(f)
 	as.NoError(err)
 
-	proof := &types.Halo2ChunkProof{}
+	proof := &types.OpenVMChunkProof{}
 	as.NoError(json.Unmarshal(byt, proof))
 
 	return proof

@@ -22,7 +22,7 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
 			ProverVersion:      "v0.0.1",
 			Challenge:          "abcdef",
 			ProverProviderType: ProverProviderTypeInternal,
-			ProverTypes:        []ProverType{ProverTypeBatch},
+			ProverTypes:        []ProverType{ProverTypeOpenVM},
 			VKs:                []string{"vk1", "vk2"},
 		},
 		PublicKey: publicKeyHex,

@@ -64,7 +64,7 @@ func TestGenerateSignature(t *testing.T) {
 			ProverVersion:      "v4.4.45-37af5ef5-38a68e2-1c5093c",
 			Challenge:          "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjQ4Mzg0ODUsIm9yaWdfaWF0IjoxNzI0ODM0ODg1LCJyYW5kb20iOiJ6QmdNZGstNGc4UzNUNTFrVEFsYk1RTXg2TGJ4SUs4czY3ejM2SlNuSFlJPSJ9.x9PvihhNx2w4_OX5uCrv8QJCNYVQkIi-K2k8XFXYmik",
 			ProverProviderType: ProverProviderTypeInternal,
-			ProverTypes:        []ProverType{ProverTypeChunk},
+			ProverTypes:        []ProverType{ProverTypeOpenVM},
 			VKs:                []string{"mock_vk"},
 		},
 		PublicKey: publicKeyHex,
@@ -2,7 +2,6 @@ package types
 
 import (
 	"fmt"
 
 	"scroll-tech/common/types/message"
 )

@@ -21,10 +20,10 @@ type ProverType uint8
 
 func (r ProverType) String() string {
 	switch r {
-	case ProverTypeChunk:
-		return "prover type chunk"
-	case ProverTypeBatch:
-		return "prover type batch"
+	case ProverTypeChunkDeprecated:
+		return "prover type chunk (deprecated)"
+	case ProverTypeBatchDeprecated:
+		return "prover type batch (deprecated)"
 	case ProverTypeOpenVM:
 		return "prover type openvm"
 	default:

@@ -35,10 +34,10 @@ func (r ProverType) String() string {
 const (
 	// ProverTypeUndefined is an unknown prover type
 	ProverTypeUndefined ProverType = iota
-	// ProverTypeChunk signals it's a chunk prover, which can prove chunk_tasks
-	ProverTypeChunk
-	// ProverTypeBatch signals it's a batch prover, which can prove batch_tasks and bundle_tasks
-	ProverTypeBatch
+	// ProverTypeChunk signals it's a chunk prover, which can prove chunk_tasks, which is deprecated
+	ProverTypeChunkDeprecated
+	// ProverTypeBatch signals it's a batch prover, which can prove batch_tasks and bundle_tasks, which is deprecated
+	ProverTypeBatchDeprecated
 	// ProverTypeOpenVM
 	ProverTypeOpenVM
 )

@@ -47,9 +46,9 @@ const (
 func MakeProverType(proofType message.ProofType) ProverType {
 	switch proofType {
 	case message.ProofTypeChunk:
-		return ProverTypeChunk
+		return ProverTypeChunkDeprecated
 	case message.ProofTypeBatch, message.ProofTypeBundle:
-		return ProverTypeBatch
+		return ProverTypeBatchDeprecated
 	default:
 		return ProverTypeUndefined
 	}
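Because the renamed constants keep their iota positions, legacy provers reporting the old numeric values still map onto the deprecated variants. A quick check of that behavior under stand-in definitions (the `ProofType` values below are assumptions; the `ProverType` ordering is taken from the diff):

```go
package main

import "fmt"

type ProofType uint8
type ProverType uint8

const (
	ProofTypeChunk ProofType = iota + 1 // values assumed for illustration
	ProofTypeBatch
	ProofTypeBundle
)

const (
	ProverTypeUndefined ProverType = iota
	ProverTypeChunkDeprecated
	ProverTypeBatchDeprecated
	ProverTypeOpenVM
)

// makeProverType mirrors the mapping above: legacy chunk/batch proof types
// resolve to the deprecated prover types, everything else is undefined.
func makeProverType(p ProofType) ProverType {
	switch p {
	case ProofTypeChunk:
		return ProverTypeChunkDeprecated
	case ProofTypeBatch, ProofTypeBundle:
		return ProverTypeBatchDeprecated
	default:
		return ProverTypeUndefined
	}
}

func main() {
	fmt.Println(makeProverType(ProofTypeChunk) == ProverTypeChunkDeprecated) // true
}
```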
@@ -67,7 +67,7 @@ func randomURL() string {
 	return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
 }
 
-func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string, forks []string) (*cron.Collector, *http.Server) {
+func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
 	var err error
 	db, err = testApps.GetGormDBClient()

@@ -84,17 +84,9 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
 		ProverManager: &config.ProverManager{
 			ProversPerSession: proversPerSession,
 			Verifier: &config.VerifierConfig{
 				MockMode: true,
-				LowVersionCircuit: &config.CircuitConfig{
-					ParamsPath:       "",
-					AssetsPath:       "",
-					ForkName:         "homestead",
-					MinProverVersion: "v4.4.57",
-				},
 				HighVersionCircuit: &config.CircuitConfig{
 					ParamsPath:       "",
 					AssetsPath:       "",
-					ForkName:         "bernoulli",
+					ForkName:         "euclidV2",
 					MinProverVersion: "v4.4.89",
 				},
 			},

@@ -109,20 +101,17 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
 		},
 	}
 
-	var chainConf params.ChainConfig
-	for _, forkName := range forks {
-		switch forkName {
-		case "bernoulli":
-			chainConf.BernoulliBlock = big.NewInt(100)
-		case "homestead":
-			chainConf.HomesteadBlock = big.NewInt(0)
-		}
-	}
-
 	proofCollector := cron.NewCollector(context.Background(), db, conf, nil)
 
 	router := gin.New()
-	api.InitController(conf, &chainConf, db, nil)
+	api.InitController(conf, &params.ChainConfig{
+		BernoulliBlock: big.NewInt(0),
+		CurieBlock:     big.NewInt(0),
+		DarwinTime:     new(uint64),
+		DarwinV2Time:   new(uint64),
+		EuclidTime:     new(uint64),
+		EuclidV2Time:   new(uint64),
+	}, db, nil)
 	route.Route(router, conf, nil)
 	srv := &http.Server{
 		Addr: coordinatorURL,

@@ -142,7 +131,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
 func setEnv(t *testing.T) {
 	var err error
 
-	version.Version = "v4.4.57"
+	version.Version = "v4.4.89"
 
 	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
 	glogger.Verbosity(log.LvlInfo)

@@ -198,7 +187,7 @@ func TestApis(t *testing.T) {
 func testHandshake(t *testing.T) {
 	// Setup coordinator and http server.
 	coordinatorURL := randomURL()
-	proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"homestead"})
+	proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
 	defer func() {
 		proofCollector.Stop()
 		assert.NoError(t, httpHandler.Shutdown(context.Background()))

@@ -211,7 +200,7 @@ func testHandshake(t *testing.T) {
 func testFailedHandshake(t *testing.T) {
 	// Setup coordinator and http server.
 	coordinatorURL := randomURL()
-	proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"homestead"})
+	proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
 	defer func() {
 		proofCollector.Stop()
 	}()

@@ -229,7 +218,7 @@ func testFailedHandshake(t *testing.T) {
 
 func testGetTaskBlocked(t *testing.T) {
 	coordinatorURL := randomURL()
-	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
+	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
 	defer func() {
 		collector.Stop()
 		assert.NoError(t, httpHandler.Shutdown(context.Background()))

@@ -273,7 +262,7 @@ func testGetTaskBlocked(t *testing.T) {
 
 func testOutdatedProverVersion(t *testing.T) {
 	coordinatorURL := randomURL()
-	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
+	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
 	defer func() {
 		collector.Stop()
 		assert.NoError(t, httpHandler.Shutdown(context.Background()))

@@ -285,12 +274,12 @@ func testOutdatedProverVersion(t *testing.T) {
 	batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch, "v1.999.999")
 	assert.True(t, chunkProver.healthCheckSuccess(t))
 
-	expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.56, actual version: %s", chunkProver.proverVersion)
+	expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.89, actual version: %s", chunkProver.proverVersion)
 	code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
 	assert.Equal(t, types.ErrJWTCommonErr, code)
 	assert.Equal(t, expectedErr, errors.New(errMsg))
 
-	expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.56, actual version: %s", batchProver.proverVersion)
+	expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.89, actual version: %s", batchProver.proverVersion)
 	code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
 	assert.Equal(t, types.ErrJWTCommonErr, code)
 	assert.Equal(t, expectedErr, errors.New(errMsg))

@@ -298,7 +287,7 @@ func testOutdatedProverVersion(t *testing.T) {
 
 func testValidProof(t *testing.T) {
 	coordinatorURL := randomURL()
-	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
+	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
 	defer func() {
 		collector.Stop()
 		assert.NoError(t, httpHandler.Shutdown(context.Background()))

@@ -381,7 +370,7 @@ func testValidProof(t *testing.T) {
 func testInvalidProof(t *testing.T) {
 	// Setup coordinator and ws server.
 	coordinatorURL := randomURL()
-	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"darwinV2"})
+	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
 	defer func() {
 		collector.Stop()
 		assert.NoError(t, httpHandler.Shutdown(context.Background()))

@@ -469,7 +458,7 @@ func testInvalidProof(t *testing.T) {
 func testProofGeneratedFailed(t *testing.T) {
 	// Setup coordinator and ws server.
 	coordinatorURL := randomURL()
-	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"darwinV2"})
+	collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
 	defer func() {
 		collector.Stop()
 		assert.NoError(t, httpHandler.Shutdown(context.Background()))

@@ -570,7 +559,7 @@ func testProofGeneratedFailed(t *testing.T) {
 func testTimeoutProof(t *testing.T) {
 	// Setup coordinator and ws server.
 	coordinatorURL := randomURL()
-	collector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"darwinV2"})
+	collector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
 	defer func() {
 		collector.Stop()
 		assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -593,7 +582,9 @@ func testTimeoutProof(t *testing.T) {
 	assert.NoError(t, err)
 	err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, batch.Hash)
 	assert.NoError(t, err)
-	encodeData, err := json.Marshal(message.Halo2ChunkProof{})
+	encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
+		ChunkInfo *message.ChunkInfo `json:"chunk_info"`
+	}{ChunkInfo: &message.ChunkInfo{}}})
 	assert.NoError(t, err)
 	assert.NotEmpty(t, encodeData)
 	err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbChunk.Hash, encodeData, types.ProvingTaskUnassigned, 1)
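The test payload above marshals an `OpenVMChunkProof` whose `MetaData` is an anonymous struct literal. A standalone sketch of the JSON shape this produces; the `chunk_info` and `proofs` tags come from the diff, while the `vm_proof` and `metadata` tags are assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type chunkInfo struct{}

type openVMProof struct {
	Proof []byte `json:"proofs"`
}

type openVMChunkProof struct {
	VmProof  *openVMProof `json:"vm_proof"` // tag assumed
	MetaData struct {
		ChunkInfo *chunkInfo `json:"chunk_info"`
	} `json:"metadata"` // tag assumed
}

func main() {
	p := openVMChunkProof{VmProof: &openVMProof{}}
	p.MetaData.ChunkInfo = &chunkInfo{}
	buf, _ := json.Marshal(p)
	fmt.Println(string(buf)) // {"vm_proof":{"proofs":null},"metadata":{"chunk_info":{}}}
}
```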
@@ -207,14 +207,16 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
 	}
 
 	var proof []byte
-	switch proverTaskSchema.TaskType {
-	case int(message.ProofTypeChunk):
-		encodeData, err := json.Marshal(message.Halo2ChunkProof{})
+	switch message.ProofType(proverTaskSchema.TaskType) {
+	case message.ProofTypeChunk:
+		encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
+			ChunkInfo *message.ChunkInfo `json:"chunk_info"`
+		}{ChunkInfo: &message.ChunkInfo{}}})
 		assert.NoError(t, err)
 		assert.NotEmpty(t, encodeData)
 		proof = encodeData
-	case int(message.ProofTypeBatch):
-		encodeData, err := json.Marshal(message.Halo2BatchProof{})
+	case message.ProofTypeBatch:
+		encodeData, err := json.Marshal(message.OpenVMBatchProof{VmProof: &message.OpenVMProof{}})
 		assert.NoError(t, err)
 		assert.NotEmpty(t, encodeData)
 		proof = encodeData

@@ -223,16 +225,14 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
 	if proofStatus == verifiedFailed {
 		switch proverTaskSchema.TaskType {
 		case int(message.ProofTypeChunk):
-			chunkProof := message.Halo2ChunkProof{}
-			chunkProof.RawProof = []byte(verifier.InvalidTestProof)
-			encodeData, err := json.Marshal(&chunkProof)
+			encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}, MetaData: struct {
+				ChunkInfo *message.ChunkInfo `json:"chunk_info"`
+			}{ChunkInfo: &message.ChunkInfo{}}})
 			assert.NoError(t, err)
 			assert.NotEmpty(t, encodeData)
 			proof = encodeData
 		case int(message.ProofTypeBatch):
-			batchProof := message.Halo2BatchProof{}
-			batchProof.RawProof = []byte(verifier.InvalidTestProof)
-			encodeData, err := json.Marshal(&batchProof)
+			encodeData, err := json.Marshal(&message.OpenVMBatchProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}})
 			assert.NoError(t, err)
 			assert.NotEmpty(t, encodeData)
 			proof = encodeData
prover/Cargo.lock (generated, 5854 lines changed): diff suppressed because it is too large.
@@ -1,50 +0,0 @@
[package]
name = "prover"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[patch.crates-io]
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }

[dependencies]
anyhow = "1.0"
log = "0.4"
env_logger = "0.11.3"
serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
futures = "0.3.30"

ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover_darwin = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.12.2", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
prover_darwin_v2 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.13.1", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "160db6c"}
base64 = "0.13.1"
reqwest = { version = "0.12.4", features = ["gzip"] }
reqwest-middleware = "0.3"
reqwest-retry = "0.5"
once_cell = "1.19.0"
hex = "0.4.3"
tiny-keccak = { version = "2.0.0", features = ["sha3", "keccak"] }
rand = "0.8.5"
eth-keystore = "0.5.0"
rlp = "0.5.2"
tokio = "1.37.0"
async-trait = "0.1"
sled = "0.34.7"
http = "1.1.0"
clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"
@@ -1,48 +0,0 @@
.PHONY: prover lint tests_binary

ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
	HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
	HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif

ZKEVM_VERSION=$(shell ./print_high_zkevm_version.sh)
ifeq (${ZKEVM_VERSION},)
	$(error ZKEVM_VERSION not set)
else
	$(info ZKEVM_VERSION is ${ZKEVM_VERSION})
endif

ZKEVM_COMMIT=$(shell echo ${ZKEVM_VERSION} | cut -d " " -f2)
$(info ZKEVM_COMMIT is ${ZKEVM_COMMIT})

HALO2_GPU_VERSION=$(shell ./print_halo2gpu_version.sh | sed -n '2p')

GIT_REV=$(shell git rev-parse --short HEAD)
GO_TAG=$(shell grep "var tag = " ../common/version/version.go | cut -d "\"" -f2)

ifeq (${GO_TAG},)
	$(error GO_TAG not set)
else
	$(info GO_TAG is ${GO_TAG})
endif

ifeq (${HALO2_GPU_VERSION},)
	# use halo2_proofs with CPU
	ZK_VERSION=${ZKEVM_COMMIT}-${HALO2_VERSION}
else
	# use halo2_gpu
	ZK_VERSION=${ZKEVM_COMMIT}-${HALO2_GPU_VERSION}
endif

prover:
	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release

tests_binary:
	cargo clean && cargo test --release --no-run
	ls target/release/deps/prover* | grep -v "\.d" | xargs -I{} ln -sf {} ./prover.test

lint:
	cargo check --all-features
	cargo clippy --all-features --all-targets -- -D warnings
	cargo fmt --all
@@ -1,30 +0,0 @@
{
  "sdk_config": {
    "prover_name_prefix": "prover-1",
    "keys_dir": "keys",
    "coordinator": {
      "base_url": "http://localhost:8555",
      "retry_count": 10,
      "retry_wait_time_sec": 10,
      "connection_timeout_sec": 30
    },
    "l2geth": {
      "endpoint": "http://localhost:9999"
    },
    "prover": {
      "circuit_types": [1,2,3],
      "circuit_version": "v0.13.1"
    },
    "db_path": "unique-db-path-for-prover-1"
  },
  "low_version_circuit": {
    "hard_fork_name": "darwin",
    "params_path": "params",
    "assets_path": "assets"
  },
  "high_version_circuit": {
    "hard_fork_name": "darwinV2",
    "params_path": "params",
    "assets_path": "assets"
  }
}
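Since the config above is plain JSON, it can also be inspected generically before the typed load (the typed path is LocalProverConfig::from_file, shown later in this diff). A small sketch, reading the same keys as the sample file above from the repo's default path; the circuit-type numbering 1/2/3 follows the sample config:

use serde_json::Value;

fn main() -> anyhow::Result<()> {
    let raw = std::fs::read_to_string("conf/config.json")?;
    let cfg: Value = serde_json::from_str(&raw)?;

    // circuit_types [1,2,3] correspond to chunk/batch/bundle tasks here.
    let circuit_types = &cfg["sdk_config"]["prover"]["circuit_types"];
    println!("enabled circuit types: {circuit_types}");
    Ok(())
}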
@@ -1,21 +0,0 @@
#!/bin/bash

config_file="$HOME/.cargo/config"

if [ ! -e "$config_file" ]; then
    exit 0
fi

if [[ $(head -n 1 "$config_file") == "#"* ]]; then
    exit 0
fi

halo2gpu_path=$(grep -Po '(?<=paths = \[")([^"]*)' $config_file)

pushd $halo2gpu_path

commit_hash=$(git log --pretty=format:%h -n 1)
echo "${commit_hash:0:7}"

popd

@@ -1,10 +0,0 @@
#!/bin/bash
set -ue

higher_zkevm_item=`grep "zkevm-circuits.git" ./Cargo.lock | sort | uniq | awk -F "[#=]" '{print $3" "$4}' | sort -k 1 | tail -n 1`

higher_version=`echo $higher_zkevm_item | awk '{print $1}'`

higher_commit=`echo $higher_zkevm_item | cut -d ' ' -f2 | cut -c-7`

echo "$higher_version $higher_commit"
@@ -1 +0,0 @@
nightly-2023-12-03
@@ -1,9 +0,0 @@
edition = "2021"

comment_width = 100
imports_granularity = "Crate"
max_width = 100
newline_style = "Unix"
# normalize_comments = true
reorder_imports = true
wrap_comments = true
@@ -1,51 +0,0 @@
use anyhow::{bail, Result};

static SCROLL_PROVER_ASSETS_DIR_ENV_NAME: &str = "SCROLL_PROVER_ASSETS_DIR";
static mut SCROLL_PROVER_ASSETS_DIRS: Vec<String> = vec![];

#[derive(Debug)]
pub struct AssetsDirEnvConfig {}

impl AssetsDirEnvConfig {
    pub fn init() -> Result<()> {
        let value = std::env::var(SCROLL_PROVER_ASSETS_DIR_ENV_NAME)?;
        let dirs: Vec<&str> = value.split(',').collect();
        if dirs.len() != 2 {
            bail!("env variable SCROLL_PROVER_ASSETS_DIR value must be 2 parts separated by a comma.")
        }
        unsafe {
            SCROLL_PROVER_ASSETS_DIRS = dirs.into_iter().map(|s| s.to_string()).collect();
            log::info!(
                "init SCROLL_PROVER_ASSETS_DIRS: {:?}",
                SCROLL_PROVER_ASSETS_DIRS
            );
        }
        Ok(())
    }

    pub fn enable_first() {
        unsafe {
            log::info!(
                "set env {SCROLL_PROVER_ASSETS_DIR_ENV_NAME} to {}",
                &SCROLL_PROVER_ASSETS_DIRS[0]
            );
            std::env::set_var(
                SCROLL_PROVER_ASSETS_DIR_ENV_NAME,
                &SCROLL_PROVER_ASSETS_DIRS[0],
            );
        }
    }

    pub fn enable_second() {
        unsafe {
            log::info!(
                "set env {SCROLL_PROVER_ASSETS_DIR_ENV_NAME} to {}",
                &SCROLL_PROVER_ASSETS_DIRS[1]
            );
            std::env::set_var(
                SCROLL_PROVER_ASSETS_DIR_ENV_NAME,
                &SCROLL_PROVER_ASSETS_DIRS[1],
            );
        }
    }
}
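A hypothetical usage sketch for the module above: the env var holds two comma-separated asset directories, and enable_first/enable_second flip SCROLL_PROVER_ASSETS_DIR between them before each circuit version is loaded. The directory values here are invented; the real callers are the handler builders shown later in this diff.

fn demo() -> anyhow::Result<()> {
    // Example paths only, not the repo's real layout.
    std::env::set_var(
        "SCROLL_PROVER_ASSETS_DIR",
        "/assets/darwin,/assets/darwin_v2",
    );
    AssetsDirEnvConfig::init()?; // splits and caches the two directories
    AssetsDirEnvConfig::enable_first(); // point at the low-version assets
    // ... construct the low-version handler here, then:
    AssetsDirEnvConfig::enable_second(); // point at the high-version assets
    Ok(())
}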
@@ -1,76 +0,0 @@
#![feature(lazy_cell)]
#![feature(core_intrinsics)]

mod config;
mod prover;
mod types;
mod utils;
mod zk_circuits_handler;

use clap::{ArgAction, Parser};
use prover::{LocalProver, LocalProverConfig};
use scroll_proving_sdk::{
    prover::ProverBuilder,
    utils::{get_version, init_tracing},
};
use tokio::runtime;
use utils::get_prover_type;

#[derive(Parser, Debug)]
#[command(disable_version_flag = true)]
struct Args {
    /// Path of config file
    #[arg(long = "config", default_value = "conf/config.json")]
    config_file: String,

    /// Version of this prover
    #[arg(short, long, action = ArgAction::SetTrue)]
    version: bool,

    /// Path of log file
    #[arg(long = "log.file")]
    log_file: Option<String>,
}

fn main() -> anyhow::Result<()> {
    let rt = runtime::Builder::new_multi_thread()
        .thread_stack_size(16 * 1024 * 1024) // Set stack size to 16MB
        .enable_all()
        .build()
        .expect("Failed to create Tokio runtime");

    rt.block_on(async {
        init_tracing();

        let args = Args::parse();

        if args.version {
            println!("version is {}", get_version());
            std::process::exit(0);
        }

        let cfg = LocalProverConfig::from_file(args.config_file)?;
        let sdk_config = cfg.sdk_config.clone();
        let mut prover_types = vec![];
        sdk_config
            .prover
            .circuit_types
            .iter()
            .for_each(|circuit_type| {
                if let Some(pt) = get_prover_type(*circuit_type) {
                    if !prover_types.contains(&pt) {
                        prover_types.push(pt);
                    }
                }
            });
        let local_prover = LocalProver::new(cfg, prover_types);
        let prover = ProverBuilder::new(sdk_config)
            .with_proving_service(Box::new(local_prover))
            .build()
            .await?;

        prover.run().await;

        Ok(())
    })
}
@@ -1,192 +0,0 @@
use crate::{
    types::ProverType,
    utils::get_prover_type,
    zk_circuits_handler::{CircuitsHandler, CircuitsHandlerProvider},
};
use anyhow::{anyhow, Result};
use async_trait::async_trait;
use scroll_proving_sdk::{
    config::Config as SdkConfig,
    prover::{
        proving_service::{
            GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
            QueryTaskResponse, TaskStatus,
        },
        ProvingService,
    },
};
use serde::{Deserialize, Serialize};
use std::{
    fs::File,
    sync::{Arc, Mutex},
    time::{SystemTime, UNIX_EPOCH},
};
use tokio::{runtime::Handle, sync::RwLock, task::JoinHandle};

#[derive(Clone, Serialize, Deserialize)]
pub struct LocalProverConfig {
    pub sdk_config: SdkConfig,
    pub high_version_circuit: CircuitConfig,
    pub low_version_circuit: CircuitConfig,
}

impl LocalProverConfig {
    pub fn from_reader<R>(reader: R) -> Result<Self>
    where
        R: std::io::Read,
    {
        serde_json::from_reader(reader).map_err(|e| anyhow!(e))
    }

    pub fn from_file(file_name: String) -> Result<Self> {
        let file = File::open(file_name)?;
        Self::from_reader(&file)
    }
}

#[derive(Clone, Serialize, Deserialize)]
pub struct CircuitConfig {
    pub hard_fork_name: String,
    pub params_path: String,
    pub assets_path: String,
}

pub struct LocalProver {
    config: LocalProverConfig,
    prover_types: Vec<ProverType>,
    circuits_handler_provider: RwLock<CircuitsHandlerProvider>,
    next_task_id: Arc<Mutex<u64>>,
    current_task: Arc<Mutex<Option<JoinHandle<Result<String>>>>>,
}

#[async_trait]
impl ProvingService for LocalProver {
    fn is_local(&self) -> bool {
        true
    }
    async fn get_vks(&self, req: GetVkRequest) -> GetVkResponse {
        let mut prover_types = vec![];
        req.circuit_types.iter().for_each(|circuit_type| {
            if let Some(pt) = get_prover_type(*circuit_type) {
                if !prover_types.contains(&pt) {
                    prover_types.push(pt);
                }
            }
        });

        let vks = self
            .circuits_handler_provider
            .read()
            .await
            .init_vks(&self.config, prover_types)
            .await;
        GetVkResponse { vks, error: None }
    }
    async fn prove(&self, req: ProveRequest) -> ProveResponse {
        let handler = self
            .circuits_handler_provider
            .write()
            .await
            .get_circuits_handler(&req.hard_fork_name, self.prover_types.clone())
            .expect("failed to get circuit handler");

        match self.do_prove(req, handler).await {
            Ok(resp) => resp,
            Err(e) => ProveResponse {
                status: TaskStatus::Failed,
                error: Some(format!("failed to request proof: {}", e)),
                ..Default::default()
            },
        }
    }

    async fn query_task(&self, req: QueryTaskRequest) -> QueryTaskResponse {
        let handle = self.current_task.lock().unwrap().take();
        if let Some(handle) = handle {
            if handle.is_finished() {
                return match handle.await {
                    Ok(Ok(proof)) => QueryTaskResponse {
                        task_id: req.task_id,
                        status: TaskStatus::Success,
                        proof: Some(proof),
                        ..Default::default()
                    },
                    Ok(Err(e)) => QueryTaskResponse {
                        task_id: req.task_id,
                        status: TaskStatus::Failed,
                        error: Some(format!("proving task failed: {}", e)),
                        ..Default::default()
                    },
                    Err(e) => QueryTaskResponse {
                        task_id: req.task_id,
                        status: TaskStatus::Failed,
                        error: Some(format!("proving task panicked: {}", e)),
                        ..Default::default()
                    },
                };
            } else {
                *self.current_task.lock().unwrap() = Some(handle);
                return QueryTaskResponse {
                    task_id: req.task_id,
                    status: TaskStatus::Proving,
                    ..Default::default()
                };
            }
        }
        // If no handle is found
        QueryTaskResponse {
            task_id: req.task_id,
            status: TaskStatus::Failed,
            error: Some("no proving task is running".to_string()),
            ..Default::default()
        }
    }
}

impl LocalProver {
    pub fn new(config: LocalProverConfig, prover_types: Vec<ProverType>) -> Self {
        let circuits_handler_provider = CircuitsHandlerProvider::new(config.clone())
            .expect("failed to create circuits handler provider");

        Self {
            config,
            prover_types,
            circuits_handler_provider: RwLock::new(circuits_handler_provider),
            next_task_id: Arc::new(Mutex::new(0)),
            current_task: Arc::new(Mutex::new(None)),
        }
    }

    async fn do_prove(
        &self,
        req: ProveRequest,
        handler: Arc<Box<dyn CircuitsHandler>>,
    ) -> Result<ProveResponse> {
        let task_id = {
            let mut next_task_id = self.next_task_id.lock().unwrap();
            *next_task_id += 1;
            *next_task_id
        };

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;

        let req_clone = req.clone();
        let handle = Handle::current();
        let task_handle =
            tokio::task::spawn_blocking(move || handle.block_on(handler.get_proof_data(req_clone)));

        *self.current_task.lock().unwrap() = Some(task_handle);

        Ok(ProveResponse {
            task_id: task_id.to_string(),
            circuit_type: req.circuit_type,
            circuit_version: req.circuit_version,
            hard_fork_name: req.hard_fork_name,
            status: TaskStatus::Proving,
            created_at,
            input: Some(req.input),
            ..Default::default()
        })
    }
}
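do_prove above offloads the async proving future onto a dedicated blocking thread via Handle::block_on and stashes the JoinHandle for query_task to poll. A reduced, self-contained sketch of that pattern, assuming tokio with the rt-multi-thread and macros features; slow_job stands in for handler.get_proof_data:

use tokio::{runtime::Handle, task::JoinHandle};

async fn slow_job() -> anyhow::Result<String> {
    // Placeholder for the CPU-heavy proving future.
    Ok("proof".to_string())
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let handle = Handle::current();
    // block_on is legal here because spawn_blocking runs the closure on a
    // blocking thread, not on a runtime worker thread.
    let task: JoinHandle<anyhow::Result<String>> =
        tokio::task::spawn_blocking(move || handle.block_on(slow_job()));

    // Later (e.g. on a query), check progress without blocking.
    if task.is_finished() {
        println!("already done");
    }
    let proof = task.await??; // outer ? = join error, inner ? = job error
    println!("got: {proof}");
    Ok(())
}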
@@ -1,153 +0,0 @@
use ethers_core::types::H256;
use serde::{Deserialize, Deserializer, Serialize, Serializer};

use scroll_proving_sdk::prover::types::CircuitType;

pub type CommonHash = H256;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ProverType {
    Chunk,
    Batch,
}

impl ProverType {
    fn from_u8(v: u8) -> Self {
        match v {
            1 => ProverType::Chunk,
            2 => ProverType::Batch,
            _ => {
                panic!("invalid prover_type")
            }
        }
    }
}

impl Serialize for ProverType {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            ProverType::Chunk => serializer.serialize_u8(1),
            ProverType::Batch => serializer.serialize_u8(2),
        }
    }
}

impl<'de> Deserialize<'de> for ProverType {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v: u8 = u8::deserialize(deserializer)?;
        Ok(ProverType::from_u8(v))
    }
}

#[derive(Serialize, Deserialize, Default)]
pub struct Task {
    #[serde(rename = "type", default)]
    pub task_type: CircuitType,
    pub task_data: String,
    #[serde(default)]
    pub hard_fork_name: String,
}

#[derive(Serialize, Deserialize, Default)]
pub struct ProofDetail {
    pub id: String,
    #[serde(rename = "type", default)]
    pub proof_type: CircuitType,
    pub proof_data: String,
    pub error: String,
}

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofFailureType {
    Undefined,
    Panic,
    NoPanic,
}

impl ProofFailureType {
    fn from_u8(v: u8) -> Self {
        match v {
            1 => ProofFailureType::Panic,
            2 => ProofFailureType::NoPanic,
            _ => ProofFailureType::Undefined,
        }
    }
}

impl Serialize for ProofFailureType {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            ProofFailureType::Undefined => serializer.serialize_u8(0),
            ProofFailureType::Panic => serializer.serialize_u8(1),
            ProofFailureType::NoPanic => serializer.serialize_u8(2),
        }
    }
}

impl<'de> Deserialize<'de> for ProofFailureType {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v: u8 = u8::deserialize(deserializer)?;
        Ok(ProofFailureType::from_u8(v))
    }
}

impl Default for ProofFailureType {
    fn default() -> Self {
        Self::Undefined
    }
}

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofStatus {
    Ok,
    Error,
}

impl ProofStatus {
    fn from_u8(v: u8) -> Self {
        match v {
            0 => ProofStatus::Ok,
            _ => ProofStatus::Error,
        }
    }
}

impl Serialize for ProofStatus {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            ProofStatus::Ok => serializer.serialize_u8(0),
            ProofStatus::Error => serializer.serialize_u8(1),
        }
    }
}

impl<'de> Deserialize<'de> for ProofStatus {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v: u8 = u8::deserialize(deserializer)?;
        Ok(ProofStatus::from_u8(v))
    }
}

impl Default for ProofStatus {
    fn default() -> Self {
        Self::Ok
    }
}
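A quick round-trip check for the hand-rolled (de)serializers above: ProverType travels over the wire as a bare u8 (1 = Chunk, 2 = Batch). This is a usage sketch against the types defined in this file:

fn main() -> serde_json::Result<()> {
    // serialize_u8(2) makes Batch encode as the JSON number 2.
    let encoded = serde_json::to_string(&ProverType::Batch)?;
    assert_eq!(encoded, "2");

    // Deserialize goes back through ProverType::from_u8.
    let decoded: ProverType = serde_json::from_str(&encoded)?;
    assert_eq!(decoded, ProverType::Batch);
    Ok(())
}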
@@ -1,18 +0,0 @@
use crate::types::ProverType;
use scroll_proving_sdk::prover::types::CircuitType;

pub fn get_circuit_types(prover_type: ProverType) -> Vec<CircuitType> {
    match prover_type {
        ProverType::Chunk => vec![CircuitType::Chunk],
        ProverType::Batch => vec![CircuitType::Batch, CircuitType::Bundle],
    }
}

pub fn get_prover_type(task_type: CircuitType) -> Option<ProverType> {
    match task_type {
        CircuitType::Undefined => None,
        CircuitType::Chunk => Some(ProverType::Chunk),
        CircuitType::Batch => Some(ProverType::Batch),
        CircuitType::Bundle => Some(ProverType::Batch),
    }
}
@@ -1,165 +0,0 @@
mod common;
mod darwin;
mod darwin_v2;

use crate::{
    config::AssetsDirEnvConfig, prover::LocalProverConfig, types::ProverType,
    utils::get_circuit_types,
};
use anyhow::{bail, Result};
use async_trait::async_trait;
use darwin::DarwinHandler;
use darwin_v2::DarwinV2Handler;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, CircuitType};
use std::{collections::HashMap, sync::Arc};

type HardForkName = String;

pub mod utils {
    pub fn encode_vk(vk: Vec<u8>) -> String {
        base64::encode(vk)
    }
}

#[async_trait]
pub trait CircuitsHandler: Send + Sync {
    async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>>;

    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String>;
}

type CircuitsHandlerBuilder = fn(
    prover_types: Vec<ProverType>,
    config: &LocalProverConfig,
) -> Result<Box<dyn CircuitsHandler>>;

pub struct CircuitsHandlerProvider {
    config: LocalProverConfig,
    circuits_handler_builder_map: HashMap<HardForkName, CircuitsHandlerBuilder>,
    current_fork_name: Option<HardForkName>,
    current_circuit: Option<Arc<Box<dyn CircuitsHandler>>>,
}

impl CircuitsHandlerProvider {
    pub fn new(config: LocalProverConfig) -> Result<Self> {
        let mut m: HashMap<HardForkName, CircuitsHandlerBuilder> = HashMap::new();

        if let Err(e) = AssetsDirEnvConfig::init() {
            panic!("AssetsDirEnvConfig init failed: {:#}", e);
        }

        fn handler_builder(
            prover_types: Vec<ProverType>,
            config: &LocalProverConfig,
        ) -> Result<Box<dyn CircuitsHandler>> {
            log::info!(
                "now init zk circuits handler, hard_fork_name: {}",
                &config.low_version_circuit.hard_fork_name
            );
            AssetsDirEnvConfig::enable_first();
            DarwinHandler::new(
                prover_types,
                &config.low_version_circuit.params_path,
                &config.low_version_circuit.assets_path,
            )
            .map(|handler| Box::new(handler) as Box<dyn CircuitsHandler>)
        }
        m.insert(
            config.low_version_circuit.hard_fork_name.clone(),
            handler_builder,
        );

        fn next_handler_builder(
            prover_types: Vec<ProverType>,
            config: &LocalProverConfig,
        ) -> Result<Box<dyn CircuitsHandler>> {
            log::info!(
                "now init zk circuits handler, hard_fork_name: {}",
                &config.high_version_circuit.hard_fork_name
            );
            AssetsDirEnvConfig::enable_second();
            DarwinV2Handler::new(
                prover_types,
                &config.high_version_circuit.params_path,
                &config.high_version_circuit.assets_path,
            )
            .map(|handler| Box::new(handler) as Box<dyn CircuitsHandler>)
        }

        m.insert(
            config.high_version_circuit.hard_fork_name.clone(),
            next_handler_builder,
        );

        let provider = CircuitsHandlerProvider {
            config,
            circuits_handler_builder_map: m,
            current_fork_name: None,
            current_circuit: None,
        };

        Ok(provider)
    }

    pub fn get_circuits_handler(
        &mut self,
        hard_fork_name: &String,
        prover_types: Vec<ProverType>,
    ) -> Result<Arc<Box<dyn CircuitsHandler>>> {
        match &self.current_fork_name {
            Some(fork_name) if fork_name == hard_fork_name => {
                log::info!("get circuits handler from cache");
                if let Some(handler) = &self.current_circuit {
                    Ok(handler.clone())
                } else {
                    bail!("missing cached handler; this should never happen")
                }
            }
            _ => {
                log::info!(
                    "failed to get circuits handler from cache, create a new one: {hard_fork_name}"
                );
                if let Some(builder) = self.circuits_handler_builder_map.get(hard_fork_name) {
                    log::info!("building circuits handler for {hard_fork_name}");
                    let handler = builder(prover_types, &self.config)
                        .expect("failed to build circuits handler");
                    self.current_fork_name = Some(hard_fork_name.clone());
                    let arc_handler = Arc::new(handler);
                    self.current_circuit = Some(arc_handler.clone());
                    Ok(arc_handler)
                } else {
                    bail!("missing builder; this should never happen")
                }
            }
        }
    }

    pub async fn init_vks(
        &self,
        config: &LocalProverConfig,
        prover_types: Vec<ProverType>,
    ) -> Vec<String> {
        let mut vks = Vec::new();
        for (hard_fork_name, build) in self.circuits_handler_builder_map.iter() {
            let handler =
                build(prover_types.clone(), config).expect("failed to build circuits handler");

            for prover_type in prover_types.iter() {
                for task_type in get_circuit_types(*prover_type).into_iter() {
                    let vk = handler
                        .get_vk(task_type)
                        .await
                        .map_or("".to_string(), utils::encode_vk);
                    log::info!(
                        "vk for {hard_fork_name} is {vk}, task_type: {:?}",
                        task_type
                    );
                    if !vk.is_empty() {
                        vks.push(vk)
                    }
                }
            }
        }
        vks
    }
}
@@ -1,33 +0,0 @@
use std::{collections::BTreeMap, rc::Rc};

use crate::types::ProverType;

use once_cell::sync::OnceCell;

use halo2_proofs::{halo2curves::bn256::Bn256, poly::kzg::commitment::ParamsKZG};

static mut PARAMS_MAP: OnceCell<Rc<BTreeMap<u32, ParamsKZG<Bn256>>>> = OnceCell::new();

pub fn get_params_map_instance<'a, F>(load_params_func: F) -> &'a BTreeMap<u32, ParamsKZG<Bn256>>
where
    F: FnOnce() -> BTreeMap<u32, ParamsKZG<Bn256>>,
{
    unsafe {
        PARAMS_MAP.get_or_init(|| {
            let params_map = load_params_func();
            Rc::new(params_map)
        })
    }
}

pub fn get_degrees<F>(prover_types: &std::collections::HashSet<ProverType>, f: F) -> Vec<u32>
where
    F: FnMut(&ProverType) -> Vec<u32>,
{
    prover_types
        .iter()
        .flat_map(f)
        .collect::<std::collections::HashSet<u32>>()
        .into_iter()
        .collect()
}
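An illustrative call to get_degrees from the module above; the degree values are invented. Per-prover degree lists are merged through a HashSet, so a degree shared by chunk and batch provers is loaded (and cached via get_params_map_instance) only once:

use std::collections::HashSet;

fn main() {
    let provers: HashSet<ProverType> =
        [ProverType::Chunk, ProverType::Batch].into_iter().collect();
    let degrees = get_degrees(&provers, |pt| match pt {
        ProverType::Chunk => vec![20, 24],
        ProverType::Batch => vec![24, 26], // 24 overlaps with Chunk
    });
    // Order is unspecified (HashSet), but 24 appears exactly once.
    assert_eq!(degrees.len(), 3);
}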
@@ -1,401 +0,0 @@
use super::{common::*, CircuitsHandler};
use crate::types::ProverType;
use anyhow::{bail, Context, Ok, Result};
use async_trait::async_trait;
use once_cell::sync::Lazy;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, CircuitType};
use serde::Deserialize;
use tokio::sync::RwLock;

use crate::types::CommonHash;
use std::env;

use prover_darwin::{
    aggregator::Prover as BatchProver,
    check_chunk_hashes,
    common::Prover as CommonProver,
    config::{AGG_DEGREES, ZKEVM_DEGREES},
    zkevm::Prover as ChunkProver,
    BatchProof, BatchProvingTask, BlockTrace, BundleProof, BundleProvingTask, ChunkInfo,
    ChunkProof, ChunkProvingTask,
};

// Only used for debugging.
static OUTPUT_DIR: Lazy<Option<String>> = Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());

#[derive(Debug, Clone, Deserialize)]
pub struct BatchTaskDetail {
    pub chunk_infos: Vec<ChunkInfo>,
    #[serde(flatten)]
    pub batch_proving_task: BatchProvingTask,
}

type BundleTaskDetail = BundleProvingTask;

#[derive(Debug, Clone, Deserialize)]
pub struct ChunkTaskDetail {
    pub block_hashes: Vec<CommonHash>,
}

#[derive(Default)]
pub struct DarwinHandler {
    chunk_prover: Option<RwLock<ChunkProver<'static>>>,
    batch_prover: Option<RwLock<BatchProver<'static>>>,
}

impl DarwinHandler {
    pub fn new_multi(
        prover_types: Vec<ProverType>,
        params_dir: &str,
        assets_dir: &str,
    ) -> Result<Self> {
        let class_name = std::intrinsics::type_name::<Self>();
        let prover_types_set = prover_types
            .into_iter()
            .collect::<std::collections::HashSet<ProverType>>();
        let mut handler = Self {
            batch_prover: None,
            chunk_prover: None,
        };
        let degrees: Vec<u32> = get_degrees(&prover_types_set, |prover_type| match prover_type {
            ProverType::Chunk => ZKEVM_DEGREES.clone(),
            ProverType::Batch => AGG_DEGREES.clone(),
        });
        let params_map = get_params_map_instance(|| {
            log::info!(
                "calling get_params_map from {}, prover_types: {:?}, degrees: {:?}",
                class_name,
                prover_types_set,
                degrees
            );
            CommonProver::load_params_map(params_dir, &degrees)
        });
        for prover_type in prover_types_set {
            match prover_type {
                ProverType::Chunk => {
                    handler.chunk_prover = Some(RwLock::new(ChunkProver::from_params_and_assets(
                        params_map, assets_dir,
                    )));
                }
                ProverType::Batch => {
                    handler.batch_prover = Some(RwLock::new(BatchProver::from_params_and_assets(
                        params_map, assets_dir,
                    )))
                }
            }
        }
        Ok(handler)
    }

    pub fn new(prover_types: Vec<ProverType>, params_dir: &str, assets_dir: &str) -> Result<Self> {
        Self::new_multi(prover_types, params_dir, assets_dir)
    }

    async fn gen_chunk_proof_raw(&self, chunk_trace: Vec<BlockTrace>) -> Result<ChunkProof> {
        if let Some(prover) = self.chunk_prover.as_ref() {
            let chunk = ChunkProvingTask::from(chunk_trace);

            let chunk_proof =
                prover
                    .write()
                    .await
                    .gen_chunk_proof(chunk, None, None, self.get_output_dir())?;

            return Ok(chunk_proof);
        }
        unreachable!("please check errors in proof_type logic")
    }

    async fn gen_chunk_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let chunk_traces: Vec<BlockTrace> = serde_json::from_str(&prove_request.input)?;
        let chunk_proof = self.gen_chunk_proof_raw(chunk_traces).await?;
        Ok(serde_json::to_string(&chunk_proof)?)
    }

    async fn gen_batch_proof_raw(&self, batch_task_detail: BatchTaskDetail) -> Result<BatchProof> {
        if let Some(prover) = self.batch_prover.as_ref() {
            let chunk_hashes_proofs: Vec<(ChunkInfo, ChunkProof)> = batch_task_detail
                .chunk_infos
                .clone()
                .into_iter()
                .zip(batch_task_detail.batch_proving_task.chunk_proofs.clone())
                .collect();

            let chunk_proofs: Vec<ChunkProof> =
                chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();

            let is_valid = prover.read().await.check_protocol_of_chunks(&chunk_proofs);

            if !is_valid {
                bail!("chunk proofs use mismatched protocols")
            }
            check_chunk_hashes("", &chunk_hashes_proofs).context("failed to check chunk info")?;
            let batch_proof = prover.write().await.gen_batch_proof(
                batch_task_detail.batch_proving_task,
                None,
                self.get_output_dir(),
            )?;

            return Ok(batch_proof);
        }
        unreachable!("please check errors in proof_type logic")
    }

    async fn gen_batch_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let batch_task_detail: BatchTaskDetail = serde_json::from_str(&prove_request.input)?;
        let batch_proof = self.gen_batch_proof_raw(batch_task_detail).await?;
        Ok(serde_json::to_string(&batch_proof)?)
    }

    async fn gen_bundle_proof_raw(
        &self,
        bundle_task_detail: BundleTaskDetail,
    ) -> Result<BundleProof> {
        if let Some(prover) = self.batch_prover.as_ref() {
            let bundle_proof = prover.write().await.gen_bundle_proof(
                bundle_task_detail,
                None,
                self.get_output_dir(),
            )?;

            return Ok(bundle_proof);
        }
        unreachable!("please check errors in proof_type logic")
    }

    async fn gen_bundle_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let bundle_task_detail: BundleTaskDetail = serde_json::from_str(&prove_request.input)?;
        let bundle_proof = self.gen_bundle_proof_raw(bundle_task_detail).await?;
        Ok(serde_json::to_string(&bundle_proof)?)
    }

    fn get_output_dir(&self) -> Option<&str> {
        OUTPUT_DIR.as_deref()
    }
}

#[async_trait]
impl CircuitsHandler for DarwinHandler {
    async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>> {
        match task_type {
            CircuitType::Chunk => self.chunk_prover.as_ref().unwrap().read().await.get_vk(),
            CircuitType::Batch => self
                .batch_prover
                .as_ref()
                .unwrap()
                .read()
                .await
                .get_batch_vk(),
            CircuitType::Bundle => self
                .batch_prover
                .as_ref()
                .unwrap()
                .read()
                .await
                .get_bundle_vk(),
            _ => unreachable!(),
        }
    }

    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
        match prove_request.circuit_type {
            CircuitType::Chunk => self.gen_chunk_proof(prove_request).await,
            CircuitType::Batch => self.gen_batch_proof(prove_request).await,
            CircuitType::Bundle => self.gen_bundle_proof(prove_request).await,
            _ => unreachable!(),
        }
    }
}

// =================================== tests module ========================================

#[cfg(test)]
mod tests {
    use super::*;
    use crate::zk_circuits_handler::utils::encode_vk;
    use prover_darwin::utils::chunk_trace_to_witness_block;
    use scroll_proving_sdk::utils::init_tracing;
    use std::{path::PathBuf, sync::LazyLock};

    #[ctor::ctor]
    fn init() {
        init_tracing();
        log::info!("logger initialized");
    }

    static DEFAULT_WORK_DIR: &str = "/assets";
    static WORK_DIR: LazyLock<String> = LazyLock::new(|| {
        std::env::var("DARWIN_TEST_DIR")
            .unwrap_or(String::from(DEFAULT_WORK_DIR))
            .trim_end_matches('/')
            .to_string()
    });
    static PARAMS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_params", *WORK_DIR));
    static ASSETS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_assets", *WORK_DIR));
    static PROOF_DUMP_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/proof_data", *WORK_DIR));
    static BATCH_DIR_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/traces/batch_24", *WORK_DIR));
    static BATCH_VK_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/test_assets/vk_batch.vkey", *WORK_DIR));
    static CHUNK_VK_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/test_assets/vk_chunk.vkey", *WORK_DIR));

    #[test]
    fn it_works() {
        let result = true;
        assert!(result);
    }

    #[tokio::test]
    async fn test_circuits() -> Result<()> {
        let bi_handler = DarwinHandler::new_multi(
            vec![ProverType::Chunk, ProverType::Batch],
            &PARAMS_PATH,
            &ASSETS_PATH,
        )?;

        let chunk_handler = bi_handler;
        let chunk_vk = chunk_handler.get_vk(CircuitType::Chunk).await.unwrap();

        check_vk(CircuitType::Chunk, chunk_vk, "chunk vk must be available");
        let chunk_dir_paths = get_chunk_dir_paths()?;
        log::info!("chunk_dir_paths, {:?}", chunk_dir_paths);
        let mut chunk_infos = vec![];
        let mut chunk_proofs = vec![];
        for (id, chunk_path) in chunk_dir_paths.into_iter().enumerate() {
            let chunk_id = format!("chunk_proof{}", id + 1);
            log::info!("start to process {chunk_id}");
            let chunk_trace = read_chunk_trace(chunk_path)?;

            let chunk_info = traces_to_chunk_info(chunk_trace.clone())?;
            chunk_infos.push(chunk_info);

            log::info!("start to prove {chunk_id}");
            let chunk_proof = chunk_handler.gen_chunk_proof_raw(chunk_trace).await?;
            let proof_data = serde_json::to_string(&chunk_proof)?;
            dump_proof(chunk_id, proof_data)?;
            chunk_proofs.push(chunk_proof);
        }

        let batch_handler = chunk_handler;
        let batch_vk = batch_handler.get_vk(CircuitType::Batch).await.unwrap();
        check_vk(CircuitType::Batch, batch_vk, "batch vk must be available");
        let batch_task_detail = make_batch_task_detail(chunk_infos, chunk_proofs);
        log::info!("start to prove batch");
        let batch_proof = batch_handler.gen_batch_proof_raw(batch_task_detail).await?;
        let proof_data = serde_json::to_string(&batch_proof)?;
        dump_proof("batch_proof".to_string(), proof_data)?;

        Ok(())
    }

    fn make_batch_task_detail(_: Vec<ChunkInfo>, _: Vec<ChunkProof>) -> BatchTaskDetail {
        todo!();
        // BatchTaskDetail {
        //     chunk_infos,
        //     batch_proving_task: BatchProvingTask {
        //         parent_batch_hash: todo!(),
        //         parent_state_root: todo!(),
        //         batch_header: todo!(),
        //         chunk_proofs,
        //     },
        // }
    }

    fn check_vk(proof_type: CircuitType, vk: Vec<u8>, info: &str) {
        log::info!("check_vk, {:?}", proof_type);
        let vk_from_file = read_vk(proof_type).unwrap();
        assert_eq!(vk_from_file, encode_vk(vk), "{info}")
    }

    fn read_vk(proof_type: CircuitType) -> Result<String> {
        log::info!("read_vk, {:?}", proof_type);
        let vk_file = match proof_type {
            CircuitType::Chunk => CHUNK_VK_PATH.clone(),
            CircuitType::Batch => BATCH_VK_PATH.clone(),
            CircuitType::Bundle => todo!(),
            CircuitType::Undefined => unreachable!(),
        };

        let data = std::fs::read(vk_file)?;
        Ok(encode_vk(data))
    }

    fn read_chunk_trace(path: PathBuf) -> Result<Vec<BlockTrace>> {
        log::info!("read_chunk_trace, {:?}", path);
        let mut chunk_trace: Vec<BlockTrace> = vec![];

        fn read_block_trace(file: &PathBuf) -> Result<BlockTrace> {
            let f = std::fs::File::open(file)?;
            Ok(serde_json::from_reader(&f)?)
        }

        if path.is_dir() {
            let entries = std::fs::read_dir(&path)?;
            let mut files: Vec<String> = entries
                .into_iter()
                .filter_map(|e| {
                    if e.is_err() {
                        return None;
                    }
                    let entry = e.unwrap();
                    if entry.path().is_dir() {
                        return None;
                    }
                    if let Result::Ok(file_name) = entry.file_name().into_string() {
                        Some(file_name)
                    } else {
                        None
                    }
                })
                .collect();
            files.sort();

            log::info!("files in chunk {:?} is {:?}", path, files);
            for file in files {
                let block_trace = read_block_trace(&path.join(file))?;
                chunk_trace.push(block_trace);
            }
        } else {
            let block_trace = read_block_trace(&path)?;
            chunk_trace.push(block_trace);
        }
        Ok(chunk_trace)
    }

    fn get_chunk_dir_paths() -> Result<Vec<PathBuf>> {
        let batch_path = PathBuf::from(BATCH_DIR_PATH.clone());
        let entries = std::fs::read_dir(&batch_path)?;
        let mut files: Vec<String> = entries
            .filter_map(|e| {
                if e.is_err() {
                    return None;
                }
                let entry = e.unwrap();
                if entry.path().is_dir() {
                    if let Result::Ok(file_name) = entry.file_name().into_string() {
                        Some(file_name)
                    } else {
                        None
                    }
                } else {
                    None
                }
            })
            .collect();
        files.sort();
        log::info!("files in batch {:?} is {:?}", batch_path, files);
        Ok(files.into_iter().map(|f| batch_path.join(f)).collect())
    }

    fn traces_to_chunk_info(chunk_trace: Vec<BlockTrace>) -> Result<ChunkInfo> {
        let witness_block = chunk_trace_to_witness_block(chunk_trace)?;
        Ok(ChunkInfo::from_witness_block(&witness_block, false))
    }

    fn dump_proof(id: String, proof_data: String) -> Result<()> {
        let dump_path = PathBuf::from(PROOF_DUMP_PATH.clone());
        Ok(std::fs::write(dump_path.join(id), proof_data)?)
    }
}
@@ -1,459 +0,0 @@
use super::{common::*, CircuitsHandler};
use crate::types::ProverType;
use anyhow::{bail, Context, Ok, Result};
use async_trait::async_trait;
use once_cell::sync::Lazy;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, CircuitType};
use serde::Deserialize;
use tokio::sync::RwLock;

use crate::types::CommonHash;
use std::env;

use prover_darwin_v2::{
    aggregator::Prover as BatchProver,
    check_chunk_hashes,
    common::Prover as CommonProver,
    config::{AGG_DEGREES, ZKEVM_DEGREES},
    zkevm::Prover as ChunkProver,
    BatchProof, BatchProvingTask, BlockTrace, BundleProof, BundleProvingTask, ChunkInfo,
    ChunkProof, ChunkProvingTask,
};

// Only used for debugging.
static OUTPUT_DIR: Lazy<Option<String>> = Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());

#[derive(Debug, Clone, Deserialize)]
pub struct BatchTaskDetail {
    pub chunk_infos: Vec<ChunkInfo>,
    #[serde(flatten)]
    pub batch_proving_task: BatchProvingTask,
}

type BundleTaskDetail = BundleProvingTask;

#[derive(Debug, Clone, Deserialize)]
pub struct ChunkTaskDetail {
    pub block_hashes: Vec<CommonHash>,
}

#[derive(Default)]
pub struct DarwinV2Handler {
    chunk_prover: Option<RwLock<ChunkProver<'static>>>,
    batch_prover: Option<RwLock<BatchProver<'static>>>,
}

impl DarwinV2Handler {
    pub fn new_multi(
        prover_types: Vec<ProverType>,
        params_dir: &str,
        assets_dir: &str,
    ) -> Result<Self> {
        let class_name = std::intrinsics::type_name::<Self>();
        let prover_types_set = prover_types
            .into_iter()
            .collect::<std::collections::HashSet<ProverType>>();
        let mut handler = Self {
            batch_prover: None,
            chunk_prover: None,
        };
        let degrees: Vec<u32> = get_degrees(&prover_types_set, |prover_type| match prover_type {
            ProverType::Chunk => ZKEVM_DEGREES.clone(),
            ProverType::Batch => AGG_DEGREES.clone(),
        });
        let params_map = get_params_map_instance(|| {
            log::info!(
                "calling get_params_map from {}, prover_types: {:?}, degrees: {:?}",
                class_name,
                prover_types_set,
                degrees
            );
            CommonProver::load_params_map(params_dir, &degrees)
        });
        for prover_type in prover_types_set {
            match prover_type {
                ProverType::Chunk => {
                    handler.chunk_prover = Some(RwLock::new(ChunkProver::from_params_and_assets(
                        params_map, assets_dir,
                    )));
                }
                ProverType::Batch => {
                    handler.batch_prover = Some(RwLock::new(BatchProver::from_params_and_assets(
                        params_map, assets_dir,
                    )))
                }
            }
        }
        Ok(handler)
    }

    pub fn new(prover_types: Vec<ProverType>, params_dir: &str, assets_dir: &str) -> Result<Self> {
        Self::new_multi(prover_types, params_dir, assets_dir)
    }

    async fn gen_chunk_proof_raw(&self, chunk_trace: Vec<BlockTrace>) -> Result<ChunkProof> {
        if let Some(prover) = self.chunk_prover.as_ref() {
            let chunk = ChunkProvingTask::from(chunk_trace);

            let chunk_proof =
                prover
                    .write()
                    .await
                    .gen_chunk_proof(chunk, None, None, self.get_output_dir())?;

            return Ok(chunk_proof);
        }
        unreachable!("please check errors in proof_type logic")
    }

    async fn gen_chunk_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let chunk_traces: Vec<BlockTrace> = serde_json::from_str(&prove_request.input)?;
        let chunk_proof = self.gen_chunk_proof_raw(chunk_traces).await?;
        Ok(serde_json::to_string(&chunk_proof)?)
    }

    async fn gen_batch_proof_raw(&self, batch_task_detail: BatchTaskDetail) -> Result<BatchProof> {
        if let Some(prover) = self.batch_prover.as_ref() {
            let chunk_hashes_proofs: Vec<(ChunkInfo, ChunkProof)> = batch_task_detail
                .chunk_infos
                .clone()
                .into_iter()
                .zip(batch_task_detail.batch_proving_task.chunk_proofs.clone())
                .collect();

            let chunk_proofs: Vec<ChunkProof> =
                chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();

            let is_valid = prover.write().await.check_protocol_of_chunks(&chunk_proofs);

            if !is_valid {
                bail!("chunk proofs use mismatched protocols")
            }
            check_chunk_hashes("", &chunk_hashes_proofs).context("failed to check chunk info")?;
            let batch_proof = prover.write().await.gen_batch_proof(
                batch_task_detail.batch_proving_task,
                None,
                self.get_output_dir(),
            )?;

            return Ok(batch_proof);
        }
        unreachable!("please check errors in proof_type logic")
    }

    async fn gen_batch_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let batch_task_detail: BatchTaskDetail = serde_json::from_str(&prove_request.input)?;
        let batch_proof = self.gen_batch_proof_raw(batch_task_detail).await?;
        Ok(serde_json::to_string(&batch_proof)?)
    }

    async fn gen_bundle_proof_raw(
        &self,
        bundle_task_detail: BundleTaskDetail,
    ) -> Result<BundleProof> {
        if let Some(prover) = self.batch_prover.as_ref() {
            let bundle_proof = prover.write().await.gen_bundle_proof(
                bundle_task_detail,
                None,
                self.get_output_dir(),
            )?;

            return Ok(bundle_proof);
        }
        unreachable!("please check errors in proof_type logic")
    }

    async fn gen_bundle_proof(&self, prove_request: ProveRequest) -> Result<String> {
        let bundle_task_detail: BundleTaskDetail = serde_json::from_str(&prove_request.input)?;
        let bundle_proof = self.gen_bundle_proof_raw(bundle_task_detail).await?;
        Ok(serde_json::to_string(&bundle_proof)?)
    }

    fn get_output_dir(&self) -> Option<&str> {
        OUTPUT_DIR.as_deref()
    }
}

#[async_trait]
impl CircuitsHandler for DarwinV2Handler {
    async fn get_vk(&self, task_type: CircuitType) -> Option<Vec<u8>> {
        match task_type {
            CircuitType::Chunk => self.chunk_prover.as_ref().unwrap().read().await.get_vk(),
            CircuitType::Batch => self
                .batch_prover
                .as_ref()
                .unwrap()
                .read()
                .await
                .get_batch_vk(),
            CircuitType::Bundle => self
                .batch_prover
                .as_ref()
                .unwrap()
                .read()
                .await
                .get_bundle_vk(),
            _ => unreachable!(),
        }
    }

    async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
        match prove_request.circuit_type {
            CircuitType::Chunk => self.gen_chunk_proof(prove_request).await,
            CircuitType::Batch => self.gen_batch_proof(prove_request).await,
            CircuitType::Bundle => self.gen_bundle_proof(prove_request).await,
            _ => unreachable!(),
        }
    }
}

// =================================== tests module ========================================

#[cfg(test)]
mod tests {
    use super::*;
    use crate::zk_circuits_handler::utils::encode_vk;
    use ethers_core::types::H256;
    use prover_darwin_v2::{
        aggregator::eip4844, utils::chunk_trace_to_witness_block, BatchData, BatchHeader,
        MAX_AGG_SNARKS,
    };
    use scroll_proving_sdk::utils::init_tracing;
    use std::{path::PathBuf, sync::LazyLock};

    #[ctor::ctor]
    fn init() {
        init_tracing();
        log::info!("logger initialized");
    }

    static DEFAULT_WORK_DIR: &str = "/assets";
    static WORK_DIR: LazyLock<String> = LazyLock::new(|| {
        std::env::var("DARWIN_V2_TEST_DIR")
            .unwrap_or(String::from(DEFAULT_WORK_DIR))
            .trim_end_matches('/')
            .to_string()
    });
    static PARAMS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_params", *WORK_DIR));
    static ASSETS_PATH: LazyLock<String> = LazyLock::new(|| format!("{}/test_assets", *WORK_DIR));
    static PROOF_DUMP_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/proof_data", *WORK_DIR));
    static BATCH_DIR_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/traces/batch_24", *WORK_DIR));
    static BATCH_VK_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/test_assets/vk_batch.vkey", *WORK_DIR));
    static CHUNK_VK_PATH: LazyLock<String> =
        LazyLock::new(|| format!("{}/test_assets/vk_chunk.vkey", *WORK_DIR));

    #[test]
    fn it_works() {
        let result = true;
        assert!(result);
    }

    #[tokio::test]
    async fn test_circuits() -> Result<()> {
        let bi_handler = DarwinV2Handler::new_multi(
            vec![ProverType::Chunk, ProverType::Batch],
            &PARAMS_PATH,
            &ASSETS_PATH,
        )?;

        let chunk_handler = bi_handler;
        let chunk_vk = chunk_handler.get_vk(CircuitType::Chunk).await.unwrap();

        check_vk(CircuitType::Chunk, chunk_vk, "chunk vk must be available");
        let chunk_dir_paths = get_chunk_dir_paths()?;
        log::info!("chunk_dir_paths, {:?}", chunk_dir_paths);
        let mut chunk_traces = vec![];
        let mut chunk_infos = vec![];
        let mut chunk_proofs = vec![];
        for (id, chunk_path) in chunk_dir_paths.into_iter().enumerate() {
            let chunk_id = format!("chunk_proof{}", id + 1);
            log::info!("start to process {chunk_id}");
            let chunk_trace = read_chunk_trace(chunk_path)?;
            chunk_traces.push(chunk_trace.clone());
            let chunk_info = traces_to_chunk_info(chunk_trace.clone())?;
            chunk_infos.push(chunk_info);

            log::info!("start to prove {chunk_id}");
            let chunk_proof = chunk_handler.gen_chunk_proof_raw(chunk_trace).await?;
            let proof_data = serde_json::to_string(&chunk_proof)?;
            dump_proof(chunk_id, proof_data)?;
            chunk_proofs.push(chunk_proof);
        }

        let batch_handler = chunk_handler;
        let batch_vk = batch_handler.get_vk(CircuitType::Batch).await.unwrap();
        check_vk(CircuitType::Batch, batch_vk, "batch vk must be available");
        let batch_task_detail = make_batch_task_detail(chunk_traces, chunk_proofs, None);
        log::info!("start to prove batch");
        let batch_proof = batch_handler.gen_batch_proof_raw(batch_task_detail).await?;
        let proof_data = serde_json::to_string(&batch_proof)?;
        dump_proof("batch_proof".to_string(), proof_data)?;

        Ok(())
    }

    // copied from https://github.com/scroll-tech/scroll-prover/blob/main/integration/src/prove.rs
    fn get_blob_from_chunks(chunks: &[ChunkInfo]) -> Vec<u8> {
        let num_chunks = chunks.len();

        let padded_chunk =
            ChunkInfo::mock_padded_chunk_info_for_testing(chunks.last().as_ref().unwrap());
        let chunks_with_padding = [
            chunks.to_vec(),
            vec![padded_chunk; MAX_AGG_SNARKS - num_chunks],
        ]
        .concat();
        let batch_data = BatchData::<{ MAX_AGG_SNARKS }>::new(chunks.len(), &chunks_with_padding);
        let batch_bytes = batch_data.get_batch_data_bytes();
        let blob_bytes = eip4844::get_blob_bytes(&batch_bytes);
        log::info!("blob_bytes len {}", blob_bytes.len());
        blob_bytes
    }

    // TODO: chunk_infos can be extracted from chunk_proofs.
    // Still needed?
    fn make_batch_task_detail(
        chunk_traces: Vec<Vec<BlockTrace>>,
        chunk_proofs: Vec<ChunkProof>,
        last_batcher_header: Option<BatchHeader<{ MAX_AGG_SNARKS }>>,
    ) -> BatchTaskDetail {
        // dummy parent batch hash
        let dummy_parent_batch_hash = H256([
            0xab, 0xac, 0xad, 0xae, 0xaf, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0,
        ]);
        let chunk_infos: Vec<_> = chunk_proofs.iter().map(|p| p.chunk_info.clone()).collect();

        let l1_message_popped = chunk_traces
            .iter()
            .flatten()
            .map(|chunk| chunk.num_l1_txs())
            .sum();
        let last_block_timestamp = chunk_traces.last().map_or(0, |block_traces| {
            block_traces
                .last()
                .map_or(0, |block_trace| block_trace.header.timestamp.as_u64())
        });

        let blob_bytes = get_blob_from_chunks(&chunk_infos);
        let batch_header = BatchHeader::construct_from_chunks(
            last_batcher_header.map_or(4, |header| header.version),
            last_batcher_header.map_or(123, |header| header.batch_index + 1),
            l1_message_popped,
            last_batcher_header.map_or(l1_message_popped, |header| {
                header.total_l1_message_popped + l1_message_popped
            }),
            last_batcher_header.map_or(dummy_parent_batch_hash, |header| header.batch_hash()),
            last_block_timestamp,
            &chunk_infos,
            &blob_bytes,
        );
        BatchTaskDetail {
            chunk_infos,
            batch_proving_task: BatchProvingTask {
                chunk_proofs,
                batch_header,
                blob_bytes,
            },
        }
    }

    fn check_vk(proof_type: CircuitType, vk: Vec<u8>, info: &str) {
        log::info!("check_vk, {:?}", proof_type);
        let vk_from_file = read_vk(proof_type).unwrap();
        assert_eq!(vk_from_file, encode_vk(vk), "{info}")
    }

    fn read_vk(proof_type: CircuitType) -> Result<String> {
        log::info!("read_vk, {:?}", proof_type);
        let vk_file = match proof_type {
            CircuitType::Chunk => CHUNK_VK_PATH.clone(),
            CircuitType::Batch => BATCH_VK_PATH.clone(),
            CircuitType::Bundle => todo!(),
            CircuitType::Undefined => unreachable!(),
        };

        let data = std::fs::read(vk_file)?;
        Ok(encode_vk(data))
    }

    fn read_chunk_trace(path: PathBuf) -> Result<Vec<BlockTrace>> {
        log::info!("read_chunk_trace, {:?}", path);
        let mut chunk_trace: Vec<BlockTrace> = vec![];

        fn read_block_trace(file: &PathBuf) -> Result<BlockTrace> {
            let f = std::fs::File::open(file)?;
            Ok(serde_json::from_reader(&f)?)
        }

        if path.is_dir() {
            let entries = std::fs::read_dir(&path)?;
            let mut files: Vec<String> = entries
                .into_iter()
                .filter_map(|e| {
                    if e.is_err() {
                        return None;
                    }
                    let entry = e.unwrap();
                    if entry.path().is_dir() {
                        return None;
                    }
                    if let Result::Ok(file_name) = entry.file_name().into_string() {
                        Some(file_name)
                    } else {
                        None
                    }
                })
                .collect();
            files.sort();

            log::info!("files in chunk {:?} is {:?}", path, files);
            for file in files {
                let block_trace = read_block_trace(&path.join(file))?;
                chunk_trace.push(block_trace);
            }
        } else {
            let block_trace = read_block_trace(&path)?;
            chunk_trace.push(block_trace);
        }
        Ok(chunk_trace)
    }

    fn get_chunk_dir_paths() -> Result<Vec<PathBuf>> {
        let batch_path = PathBuf::from(BATCH_DIR_PATH.clone());
        let entries = std::fs::read_dir(&batch_path)?;
        let mut files: Vec<String> = entries
            .filter_map(|e| {
                if e.is_err() {
                    return None;
                }
                let entry = e.unwrap();
                if entry.path().is_dir() {
                    if let Result::Ok(file_name) = entry.file_name().into_string() {
                        Some(file_name)
                    } else {
                        None
                    }
                } else {
                    None
                }
            })
            .collect();
        files.sort();
        log::info!("files in batch {:?} is {:?}", batch_path, files);
        Ok(files.into_iter().map(|f| batch_path.join(f)).collect())
    }

    fn traces_to_chunk_info(chunk_trace: Vec<BlockTrace>) -> Result<ChunkInfo> {
        let witness_block = chunk_trace_to_witness_block(chunk_trace)?;
        Ok(ChunkInfo::from_witness_block(&witness_block, false))
    }

    fn dump_proof(id: String, proof_data: String) -> Result<()> {
        let dump_path = PathBuf::from(PROOF_DUMP_PATH.clone());
        Ok(std::fs::write(dump_path.join(id), proof_data)?)
    }
}
@@ -724,11 +724,9 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
 		return fmt.Errorf("failed to get end chunk of batch: %w", err)
 	}
 
-	hardForkName := encoding.GetHardforkName(r.chainCfg, firstChunk.StartBlockNumber, firstChunk.StartBlockTime)
-
-	var aggProof message.BundleProof
+	var aggProof *message.OpenVMBundleProof
 	if withProof {
-		aggProof, err = r.bundleOrm.GetVerifiedProofByHash(r.ctx, bundle.Hash, hardForkName)
+		aggProof, err = r.bundleOrm.GetVerifiedProofByHash(r.ctx, bundle.Hash)
 		if err != nil {
 			return fmt.Errorf("failed to get verified proof by bundle index: %d, err: %w", bundle.Index, err)
 		}
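The hunk above removes the hardfork-name lookup entirely: with a single post-Euclid proof system, the relayer always loads the one concrete proof type. A minimal, self-contained Go sketch of the resulting control flow, using placeholder types (BundleProof, proofStore) rather than the repository's message and orm packages:

package example

import "context"

// BundleProof stands in for message.OpenVMBundleProof.
type BundleProof struct{ Data []byte }

// proofStore mirrors the new ORM signature: hash only, no hardfork name.
type proofStore interface {
	GetVerifiedProofByHash(ctx context.Context, hash string) (*BundleProof, error)
}

// finalizeBundle sketches the simplified flow: fetch the proof only when
// finalizing with proof, then branch on nil when building calldata.
func finalizeBundle(ctx context.Context, store proofStore, hash string, withProof bool) error {
	var aggProof *BundleProof
	if withProof {
		p, err := store.GetVerifiedProofByHash(ctx, hash)
		if err != nil {
			return err
		}
		aggProof = p
	}
	if aggProof != nil {
		// pack the "finalize with proof" calldata here
		return nil
	}
	// pack the "finalize without proof" calldata here
	return nil
}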
@@ -1049,7 +1047,7 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*db
 	return calldata, blobs, maxBlockHeight, totalGasUsed, nil
 }
 
-func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV4(dbBatch *orm.Batch, aggProof message.BundleProof) ([]byte, error) {
+func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV4(dbBatch *orm.Batch, aggProof *message.OpenVMBundleProof) ([]byte, error) {
 	if aggProof != nil { // finalizeBundle with proof.
 		calldata, packErr := r.l1RollupABI.Pack(
 			"finalizeBundleWithProof",
@@ -1077,7 +1075,7 @@ func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV4(dbBatch *orm.Batch
 	return calldata, nil
 }
 
-func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof message.BundleProof) ([]byte, error) {
+func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
 	if aggProof != nil { // finalizeBundle with proof.
 		calldata, packErr := r.l1RollupABI.Pack(
 			"finalizeBundlePostEuclidV2",
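Both payload constructors ultimately call r.l1RollupABI.Pack with a method name and arguments. For readers unfamiliar with go-ethereum's ABI helper, here is an illustrative sketch with a toy ABI (not Scroll's rollup contract; Scroll uses its go-ethereum fork, but the API is the same): Pack prepends the 4-byte selector and ABI-encodes the arguments into calldata.

package example

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

// toyABI is a minimal, made-up ABI used only to demonstrate Pack.
const toyABI = `[{"name":"finalize","type":"function","inputs":[{"name":"batchIndex","type":"uint256"}]}]`

func packFinalize(batchIndex *big.Int) ([]byte, error) {
	parsed, err := abi.JSON(strings.NewReader(toyABI))
	if err != nil {
		return nil, fmt.Errorf("parse abi: %w", err)
	}
	// Pack returns selector || ABI-encoded arguments.
	return parsed.Pack("finalize", batchIndex)
}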
@@ -148,11 +148,12 @@ func testL2RelayerProcessPendingBundles(t *testing.T) {
 	// no valid proof, rollup status remains the same
 	assert.Equal(t, types.RollupPending, types.RollupStatus(bundles[0].RollupStatus))
 
-	proof := &message.Halo2BundleProof{
-		RawProof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-		Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-		Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-	}
+	patchGuard := gomonkey.ApplyMethodFunc((*message.OpenVMBundleProof)(nil), "SanityCheck", func() error {
+		return nil
+	})
+	defer patchGuard.Reset()
+
+	proof := &message.OpenVMBundleProof{EvmProof: &message.OpenVMEvmProof{Instances: make([]byte, 384)}}
 	err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, proof, types.ProvingTaskVerified, 600)
 	assert.NoError(t, err)
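The test now stubs out SanityCheck with gomonkey instead of fabricating raw proof bytes. A self-contained sketch of the same patching pattern, with a placeholder Proof type; note that gomonkey rewrites function code at runtime, so tests using it are typically run with inlining disabled (-gcflags=all=-l).

package example

import (
	"errors"
	"testing"

	"github.com/agiledragon/gomonkey/v2"
)

type Proof struct{ valid bool }

// SanityCheck normally validates the proof contents.
func (p *Proof) SanityCheck() error {
	if !p.valid {
		return errors.New("invalid proof")
	}
	return nil
}

func TestPatchedSanityCheck(t *testing.T) {
	// Patch SanityCheck on *Proof so every call succeeds for the duration of
	// the test, mirroring the patch on *message.OpenVMBundleProof above.
	patch := gomonkey.ApplyMethodFunc((*Proof)(nil), "SanityCheck", func() error {
		return nil
	})
	defer patch.Reset()

	p := &Proof{valid: false}
	if err := p.SanityCheck(); err != nil {
		t.Fatalf("expected patched method to succeed, got %v", err)
	}
}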
@@ -121,7 +121,7 @@ func (o *Batch) GetBatchCount(ctx context.Context) (uint64, error) {
 }
 
 // GetVerifiedProofByHash retrieves the verified aggregate proof for a batch with the given hash.
-func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash, hardForkName string) (message.BatchProof, error) {
+func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.OpenVMBatchProof, error) {
 	db := o.db.WithContext(ctx)
 	db = db.Model(&Batch{})
 	db = db.Select("proof")
@@ -132,11 +132,11 @@ func (o *Batch) GetVerifiedProofByHash(ctx context.Context, hash, hardForkName s
 		return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
 	}
 
-	proof := message.NewBatchProof(hardForkName)
+	var proof message.OpenVMBatchProof
 	if err := json.Unmarshal(batch.Proof, &proof); err != nil {
 		return nil, fmt.Errorf("Batch.GetVerifiedProofByHash error: %w, batch hash: %v", err, hash)
 	}
-	return proof, nil
+	return &proof, nil
 }
 
 // GetLatestBatch retrieves the latest batch from the database.
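On the ORM side, the interface-plus-factory pattern (message.NewBatchProof(hardForkName) returning a message.BatchProof interface) collapses into a direct unmarshal of one concrete struct. A stripped-down sketch of the new shape, with a placeholder type:

package example

import "encoding/json"

// OpenVMProof is a placeholder for the single post-Euclid proof format.
type OpenVMProof struct {
	Proof []byte `json:"proof"`
}

// decodeProof unmarshals straight into the concrete type; no hardfork-keyed
// factory is needed once only one format remains.
func decodeProof(raw []byte) (*OpenVMProof, error) {
	var p OpenVMProof
	if err := json.Unmarshal(raw, &p); err != nil {
		return nil, err
	}
	return &p, nil
}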
@@ -432,7 +432,7 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st
 
 // UpdateProofByHash updates the batch proof by hash.
 // for unit test.
-func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof message.BatchProof, proofTimeSec uint64) error {
+func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.OpenVMBatchProof, proofTimeSec uint64) error {
 	proofBytes, err := json.Marshal(proof)
 	if err != nil {
 		return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)
@@ -134,7 +134,7 @@ func (o *Bundle) GetFirstPendingBundle(ctx context.Context) (*Bundle, error) {
 }
 
 // GetVerifiedProofByHash retrieves the verified aggregate proof for a bundle with the given hash.
-func (o *Bundle) GetVerifiedProofByHash(ctx context.Context, hash, hardForkName string) (message.BundleProof, error) {
+func (o *Bundle) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.OpenVMBundleProof, error) {
 	db := o.db.WithContext(ctx)
 	db = db.Model(&Bundle{})
 	db = db.Select("proof")
@@ -145,11 +145,11 @@ func (o *Bundle) GetVerifiedProofByHash(ctx context.Context, hash, hardForkName
 		return nil, fmt.Errorf("Bundle.GetVerifiedProofByHash error: %w, bundle hash: %v", err, hash)
 	}
 
-	proof := message.NewBundleProof(hardForkName)
+	var proof message.OpenVMBundleProof
 	if err := json.Unmarshal(bundle.Proof, &proof); err != nil {
 		return nil, fmt.Errorf("Bundle.GetVerifiedProofByHash error: %w, bundle hash: %v", err, hash)
 	}
-	return proof, nil
+	return &proof, nil
 }
 
 // InsertBundle inserts a new bundle into the database.
@@ -256,7 +256,7 @@ func (o *Bundle) UpdateRollupStatus(ctx context.Context, hash string, status typ
 
 // UpdateProofAndProvingStatusByHash updates the bundle proof and proving status by hash.
 // only used in unit tests.
-func (o *Bundle) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof message.BundleProof, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
+func (o *Bundle) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.OpenVMBundleProof, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
 	db := o.db
 	if len(dbTX) > 0 && dbTX[0] != nil {
 		db = dbTX[0]
@@ -292,7 +292,7 @@ func TestBatchOrm(t *testing.T) {
 	err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
 	assert.NoError(t, err)
 
-	dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1, "darwinV2")
+	dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1)
 	assert.Error(t, err)
 	assert.Nil(t, dbProof)
@@ -451,18 +451,16 @@ func TestBundleOrm(t *testing.T) {
 	})
 
 	t.Run("GetVerifiedProofByHash", func(t *testing.T) {
-		proof := &message.Halo2BundleProof{
-			RawProof: []byte("test proof"),
-		}
+		proof := &message.OpenVMBundleProof{EvmProof: &message.OpenVMEvmProof{Instances: make([]byte, 384)}}
 		proofBytes, err := json.Marshal(proof)
 		assert.NoError(t, err)
 
 		err = db.Model(&Bundle{}).Where("hash = ?", bundle1.Hash).Update("proof", proofBytes).Error
 		assert.NoError(t, err)
 
-		retrievedProof, err := bundleOrm.GetVerifiedProofByHash(context.Background(), bundle1.Hash, "darwinV2")
+		retrievedProof, err := bundleOrm.GetVerifiedProofByHash(context.Background(), bundle1.Hash)
 		assert.NoError(t, err)
-		assert.Equal(t, proof.RawProof, retrievedProof.Proof())
+		assert.Equal(t, proof.Proof(), retrievedProof.Proof())
 	})
 
 	t.Run("GetBundles", func(t *testing.T) {
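The updated tests compare proofs through the Proof() accessor on both sides instead of reaching into a RawProof field that no longer exists. A self-contained round-trip sketch of that pattern with placeholder types (the field names and JSON tags here are illustrative, not the repo's):

package example

import (
	"bytes"
	"encoding/json"
	"testing"
)

type EvmProof struct {
	Instances []byte `json:"instances"`
}

type BundleProof struct {
	EvmProof *EvmProof `json:"evm_proof"`
}

// Proof mimics an accessor like OpenVMBundleProof.Proof().
func (p *BundleProof) Proof() []byte {
	if p.EvmProof == nil {
		return nil
	}
	return p.EvmProof.Instances
}

func TestProofRoundTrip(t *testing.T) {
	original := &BundleProof{EvmProof: &EvmProof{Instances: make([]byte, 384)}}

	raw, err := json.Marshal(original)
	if err != nil {
		t.Fatal(err)
	}

	restored := &BundleProof{}
	if err := json.Unmarshal(raw, restored); err != nil {
		t.Fatal(err)
	}

	// Compare through the accessor, as the ORM tests do.
	if !bytes.Equal(original.Proof(), restored.Proof()) {
		t.Fatalf("proof mismatch after round trip")
	}
}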
@@ -474,9 +472,7 @@ func TestBundleOrm(t *testing.T) {
 	})
 
 	t.Run("UpdateProofAndProvingStatusByHash", func(t *testing.T) {
-		proof := &message.Halo2BundleProof{
-			RawProof: []byte("new test proof"),
-		}
+		proof := &message.OpenVMBundleProof{EvmProof: &message.OpenVMEvmProof{Instances: make([]byte, 384)}}
 		err := bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle2.Hash, proof, types.ProvingTaskVerified, 600)
 		assert.NoError(t, err)
@@ -487,10 +483,10 @@ func TestBundleOrm(t *testing.T) {
 		assert.Equal(t, int32(600), bundle.ProofTimeSec)
 		assert.NotNil(t, bundle.ProvedAt)
 
-		retrievedProof := message.Halo2BundleProof{}
+		retrievedProof := &message.OpenVMBundleProof{}
 		err = json.Unmarshal(bundle.Proof, &retrievedProof)
 		assert.NoError(t, err)
-		assert.Equal(t, proof.RawProof, retrievedProof.Proof())
+		assert.Equal(t, proof.Proof(), retrievedProof.Proof())
 	})
 
 	t.Run("UpdateRollupStatus", func(t *testing.T) {
@@ -208,7 +208,7 @@ func TestFunction(t *testing.T) {
 
 	// l1 rollup and watch rollup events
 	t.Run("TestCommitAndFinalizeGenesisBatch", testCommitAndFinalizeGenesisBatch)
-	t.Run("testCommitBatchAndFinalizeBundleCodecV4V5V6", testCommitBatchAndFinalizeBundleCodecV4V5V6)
+	t.Run("TestCommitBatchAndFinalizeBundleCodecV4V5V6", testCommitBatchAndFinalizeBundleCodecV4V5V6)
 	t.Run("TestCommitBatchAndFinalizeBundleCodecV7", testCommitBatchAndFinalizeBundleCodecV7)
 
 	// l1 gas oracle
@@ -8,6 +8,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/agiledragon/gomonkey/v2"
 	"github.com/scroll-tech/da-codec/encoding"
 	"github.com/scroll-tech/go-ethereum/common"
 	gethTypes "github.com/scroll-tech/go-ethereum/core/types"
@@ -147,11 +148,12 @@ func testCommitBatchAndFinalizeBundleCodecV4V5V6(t *testing.T) {
 
 	bup.TryProposeBundle() // The proposed bundle contains two batches when codec version is codecv3.
 
-	batchProof := &message.Halo2BatchProof{
-		RawProof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-		Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-		Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-	}
+	patchGuard1 := gomonkey.ApplyMethodFunc((*message.OpenVMBatchProof)(nil), "SanityCheck", func() error {
+		return nil
+	})
+	defer patchGuard1.Reset()
+
+	batchProof := &message.OpenVMBatchProof{}
 	batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
 	assert.NoError(t, err)
 	batches = batches[1:]
@@ -162,11 +164,12 @@ func testCommitBatchAndFinalizeBundleCodecV4V5V6(t *testing.T) {
 		assert.NoError(t, err)
 	}
 
-	bundleProof := &message.Halo2BundleProof{
-		RawProof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-		Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-		Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-	}
+	patchGuard2 := gomonkey.ApplyMethodFunc((*message.OpenVMBundleProof)(nil), "SanityCheck", func() error {
+		return nil
+	})
+	defer patchGuard2.Reset()
+
+	bundleProof := &message.OpenVMBundleProof{EvmProof: &message.OpenVMEvmProof{Instances: make([]byte, 384)}}
 	bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
 	assert.NoError(t, err)
 	for _, bundle := range bundles {