Compare commits

63 Commits

Author SHA1 Message Date
Ho 14e2633ba3 Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-12-22 17:10:13 +09:00
Ho 21326c25e6 Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-12-04 19:06:09 +09:00
Ho 9c2bc02f64 Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-11-24 18:41:23 +09:00
Ho 9e5579c4cb cover client reset in test 2025-11-21 12:34:11 +09:00
Ho ac4a72003c refactoring client 2025-11-21 12:25:54 +09:00
Ho 19447984bd fix issues 2025-11-21 10:13:39 +09:00
Ho d66d705456 fix after merging 2025-11-21 08:37:30 +09:00
Ho c938d6c25e Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-11-21 08:33:55 +09:00
Ho cf9e3680c0 Fix login version issue 2025-11-11 19:09:06 +09:00
Ho e9470ff7a5 update config template 2025-11-11 15:24:16 +09:00
Ho 51b1e79b31 add docker action 2025-11-11 14:28:25 +09:00
Ho c22d9ecad1 fix goimport issue 2025-11-06 16:11:59 +09:00
Ho e7551650b2 fix concurrent issue 2025-11-06 16:08:39 +09:00
Ho 20fde41be8 complete persistent layer and unit test 2025-11-05 22:02:14 +09:00
Ho 4df1dd8acd Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-10-27 15:09:17 +09:00
Ho 6696aac16a WIP 2025-10-23 15:23:59 +09:00
Ho 4b79e63c9b WIP: some refactors 2025-10-22 10:27:38 +09:00
Ho ac0396db3c add persistent for running status 2025-10-22 08:31:55 +09:00
Ho 17e6c5b7ac robust prover manager 2025-10-20 22:24:04 +09:00
Ho b6e33456fa fix issue 2025-10-20 22:02:48 +09:00
Ho 7572bf8923 fix 2025-10-20 15:21:13 +09:00
Ho 5d41788b07 + fix get task behavior + improve the robust of tests 2025-10-20 14:42:05 +09:00
Ho 8f8a537fba Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-10-20 14:09:45 +09:00
Ho b1c3a4ecc0 more log for init 2025-10-17 22:27:51 +09:00
Ho d9a29cddce fix config issue 2025-10-17 22:26:29 +09:00
Ho c992157eb4 Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-10-17 16:14:52 +09:00
Ho 404c664e10 fix unittest 2025-10-10 15:33:55 +09:00
Ho 8a15836d20 add compatibile mode and more logs 2025-10-09 14:30:43 +09:00
Ho 4365aafa9a refactor libzkp to be completely mocked out 2025-10-08 11:32:13 +09:00
Ho 6ee026fa16 depress link for libzkp 2025-10-07 11:04:04 +09:00
Ho c79ad57fb7 finish binary 2025-10-07 10:54:41 +09:00
Ho fa5b113248 Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-10-07 09:53:54 +09:00
Zhang Zhuo 884b050866 Merge branch 'develop' into coordinator_proxy 2025-09-19 09:39:24 +08:00
Ho 1d9fa41535 Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-09-10 20:48:38 +09:00
Ho b7f23c6734 basic tests 2025-09-10 20:48:21 +09:00
Ho 057e22072c fix issues 2025-09-10 20:38:21 +09:00
Ho c7b83a0784 fix issue in test 2025-09-10 13:55:45 +09:00
Ho 92ca7a6b76 improve get_task proxy 2025-09-10 13:55:38 +09:00
Ho 256c90af6f Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-09-09 22:21:46 +09:00
Ho 50f3e1a97c fix issues from test 2025-09-09 22:21:24 +09:00
Ho 2721503657 refining 2025-09-09 20:10:18 +09:00
Ho a04b64df03 routes 2025-09-08 22:30:51 +09:00
Ho 78dbe6cde1 controller WIP 2025-09-07 22:39:32 +09:00
Ho 9df6429d98 wip 2025-09-06 21:50:55 +09:00
Ho e6be62f633 WIP 2025-09-05 22:31:45 +09:00
Ho c72ee5d679 Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-09-03 22:13:03 +09:00
Ho 4725d8a73c Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-09-02 17:24:58 +09:00
Ho 322766f54f WIP 2025-09-02 17:22:35 +09:00
Ho 5614ec3b86 WIP 2025-09-01 10:12:16 +09:00
Ho 5a07a1652b WIP 2025-08-27 09:43:30 +09:00
Ho 64ef0f4ec0 WIP 2025-08-25 11:52:03 +09:00
Ho 321dd43af8 unit test for client 2025-08-25 11:43:50 +09:00
Ho 624a7a29b8 WIP: AI step 2025-08-25 09:35:10 +09:00
Ho 4f878d9231 AI step 2025-08-24 23:05:56 +09:00
Ho 7b3a65b35b framework for auto login 2025-08-24 22:41:17 +09:00
Ho 0d238d77a6 WIP: the structure of client manager 2025-08-24 22:32:38 +09:00
Ho 76ecdf064a add proxy config sample 2025-08-24 22:14:32 +09:00
Ho 5c6c225f76 WIP: config and client controller 2025-08-24 22:14:22 +09:00
Ho 3adb2e0a1b WIP: controller 2025-08-24 21:18:13 +09:00
Ho 412ad56a64 extend loginlogic 2025-08-24 20:43:40 +09:00
Ho 9796d16f6c WIP: update login logic and coordinator client 2025-08-24 20:32:11 +09:00
Ho 1f2b857671 add proxy_login route 2025-08-24 15:35:51 +09:00
Ho 5dbb5c5fb7 extend api for proxy 2025-08-24 14:54:54 +09:00
69 changed files with 4808 additions and 2596 deletions

View File

@@ -1 +0,0 @@
target/

View File

@@ -360,6 +360,49 @@ jobs:
          scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
          ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
  coordinator-proxy:
    runs-on:
      group: scroll-reth-runner-group
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}
      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v2
      - name: check repo and create it if not exist
        env:
          REPOSITORY: coordinator-proxy
        run: |
          aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
      - name: Build and push
        uses: docker/build-push-action@v3
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          REPOSITORY: coordinator-proxy
          IMAGE_TAG: ${{ github.ref_name }}
        with:
          context: .
          file: ./build/dockerfiles/coordinator-proxy.Dockerfile
          push: true
          tags: |
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
  coordinator-cron:
    runs-on:
      group: scroll-reth-runner-group

Cargo.lock (generated): 3234 changed lines. File diff suppressed because it is too large.

View File

@@ -10,7 +10,7 @@ resolver = "2"
[workspace.package]
authors = ["Scroll developers"]
edition = "2024"
edition = "2021"
homepage = "https://scroll.io"
readme = "README.md"
repository = "https://github.com/scroll-tech/scroll"
@@ -24,19 +24,26 @@ scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag =
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll", "rkyv"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2" }
sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll"] }
axiom-sdk = { git = "https://github.com/axiom-crypto/axiom-api-cli.git", tag = "v1.0.9" }
metrics = "0.23.0"
metrics-util = "0.17"
metrics-tracing-context = "0.16.0"
anyhow = "1.0"
alloy = { version = "1", default-features = false }
alloy-primitives = { version = "1.4.1", default-features = false, features = ["tiny-keccak"] }
# also use this to trigger "serde" feature for primitives
alloy-serde = { version = "1", default-features = false }
jiff = "0.2"
serde = { version = "1", default-features = false, features = ["derive"] }
serde_json = { version = "1.0" }
serde_derive = "1.0"
tokio = "1"
serde_with = "3"
itertools = "0.14"
tiny-keccak = "2.0"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
eyre = "0.6"
once_cell = "1.20"
base64 = "0.22"

View File

@@ -1,16 +0,0 @@
UNAME_S := $(shell uname -s)
IS_DARWIN := $(findstring Darwin,$(UNAME_S))
SHLIB_EXT := so
ifeq ($(UNAME_S),Darwin)
SHLIB_EXT := dylib
endif
LIB_ZKP_NAME := libzkp.$(SHLIB_EXT)
define macos_codesign
@if [ -n "$(IS_DARWIN)" ]; then \
codesign --force --sign - '$(1)'; \
codesign --verify --deep --verbose '$(1)'; \
fi
endef

View File

@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
# Build coordinator proxy
FROM base as builder
COPY . .
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_proxy && mv ./build/bin/coordinator_proxy /bin/coordinator_proxy
# Pull coordinator proxy into a second stage deploy ubuntu container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install vim netcat-openbsd net-tools curl jq -y
COPY --from=builder /bin/coordinator_proxy /bin/
RUN /bin/coordinator_proxy --version
WORKDIR /app
ENTRYPOINT ["/bin/coordinator_proxy"]

View File

@@ -0,0 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/

View File

@@ -12,6 +12,7 @@ require (
github.com/gin-gonic/gin v1.9.1
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.20
github.com/mitchellh/mapstructure v1.5.0
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.19.0
@@ -147,7 +148,6 @@ require (
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect

View File

@@ -4,6 +4,7 @@ import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/mitchellh/mapstructure"
)
// Response the response schema
@@ -13,6 +14,19 @@ type Response struct {
Data interface{} `json:"data"`
}
func (resp *Response) DecodeData(out interface{}) error {
// Decode generically unmarshaled JSON (map[string]any, []any) into a typed struct
// honoring `json` tags.
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
TagName: "json",
Result: out,
})
if err != nil {
return err
}
return dec.Decode(resp.Data)
}
// RenderJSON renders response with json
func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
var errMsg string
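
A minimal usage sketch for the new DecodeData helper (hypothetical payload values; the field names follow the `json` tags of the target struct):

// Sketch: resp.Data holds generically unmarshaled JSON (map[string]any);
// DecodeData copies it into a typed struct, honoring `json` tags.
resp := Response{Data: map[string]interface{}{
    "time":  "2025-10-07T10:54:41+09:00",
    "token": "example-jwt-token",
}}
var login struct {
    Time  string `json:"time"`
    Token string `json:"token"`
}
if err := resp.DecodeData(&login); err != nil {
    // handle decode failure
}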

View File

@@ -1,5 +1,4 @@
/build/bin
.idea
internal/logic/verifier/lib
libzkp.so
libzkp.dylib
internal/libzkp/lib/libzkp.so

View File

@@ -1,10 +1,8 @@
include ../build/common.mk
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator libzkp
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
LIBZKP_PATH=./internal/logic/libzkp/lib/$(LIB_ZKP_NAME)
LIBZKP_PATH=./internal/logic/libzkp/lib/libzkp.so
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZKVM_VERSION=$(shell grep -m 1 "zkvm-prover?" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
@@ -29,7 +27,6 @@ libzkp: clean_libzkp $(LIBZKP_PATH)
coordinator_api: $(LIBZKP_PATH) ## Builds the Coordinator api instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
$(call macos_codesign,$(PWD)/build/bin/coordinator_api)
coordinator_cron:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron
@@ -37,6 +34,10 @@ coordinator_cron:
coordinator_tool:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool
coordinator_proxy:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_proxy ./cmd/proxy
localsetup: coordinator_api ## Local setup: build coordinator_api, copy config, and setup releases
mkdir -p build/bin/conf
@echo "Copying configuration files..."
@@ -49,8 +50,6 @@ localsetup: coordinator_api ## Local setup: build coordinator_api, copy config,
@echo "Setting up releases..."
cd $(CURDIR)/build && bash setup_releases.sh
run_coordinator_api: coordinator_api
cd build/bin && ./coordinator_api
#coordinator_api_skip_libzkp:
# go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api

View File

@@ -10,8 +10,6 @@ See [monorepo prerequisites](../README.md#prerequisites).
## Build
Using Go version 1.22
```bash
make clean
make coordinator_api

View File

@@ -64,10 +64,9 @@ for ((i=0; i<$VERIFIER_COUNT; i++)); do
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/verifier.bin -O ${ASSET_DIR}/verifier.bin
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/root_verifier_vk -O ${ASSET_DIR}/root_verifier_vk
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/openVmVk.json -O ${ASSET_DIR}/openVmVk.json
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/axiom_program_ids.json -O ${ASSET_DIR}/axiom_program_ids.json
echo "Completed downloading assets for $FORK_NAME"
echo "---"
done
echo "All verifier assets downloaded successfully"
echo "All verifier assets downloaded successfully"

View File

@@ -0,0 +1,122 @@
package app
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"os/signal"
"time"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/proxy"
"scroll-tech/coordinator/internal/route"
)
var app *cli.App
func init() {
// Set up coordinator app info.
app = cli.NewApp()
app.Action = action
app.Name = "coordinator proxy"
app.Usage = "Proxy for multiple Scroll L2 Coordinators"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `coordinator-test` app for integration-test.
utils.RegisterSimulation(app, utils.CoordinatorAPIApp)
}
func action(ctx *cli.Context) error {
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewProxyConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
var db *gorm.DB
if dbCfg := cfg.ProxyManager.DB; dbCfg != nil {
log.Info("Apply persistent storage", "via", cfg.ProxyManager.DB.DSN)
db, err = database.InitDB(cfg.ProxyManager.DB)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
if err = database.CloseDB(db); err != nil {
log.Error("can not close db connection", "error", err)
}
}()
observability.Server(ctx, db)
}
registry := prometheus.DefaultRegisterer
apiSrv := server(ctx, cfg, db, registry)
log.Info(
"Start coordinator api successfully.",
"version", version.Version,
)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
log.Info("start shutdown coordinator proxy server ...")
closeCtx, cancelExit := context.WithTimeout(context.Background(), 5*time.Second)
defer cancelExit()
if err = apiSrv.Shutdown(closeCtx); err != nil {
log.Warn("shutdown coordinator proxy server failure", "error", err)
return nil
}
<-closeCtx.Done()
log.Info("coordinator proxy server exiting success")
return nil
}
func server(ctx *cli.Context, cfg *config.ProxyConfig, db *gorm.DB, reg prometheus.Registerer) *http.Server {
router := gin.New()
proxy.InitController(cfg, db, reg)
route.ProxyRoute(router, cfg, reg)
port := ctx.String(httpPortFlag.Name)
srv := &http.Server{
Addr: fmt.Sprintf(":%s", port),
Handler: router,
ReadHeaderTimeout: time.Minute,
}
go func() {
if runServerErr := srv.ListenAndServe(); runServerErr != nil && !errors.Is(runServerErr, http.ErrServerClosed) {
log.Crit("run coordinator proxy http server failure", "error", runServerErr)
}
}()
return srv
}
// Run runs the coordinator proxy.
func Run() {
// Run the cli app.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -0,0 +1,30 @@
package app
import "github.com/urfave/cli/v2"
var (
apiFlags = []cli.Flag{
// http flags
&httpEnabledFlag,
&httpListenAddrFlag,
&httpPortFlag,
}
// httpEnabledFlag enable rpc server.
httpEnabledFlag = cli.BoolFlag{
Name: "http",
Usage: "Enable the HTTP-RPC server",
Value: false,
}
// httpListenAddrFlag set the http address.
httpListenAddrFlag = cli.StringFlag{
Name: "http.addr",
Usage: "HTTP-RPC server listening interface",
Value: "localhost",
}
// httpPortFlag set http.port.
httpPortFlag = cli.IntFlag{
Name: "http.port",
Usage: "HTTP-RPC server listening port",
Value: 8590,
}
)

View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/coordinator/cmd/proxy/app"
func main() {
app.Run()
}

View File

@@ -0,0 +1,31 @@
{
"proxy_manager": {
"proxy_cli": {
"proxy_name": "proxy_name",
"secret": "client private key"
},
"auth": {
"secret": "proxy secret key",
"challenge_expire_duration_sec": 3600,
"login_expire_duration_sec": 3600
},
"verifier": {
"min_prover_version": "v4.4.45",
"verifiers": []
},
"db": {
"driver_name": "postgres",
"dsn": "postgres://localhost/scroll?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
}
},
"coordinators": {
"sepolia": {
"base_url": "http://localhost:8555",
"retry_count": 10,
"retry_wait_time_sec": 10,
"connection_timeout_sec": 30
}
}
}

View File

@@ -0,0 +1,74 @@
package config
import (
"encoding/json"
"os"
"path/filepath"
"scroll-tech/common/database"
"scroll-tech/common/utils"
)
// ProxyManager holds the proxy's configuration items.
type ProxyManager struct {
// Zk verifier config, used to constrain the connected provers.
Verifier *VerifierConfig `json:"verifier"`
Client *ProxyClient `json:"proxy_cli"`
Auth *Auth `json:"auth"`
DB *database.Config `json:"db,omitempty"`
}
func (m *ProxyManager) Normalize() {
if m.Client.Secret == "" {
m.Client.Secret = m.Auth.Secret
}
if m.Client.ProxyVersion == "" {
m.Client.ProxyVersion = m.Verifier.MinProverVersion
}
}
// ProxyClient is the configuration the proxy uses to connect to an upstream as a client
type ProxyClient struct {
ProxyName string `json:"proxy_name"`
ProxyVersion string `json:"proxy_version,omitempty"`
Secret string `json:"secret,omitempty"`
}
// UpStream is the configuration for an upstream coordinator
type UpStream struct {
BaseUrl string `json:"base_url"`
RetryCount uint `json:"retry_count"`
RetryWaitTime uint `json:"retry_wait_time_sec"`
ConnectionTimeoutSec uint `json:"connection_timeout_sec"`
CompatibileMode bool `json:"compatible_mode,omitempty"`
}
// ProxyConfig holds all configuration items of the proxy.
type ProxyConfig struct {
ProxyManager *ProxyManager `json:"proxy_manager"`
ProxyName string `json:"proxy_name"`
Coordinators map[string]*UpStream `json:"coordinators"`
}
// NewProxyConfig returns a new instance of ProxyConfig.
func NewProxyConfig(file string) (*ProxyConfig, error) {
buf, err := os.ReadFile(filepath.Clean(file))
if err != nil {
return nil, err
}
cfg := &ProxyConfig{}
err = json.Unmarshal(buf, cfg)
if err != nil {
return nil, err
}
// Override config with environment variables
err = utils.OverrideConfigWithEnv(cfg, "SCROLL_COORDINATOR_PROXY")
if err != nil {
return nil, err
}
return cfg, nil
}
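
A minimal loading sketch (the config path is hypothetical, matching the sample template above; log is the go-ethereum logger already used in app.go):

// Sketch: load and normalize the proxy config before wiring controllers.
cfg, err := config.NewProxyConfig("conf/config-proxy.json") // hypothetical path
if err != nil {
    log.Crit("failed to load proxy config", "error", err)
}
// Normalize fills the client secret/version from auth/verifier settings when left empty.
cfg.ProxyManager.Normalize()
for name, up := range cfg.Coordinators {
    log.Info("upstream configured", "name", name, "url", up.BaseUrl)
}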

View File

@@ -19,28 +19,56 @@ type AuthController struct {
loginLogic *auth.LoginLogic
}
// NewAuthController returns an LoginController instance
func NewAuthController(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *AuthController {
func NewAuthControllerWithLogic(loginLogic *auth.LoginLogic) *AuthController {
return &AuthController{
loginLogic: auth.NewLoginLogic(db, cfg, vf),
loginLogic: loginLogic,
}
}
// Login the api controller for login
// NewAuthController returns an LoginController instance
func NewAuthController(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *AuthController {
return &AuthController{
loginLogic: auth.NewLoginLogic(db, cfg.ProverManager.Verifier, vf),
}
}
// Login the api controller for login, used as the Authenticator in JWT.
// It can work in two modes: the full process for a normal login, or, if the
// login request is posted from a proxy, a simpler process to log in a client.
func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
// check if the login is post by proxy
var viaProxy bool
if proverType, proverTypeExist := c.Get(types.ProverProviderTypeKey); proverTypeExist {
proverType := uint8(proverType.(float64))
viaProxy = proverType == types.ProverProviderTypeProxy
}
var login types.LoginParameter
if err := c.ShouldBind(&login); err != nil {
return "", fmt.Errorf("missing the public_key, err:%w", err)
}
// check login parameter's token is equal to bearer token, the Authorization must be existed
// if not exist, the jwt token will intercept it
brearToken := c.GetHeader("Authorization")
if brearToken != "Bearer "+login.Message.Challenge {
return "", errors.New("check challenge failure for the not equal challenge string")
// if not posted via proxy, run the normal login process
if !viaProxy {
// check that the login parameter's challenge equals the bearer token; the
// Authorization header must exist, otherwise the jwt middleware intercepts it
brearToken := c.GetHeader("Authorization")
if brearToken != "Bearer "+login.Message.Challenge {
return "", errors.New("check challenge failure for the not equal challenge string")
}
if err := auth.VerifyMsg(&login); err != nil {
return "", err
}
// check the challenge is used, if used, return failure
if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
return "", fmt.Errorf("login insert challenge string failure:%w", err)
}
}
if err := a.loginLogic.Check(&login); err != nil {
if err := a.loginLogic.CompatiblityCheck(&login); err != nil {
return "", fmt.Errorf("check the login parameter failure: %w", err)
}
@@ -49,11 +77,6 @@ func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
return "", fmt.Errorf("prover hard fork name failure:%w", err)
}
// check the challenge is used, if used, return failure
if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
return "", fmt.Errorf("login insert challenge string failure:%w", err)
}
returnData := types.LoginParameterWithHardForkName{
HardForkName: hardForkNames,
LoginParameter: login,
@@ -85,10 +108,6 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
c.Set(types.ProverName, proverName)
}
if publicKey, ok := claims[types.PublicKey]; ok {
c.Set(types.PublicKey, publicKey)
}
if proverVersion, ok := claims[types.ProverVersion]; ok {
c.Set(types.ProverVersion, proverVersion)
}
@@ -101,5 +120,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
c.Set(types.ProverProviderTypeKey, providerType)
}
if publicKey, ok := claims[types.PublicKey]; ok {
return publicKey
}
return nil
}

View File

@@ -0,0 +1,150 @@
package proxy
import (
"context"
"fmt"
"sync"
"time"
jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/logic/auth"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/types"
)
// AuthController is login API
type AuthController struct {
apiLogin *api.AuthController
clients Clients
proverMgr *ProverManager
}
const upstreamConnTimeout = time.Second * 5
const LoginParamCache = "login_param"
const ProverTypesKey = "prover_types"
const SignatureKey = "prover_signature"
// NewAuthController returns an LoginController instance
func NewAuthController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager) *AuthController {
// use a dummy Verifier to create login logic (we do not use any information in verifier)
dummyVf := verifier.Verifier{
OpenVMVkMap: make(map[string]struct{}),
}
loginLogic := auth.NewLoginLogicWithSimpleDeduplicator(cfg.ProxyManager.Verifier, &dummyVf)
authController := &AuthController{
apiLogin: api.NewAuthControllerWithLogic(loginLogic),
clients: clients,
proverMgr: proverMgr,
}
return authController
}
// Login extends the Login handler in the api controller
func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
loginRes, err := a.apiLogin.Login(c)
if err != nil {
return nil, err
}
loginParam := loginRes.(types.LoginParameterWithHardForkName)
if loginParam.LoginParameter.Message.ProverProviderType == types.ProverProviderTypeProxy {
return nil, fmt.Errorf("proxy do not support recursive login")
}
session := a.proverMgr.GetOrCreate(loginParam.PublicKey)
log.Debug("start handling login", "cli", loginParam.Message.ProverName)
loginCtx, cf := context.WithTimeout(context.Background(), upstreamConnTimeout)
var wg sync.WaitGroup
for _, cli := range a.clients {
wg.Add(1)
go func(cli Client) {
defer wg.Done()
if err := session.ProxyLogin(loginCtx, cli, &loginParam.LoginParameter); err != nil {
log.Error("proxy login failed during token cache update",
"userKey", loginParam.PublicKey,
"upstream", cli.Name(),
"error", err)
}
}(cli)
}
go func(cliName string) {
wg.Wait()
cf()
log.Debug("first login attempt has completed", "cli", cliName)
}(loginParam.Message.ProverName)
return loginParam.LoginParameter, nil
}
// PayloadFunc returns jwt.MapClaims with {public key, prover name}.
func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
v, ok := data.(types.LoginParameter)
if !ok {
log.Error("PayloadFunc received unexpected type", "type", fmt.Sprintf("%T", data))
return jwt.MapClaims{}
}
return jwt.MapClaims{
types.PublicKey: v.PublicKey,
types.ProverName: v.Message.ProverName,
types.ProverVersion: v.Message.ProverVersion,
types.ProverProviderTypeKey: v.Message.ProverProviderType,
SignatureKey: v.Signature,
ProverTypesKey: v.Message.ProverTypes,
}
}
// IdentityHandler replies to client for /login
func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
claims := jwt.ExtractClaims(c)
loginParam := &types.LoginParameter{}
if proverName, ok := claims[types.ProverName]; ok {
loginParam.Message.ProverName, _ = proverName.(string)
}
if proverVersion, ok := claims[types.ProverVersion]; ok {
loginParam.Message.ProverVersion, _ = proverVersion.(string)
}
if providerType, ok := claims[types.ProverProviderTypeKey]; ok {
num, _ := providerType.(float64)
loginParam.Message.ProverProviderType = types.ProverProviderType(num)
}
if signature, ok := claims[SignatureKey]; ok {
loginParam.Signature, _ = signature.(string)
}
if proverTypes, ok := claims[ProverTypesKey]; ok {
arr, _ := proverTypes.([]any)
for _, elm := range arr {
num, _ := elm.(float64)
loginParam.Message.ProverTypes = append(loginParam.Message.ProverTypes, types.ProverType(num))
}
}
if publicKey, ok := claims[types.PublicKey]; ok {
loginParam.PublicKey, _ = publicKey.(string)
}
if loginParam.PublicKey != "" {
c.Set(LoginParamCache, loginParam)
c.Set(types.ProverName, loginParam.Message.ProverName)
// public key will also be set since we have specified public_key as the identity key
return loginParam.PublicKey
}
return nil
}

View File

@@ -0,0 +1,246 @@
//nolint:errcheck,bodyclose // body is closed in the following handleHttpResp call
package proxy
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
ctypes "scroll-tech/common/types"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/types"
)
type ProxyCli interface {
Login(ctx context.Context, genLogin func(string) (*types.LoginParameter, error)) (*ctypes.Response, error)
ProxyLogin(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error)
Token() string
Reset()
}
type ProverCli interface {
GetTask(ctx context.Context, param *types.GetTaskParameter) (*ctypes.Response, error)
SubmitProof(ctx context.Context, param *types.SubmitProofParameter) (*ctypes.Response, error)
}
// upClient wraps an http client with a preset host for coordinator API calls
type upClient struct {
httpClient *http.Client
baseURL string
loginToken string
compatibileMode bool
resetFromMgr func()
}
// newUpClient creates a new upClient for the given upstream config
func newUpClient(cfg *config.UpStream) *upClient {
return &upClient{
httpClient: &http.Client{
Timeout: time.Duration(cfg.ConnectionTimeoutSec) * time.Second,
},
baseURL: cfg.BaseUrl,
compatibileMode: cfg.CompatibileMode,
}
}
func (c *upClient) Reset() {
if c.resetFromMgr != nil {
c.resetFromMgr()
}
}
func (c *upClient) Token() string {
return c.loginToken
}
// need a parsable schema definition
type loginSchema struct {
Time string `json:"time"`
Token string `json:"token"`
}
// Login performs the complete login process: get challenge then login
func (c *upClient) Login(ctx context.Context, genLogin func(string) (*types.LoginParameter, error)) (*ctypes.Response, error) {
// Step 1: Get challenge
url := fmt.Sprintf("%s/coordinator/v1/challenge", c.baseURL)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, fmt.Errorf("failed to create challenge request: %w", err)
}
challengeResp, err := c.httpClient.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to get challenge: %w", err)
}
parsedResp, err := handleHttpResp(challengeResp)
if err != nil {
return nil, err
} else if parsedResp.ErrCode != 0 {
return nil, fmt.Errorf("challenge failed: %d (%s)", parsedResp.ErrCode, parsedResp.ErrMsg)
}
// Step 2: Parse challenge response
var challengeSchema loginSchema
if err := parsedResp.DecodeData(&challengeSchema); err != nil {
return nil, fmt.Errorf("failed to parse challenge response: %w", err)
}
// Step 3: Use the token from challenge as Bearer token for login
url = fmt.Sprintf("%s/coordinator/v1/login", c.baseURL)
param, err := genLogin(challengeSchema.Token)
if err != nil {
return nil, fmt.Errorf("failed to setup login parameter: %w", err)
}
jsonData, err := json.Marshal(param)
if err != nil {
return nil, fmt.Errorf("failed to marshal login parameter: %w", err)
}
req, err = http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create login request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+challengeSchema.Token)
loginResp, err := c.httpClient.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to perform login request: %w", err)
}
return handleHttpResp(loginResp)
}
func handleHttpResp(resp *http.Response) (*ctypes.Response, error) {
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusUnauthorized {
defer resp.Body.Close()
var respWithData ctypes.Response
// Note: Body is consumed after decoding, caller should not read it again
if err := json.NewDecoder(resp.Body).Decode(&respWithData); err == nil {
return &respWithData, nil
} else {
return nil, fmt.Errorf("login parsing expected response failed: %v", err)
}
}
return nil, fmt.Errorf("login request failed with status: %d", resp.StatusCode)
}
func (c *upClient) proxyLoginCompatibleMode(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error) {
mimePrivK, err := buildPrivateKey([]byte(param.PublicKey))
if err != nil {
return nil, err
}
mimePkHex := common.Bytes2Hex(crypto.CompressPubkey(&mimePrivK.PublicKey))
genLoginParam := func(challenge string) (*types.LoginParameter, error) {
// Create login parameter with proxy settings
loginParam := &types.LoginParameter{
Message: param.Message,
PublicKey: mimePkHex,
}
loginParam.Message.Challenge = challenge
// Sign the message with the private key
if err := loginParam.SignWithKey(mimePrivK); err != nil {
return nil, fmt.Errorf("failed to sign login parameter: %w", err)
}
return loginParam, nil
}
return c.Login(ctx, genLoginParam)
}
// ProxyLogin makes a POST request to /v1/proxy_login with LoginParameter
func (c *upClient) ProxyLogin(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error) {
if c.compatibileMode {
return c.proxyLoginCompatibleMode(ctx, param)
}
url := fmt.Sprintf("%s/coordinator/v1/proxy_login", c.baseURL)
jsonData, err := json.Marshal(param)
if err != nil {
return nil, fmt.Errorf("failed to marshal proxy login parameter: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create proxy login request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+c.loginToken)
proxyLoginResp, err := c.httpClient.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to perform proxy login request: %w", err)
}
return handleHttpResp(proxyLoginResp)
}
// GetTask makes a POST request to /v1/get_task with GetTaskParameter
func (c *upClient) GetTask(ctx context.Context, param *types.GetTaskParameter) (*ctypes.Response, error) {
url := fmt.Sprintf("%s/coordinator/v1/get_task", c.baseURL)
jsonData, err := json.Marshal(param)
if err != nil {
return nil, fmt.Errorf("failed to marshal get task parameter: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create get task request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
if c.loginToken != "" {
req.Header.Set("Authorization", "Bearer "+c.loginToken)
}
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
return handleHttpResp(resp)
}
// SubmitProof makes a POST request to /v1/submit_proof with SubmitProofParameter
func (c *upClient) SubmitProof(ctx context.Context, param *types.SubmitProofParameter) (*ctypes.Response, error) {
url := fmt.Sprintf("%s/coordinator/v1/submit_proof", c.baseURL)
jsonData, err := json.Marshal(param)
if err != nil {
return nil, fmt.Errorf("failed to marshal submit proof parameter: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create submit proof request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
if c.loginToken != "" {
req.Header.Set("Authorization", "Bearer "+c.loginToken)
}
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
return handleHttpResp(resp)
}
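
Login takes a callback that turns the freshly fetched challenge token into a signed LoginParameter; a sketch of a caller (ctx, pubKeyHex and privKey are assumed to be in scope, mirroring genLoginParam in the client manager below):

// Sketch: drive the two-step challenge/login flow of upClient.
cli := newUpClient(upCfg) // upCfg is a *config.UpStream
resp, err := cli.Login(ctx, func(challenge string) (*types.LoginParameter, error) {
    p := &types.LoginParameter{
        Message: types.Message{
            Challenge:  challenge,
            ProverName: "example-proxy", // hypothetical name
        },
        PublicKey: pubKeyHex, // hex of a compressed secp256k1 public key
    }
    if err := p.SignWithKey(privKey); err != nil {
        return nil, err
    }
    return p, nil
})
if err == nil && resp.ErrCode == 0 {
    // decode resp.Data as loginSchema to obtain the bearer token
}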

View File

@@ -0,0 +1,220 @@
package proxy
import (
"context"
"crypto/ecdsa"
"fmt"
"sync"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/types"
)
type Client interface {
// a client to access upstream coordinator with specified identity
// so prover can contact with coordinator as itself
Client(string) ProverCli
// the client to access upstream as proxy itself
ClientAsProxy(context.Context) ProxyCli
Name() string
}
type ClientManager struct {
name string
cliCfg *config.ProxyClient
cfg *config.UpStream
privKey *ecdsa.PrivateKey
cachedCli struct {
sync.RWMutex
cli *upClient
completionCtx context.Context
}
}
// buildPrivateKey deterministically derives a valid ECDSA private key from arbitrary input bytes
func buildPrivateKey(inputBytes []byte) (*ecdsa.PrivateKey, error) {
// Try appending bytes from 0x0 to 0x20 until we get a valid private key
for appendByte := byte(0x0); appendByte <= 0x20; appendByte++ {
// Append the byte to input
extendedBytes := append(inputBytes, appendByte)
// Calculate 256-bit hash
hash := crypto.Keccak256(extendedBytes)
// Try to create private key from hash
if k, err := crypto.ToECDSA(hash); err == nil {
return k, nil
}
}
return nil, fmt.Errorf("failed to generate valid private key from input bytes")
}
func NewClientManager(name string, cliCfg *config.ProxyClient, cfg *config.UpStream) (*ClientManager, error) {
log.Info("init client", "name", name, "upcfg", cfg.BaseUrl, "compatible mode", cfg.CompatibileMode)
privKey, err := buildPrivateKey([]byte(cliCfg.Secret))
if err != nil {
return nil, err
}
return &ClientManager{
name: name,
privKey: privKey,
cfg: cfg,
cliCfg: cliCfg,
}, nil
}
type ctxKeyType string
const loginCliKey ctxKeyType = "cli"
func (cliMgr *ClientManager) doLogin(ctx context.Context, loginCli *upClient) {
if cliMgr.cfg.CompatibileMode {
loginCli.loginToken = "dummy"
log.Info("Skip login process for compatible mode")
return
}
// Calculate wait time between 2 seconds and cfg.RetryWaitTime
minWait := 2 * time.Second
waitDuration := time.Duration(cliMgr.cfg.RetryWaitTime) * time.Second
if waitDuration < minWait {
waitDuration = minWait
}
for {
log.Info("proxy attempting login to upstream coordinator", "name", cliMgr.name)
loginResp, err := loginCli.Login(ctx, cliMgr.genLoginParam)
if err == nil && loginResp.ErrCode == 0 {
var loginResult loginSchema
err = loginResp.DecodeData(&loginResult)
if err != nil {
log.Error("login parsing data fail", "error", err)
} else {
loginCli.loginToken = loginResult.Token
log.Info("login to upstream coordinator successful", "name", cliMgr.name, "time", loginResult.Time)
// TODO: we need to parse time if we start making use of it
return
}
} else if err != nil {
log.Error("login process fail", "error", err)
} else {
log.Error("login get fail resp", "code", loginResp.ErrCode, "msg", loginResp.ErrMsg)
}
log.Info("login to upstream coordinator failed, retrying", "name", cliMgr.name, "error", err, "waitDuration", waitDuration)
timer := time.NewTimer(waitDuration)
select {
case <-ctx.Done():
timer.Stop()
return
case <-timer.C:
// Continue to next retry
}
}
}
func (cliMgr *ClientManager) Name() string {
return cliMgr.name
}
func (cliMgr *ClientManager) Client(token string) ProverCli {
loginCli := newUpClient(cliMgr.cfg)
loginCli.loginToken = token
return loginCli
}
func (cliMgr *ClientManager) ClientAsProxy(ctx context.Context) ProxyCli {
cliMgr.cachedCli.RLock()
if cliMgr.cachedCli.cli != nil {
defer cliMgr.cachedCli.RUnlock()
return cliMgr.cachedCli.cli
}
cliMgr.cachedCli.RUnlock()
cliMgr.cachedCli.Lock()
if cliMgr.cachedCli.cli != nil {
defer cliMgr.cachedCli.Unlock()
return cliMgr.cachedCli.cli
}
var completionCtx context.Context
// Check if completion context is set
if cliMgr.cachedCli.completionCtx != nil {
completionCtx = cliMgr.cachedCli.completionCtx
} else {
// Set new completion context and launch login goroutine
ctx, completionDone := context.WithCancel(context.TODO())
loginCli := newUpClient(cliMgr.cfg)
loginCli.resetFromMgr = func() {
cliMgr.cachedCli.Lock()
if cliMgr.cachedCli.cli == loginCli {
log.Info("cached client cleared", "name", cliMgr.name)
cliMgr.cachedCli.cli = nil
}
cliMgr.cachedCli.Unlock()
}
completionCtx = context.WithValue(ctx, loginCliKey, loginCli)
cliMgr.cachedCli.completionCtx = completionCtx
// Launch keep-login goroutine
go func() {
defer completionDone()
cliMgr.doLogin(context.Background(), loginCli)
cliMgr.cachedCli.Lock()
cliMgr.cachedCli.cli = loginCli
cliMgr.cachedCli.completionCtx = nil
cliMgr.cachedCli.Unlock()
}()
}
cliMgr.cachedCli.Unlock()
// Wait for completion or request cancellation
select {
case <-ctx.Done():
return nil
case <-completionCtx.Done():
cli := completionCtx.Value(loginCliKey).(*upClient)
return cli
}
}
func (cliMgr *ClientManager) genLoginParam(challenge string) (*types.LoginParameter, error) {
// Generate public key string
publicKeyHex := common.Bytes2Hex(crypto.CompressPubkey(&cliMgr.privKey.PublicKey))
// Create login parameter with proxy settings
loginParam := &types.LoginParameter{
Message: types.Message{
Challenge: challenge,
ProverName: cliMgr.cliCfg.ProxyName,
ProverVersion: version.Version,
ProverProviderType: types.ProverProviderTypeProxy,
ProverTypes: []types.ProverType{}, // Default empty
VKs: []string{}, // Default empty
},
PublicKey: publicKeyHex,
}
// Sign the message with the private key
if err := loginParam.SignWithKey(cliMgr.privKey); err != nil {
return nil, fmt.Errorf("failed to sign login parameter: %w", err)
}
return loginParam, nil
}
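
Since buildPrivateKey hashes the secret deterministically, the proxy presents a stable identity to upstreams across restarts; a small sketch (using only imports already present in this file):

// Sketch: the same secret always derives the same key pair.
k1, err1 := buildPrivateKey([]byte("proxy secret key"))
k2, err2 := buildPrivateKey([]byte("proxy secret key"))
if err1 == nil && err2 == nil {
    same := common.Bytes2Hex(crypto.CompressPubkey(&k1.PublicKey)) ==
        common.Bytes2Hex(crypto.CompressPubkey(&k2.PublicKey))
    fmt.Println(same) // true
}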

View File

@@ -0,0 +1,44 @@
package proxy
import (
"github.com/prometheus/client_golang/prometheus"
"gorm.io/gorm"
"scroll-tech/coordinator/internal/config"
)
var (
// GetTask the prover task controller
GetTask *GetTaskController
// SubmitProof the submit proof controller
SubmitProof *SubmitProofController
// Auth the auth controller
Auth *AuthController
)
// Clients manages a set of thread-safe clients for requesting upstream
// coordinators
type Clients map[string]Client
// InitController inits Controller with database
func InitController(cfg *config.ProxyConfig, db *gorm.DB, reg prometheus.Registerer) {
// normalize cfg
cfg.ProxyManager.Normalize()
clients := make(map[string]Client)
for nm, upCfg := range cfg.Coordinators {
cli, err := NewClientManager(nm, cfg.ProxyManager.Client, upCfg)
if err != nil {
panic("create new client fail")
}
clients[cli.Name()] = cli
}
proverManager := NewProverManagerWithPersistent(100, db)
priorityManager := NewPriorityUpstreamManagerPersistent(db)
Auth = NewAuthController(cfg, clients, proverManager)
GetTask = NewGetTaskController(cfg, clients, proverManager, priorityManager, reg)
SubmitProof = NewSubmitProofController(cfg, clients, proverManager, priorityManager, reg)
}

View File

@@ -0,0 +1,229 @@
package proxy
import (
"fmt"
"math/rand"
"sync"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/coordinator/internal/config"
coordinatorType "scroll-tech/coordinator/internal/types"
)
func getSessionData(ctx *gin.Context) (string, string) {
publicKeyData, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
publicKey, castOk := publicKeyData.(string)
if !publicKeyExist || !castOk {
nerr := fmt.Errorf("no public key binding: %v", publicKeyData)
log.Warn("get_task parameter fail", "error", nerr)
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
return "", ""
}
publicNameData, publicNameExist := ctx.Get(coordinatorType.ProverName)
publicName, castOk := publicNameData.(string)
if !publicNameExist || !castOk {
log.Error("no public name binding for unknown reason, but we still forward with name = 'unknown'", "data", publicNameData)
publicName = "unknown"
}
return publicKey, publicName
}
// PriorityUpstreamManager manages priority upstream mappings with thread safety
type PriorityUpstreamManager struct {
sync.RWMutex
*proverPriorityPersist
data map[string]string
}
// NewPriorityUpstreamManager creates a new PriorityUpstreamManager
func NewPriorityUpstreamManager() *PriorityUpstreamManager {
return &PriorityUpstreamManager{
data: make(map[string]string),
}
}
// NewPriorityUpstreamManagerPersistent creates a PriorityUpstreamManager backed by persistent storage
func NewPriorityUpstreamManagerPersistent(db *gorm.DB) *PriorityUpstreamManager {
return &PriorityUpstreamManager{
data: make(map[string]string),
proverPriorityPersist: NewProverPriorityPersist(db),
}
}
// Get retrieves the priority upstream for a given key
func (p *PriorityUpstreamManager) Get(key string) (string, bool) {
p.RLock()
value, exists := p.data[key]
p.RUnlock()
if !exists {
if v, err := p.proverPriorityPersist.Get(key); err != nil {
log.Error("persistent priority record read failure", "error", err, "key", key)
} else if v != "" {
log.Debug("restore record from persistent layer", "key", key, "value", v)
return v, true
}
}
return value, exists
}
// Set sets the priority upstream for a given key
func (p *PriorityUpstreamManager) Set(key, value string) {
defer func() {
if err := p.proverPriorityPersist.Update(key, value); err != nil {
log.Error("update priority record failure", "error", err, "key", key, "value", value)
}
}()
p.Lock()
defer p.Unlock()
p.data[key] = value
}
// Delete removes the priority upstream for a given key
func (p *PriorityUpstreamManager) Delete(key string) {
defer func() {
if err := p.proverPriorityPersist.Del(key); err != nil {
log.Error("delete priority record failure", "error", err, "key", key)
}
}()
p.Lock()
defer p.Unlock()
delete(p.data, key)
}
// GetTaskController the get prover task api controller
type GetTaskController struct {
proverMgr *ProverManager
clients Clients
priorityUpstream *PriorityUpstreamManager
//workingRnd *rand.Rand
//getTaskAccessCounter *prometheus.CounterVec
}
// NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager, priorityMgr *PriorityUpstreamManager, reg prometheus.Registerer) *GetTaskController {
// TODO: implement proxy get task controller initialization
return &GetTaskController{
priorityUpstream: priorityMgr,
proverMgr: proverMgr,
clients: clients,
}
}
// func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
// // TODO: implement proxy get task access counter
// return nil
// }
// GetTasks get assigned chunk/batch task
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
var getTaskParameter coordinatorType.GetTaskParameter
if err := ctx.ShouldBind(&getTaskParameter); err != nil {
nerr := fmt.Errorf("prover task parameter invalid, err:%w", err)
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
return
}
publicKey, proverName := getSessionData(ctx)
if publicKey == "" {
return
}
session := ptc.proverMgr.Get(publicKey)
if session == nil {
nerr := fmt.Errorf("can not get session for prover %s", proverName)
types.RenderFailure(ctx, types.InternalServerError, nerr)
return
}
getTask := func(cli Client) (error, int) {
log.Debug("Start get task", "up", cli.Name(), "cli", proverName)
upStream := cli.Name()
resp, err := session.GetTask(ctx, &getTaskParameter, cli)
if err != nil {
log.Error("Upstream error for get task", "error", err, "up", upStream, "cli", proverName)
return err, types.ErrCoordinatorGetTaskFailure
} else if resp.ErrCode != types.ErrCoordinatorEmptyProofData {
if resp.ErrCode != 0 {
// simply dispatch the error from upstream to prover
log.Error("Upstream has error resp for get task", "code", resp.ErrCode, "msg", resp.ErrMsg, "up", upStream, "cli", proverName)
return fmt.Errorf("upstream failure %s:", resp.ErrMsg), resp.ErrCode
}
var task coordinatorType.GetTaskSchema
if err = resp.DecodeData(&task); err == nil {
task.TaskID = formUpstreamWithTaskName(upStream, task.TaskID)
ptc.priorityUpstream.Set(publicKey, upStream)
log.Debug("Upstream get task", "up", upStream, "cli", proverName, "taskID", task.TaskID, "taskType", task.TaskType)
types.RenderSuccess(ctx, &task)
return nil, 0
} else {
log.Error("Upstream has wrong data for get task", "error", err, "up", upStream, "cli", proverName)
return fmt.Errorf("decode task fail: %v", err), types.InternalServerError
}
}
return nil, resp.ErrCode
}
// if the priority upstream is set, we try this upstream first until get the task resp or no task resp
priorityUpstream, exist := ptc.priorityUpstream.Get(publicKey)
if exist {
cli := ptc.clients[priorityUpstream]
log.Debug("Try get task from priority stream", "up", priorityUpstream, "cli", proverName)
if cli != nil {
err, code := getTask(cli)
if err != nil {
types.RenderFailure(ctx, code, err)
return
} else if code == 0 {
// get task done and rendered, return
return
}
// only continue if get empty task (the task has been removed in upstream)
log.Debug("can not get priority task from upstream", "up", priorityUpstream, "cli", proverName)
} else {
log.Warn("A upstream is removed or lost for some reason while running", "up", priorityUpstream, "cli", proverName)
}
}
ptc.priorityUpstream.Delete(publicKey)
// Create a slice to hold the keys
keys := make([]string, 0, len(ptc.clients))
for k := range ptc.clients {
keys = append(keys, k)
}
// Shuffle the keys so provers are spread across upstreams (the global RNG needs no explicit seeding)
rand.Shuffle(len(keys), func(i, j int) {
keys[i], keys[j] = keys[j], keys[i]
})
// Iterate over the shuffled keys
for _, n := range keys {
if err, code := getTask(ptc.clients[n]); err == nil && code == 0 {
// get task done
return
}
}
log.Debug("get no task from upstream", "cli", proverName)
// if all get task failed, throw empty proof resp
types.RenderFailure(ctx, types.ErrCoordinatorEmptyProofData, fmt.Errorf("get empty prover task"))
}
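
The controller relies on PriorityUpstreamManager to keep get_task sticky to the upstream that issued a running task; a usage sketch (hypothetical key and upstream name):

// Sketch: remember which upstream issued a task, then prefer it next time.
pm := NewPriorityUpstreamManager() // in-memory; the persistent variant takes a *gorm.DB
pm.Set("0xpublickey", "sepolia")
if up, ok := pm.Get("0xpublickey"); ok {
    fmt.Println("try this upstream first:", up) // "sepolia"
}
pm.Delete("0xpublickey") // fall back to shuffling all upstreams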

View File

@@ -0,0 +1,125 @@
package proxy
import (
"time"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"scroll-tech/coordinator/internal/types"
)
type proverDataPersist struct {
db *gorm.DB
}
// NewProverDataPersist creates a persistence instance backed by a gorm DB.
func NewProverDataPersist(db *gorm.DB) *proverDataPersist {
return &proverDataPersist{db: db}
}
// gorm model mapping to table `prover_sessions`
type proverSessionRecord struct {
PublicKey string `gorm:"column:public_key;not null"`
Upstream string `gorm:"column:upstream;not null"`
UpToken string `gorm:"column:up_token;not null"`
Expired time.Time `gorm:"column:expired;not null"`
}
func (proverSessionRecord) TableName() string { return "prover_sessions" }
// priority_upstream model
type priorityUpstreamRecord struct {
PublicKey string `gorm:"column:public_key;not null"`
Upstream string `gorm:"column:upstream;not null"`
}
func (priorityUpstreamRecord) TableName() string { return "priority_upstream" }
// Get retrieves the stored prover session for a given user key; returns nil if none exists yet
func (p *proverDataPersist) Get(userKey string) (*proverSession, error) {
if p == nil || p.db == nil {
return nil, nil
}
var rows []proverSessionRecord
if err := p.db.Where("public_key = ?", userKey).Find(&rows).Error; err != nil || len(rows) == 0 {
return nil, err
}
ret := &proverSession{
proverToken: make(map[string]loginToken),
}
for _, r := range rows {
ls := &types.LoginSchema{
Token: r.UpToken,
Time: r.Expired,
}
ret.proverToken[r.Upstream] = loginToken{LoginSchema: ls}
}
return ret, nil
}
func (p *proverDataPersist) Update(userKey, up string, login *types.LoginSchema) error {
if p == nil || p.db == nil || login == nil {
return nil
}
rec := proverSessionRecord{
PublicKey: userKey,
Upstream: up,
UpToken: login.Token,
Expired: login.Time,
}
return p.db.Clauses(
clause.OnConflict{
Columns: []clause.Column{{Name: "public_key"}, {Name: "upstream"}},
DoUpdates: clause.AssignmentColumns([]string{"up_token", "expired"}),
},
).Create(&rec).Error
}
type proverPriorityPersist struct {
db *gorm.DB
}
func NewProverPriorityPersist(db *gorm.DB) *proverPriorityPersist {
return &proverPriorityPersist{db: db}
}
func (p *proverPriorityPersist) Get(userKey string) (string, error) {
if p == nil || p.db == nil {
return "", nil
}
var rec priorityUpstreamRecord
if err := p.db.Where("public_key = ?", userKey).First(&rec).Error; err != nil {
if err != gorm.ErrRecordNotFound {
return "", err
} else {
return "", nil
}
}
return rec.Upstream, nil
}
func (p *proverPriorityPersist) Update(userKey, up string) error {
if p == nil || p.db == nil {
return nil
}
rec := priorityUpstreamRecord{PublicKey: userKey, Upstream: up}
return p.db.Clauses(
clause.OnConflict{
Columns: []clause.Column{{Name: "public_key"}},
DoUpdates: clause.Assignments(map[string]interface{}{"upstream": up}),
},
).Create(&rec).Error
}
func (p *proverPriorityPersist) Del(userKey string) error {
if p == nil || p.db == nil {
return nil
}
return p.db.Where("public_key = ?", userKey).Delete(&priorityUpstreamRecord{}).Error
}
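
Both persistence helpers degrade to no-ops when no DB is configured; a sketch of the priority upsert (hypothetical values; db may be nil):

// Sketch: Update inserts, or overwrites on conflict over public_key;
// every method is a safe no-op when db is nil.
p := NewProverPriorityPersist(db)
_ = p.Update("0xpublickey", "sepolia") // upsert
up, _ := p.Get("0xpublickey")          // "" when no record exists
_ = p.Del("0xpublickey")
_ = up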

View File

@@ -0,0 +1,285 @@
package proxy
import (
"context"
"fmt"
"math"
"sync"
"gorm.io/gorm"
"github.com/scroll-tech/go-ethereum/log"
ctypes "scroll-tech/common/types"
"scroll-tech/coordinator/internal/types"
)
type ProverManager struct {
sync.RWMutex
data map[string]*proverSession
willDeprecatedData map[string]*proverSession
sizeLimit int
persistent *proverDataPersist
}
func NewProverManager(size int) *ProverManager {
return &ProverManager{
data: make(map[string]*proverSession),
willDeprecatedData: make(map[string]*proverSession),
sizeLimit: size,
}
}
func NewProverManagerWithPersistent(size int, db *gorm.DB) *ProverManager {
return &ProverManager{
data: make(map[string]*proverSession),
willDeprecatedData: make(map[string]*proverSession),
sizeLimit: size,
persistent: NewProverDataPersist(db),
}
}
// Get retrieves the proverSession for a given user key; returns nil if none exists yet
func (m *ProverManager) Get(userKey string) (ret *proverSession) {
defer func() {
if ret == nil {
var err error
ret, err = m.persistent.Get(userKey)
if err != nil {
log.Error("Get persistent layer for prover tokens fail", "error", err)
} else if ret != nil {
log.Debug("restore record from persistent", "key", userKey, "token", ret.proverToken)
ret.persistent = m.persistent
}
}
if ret != nil {
m.Lock()
m.data[userKey] = ret
m.Unlock()
}
}()
m.RLock()
defer m.RUnlock()
if r, existed := m.data[userKey]; existed {
return r
} else {
return m.willDeprecatedData[userKey]
}
}
func (m *ProverManager) GetOrCreate(userKey string) *proverSession {
if ret := m.Get(userKey); ret != nil {
return ret
}
m.Lock()
defer m.Unlock()
ret := &proverSession{
proverToken: make(map[string]loginToken),
persistent: m.persistent,
}
if len(m.data) >= m.sizeLimit {
m.willDeprecatedData = m.data
m.data = make(map[string]*proverSession)
}
m.data[userKey] = ret
return ret
}
type loginToken struct {
*types.LoginSchema
phase uint
}
// proverSession tracks the per-upstream login tokens of a single prover
type proverSession struct {
persistent *proverDataPersist
sync.RWMutex
proverToken map[string]loginToken
completionCtx context.Context
}
func (c *proverSession) maintainLogin(ctx context.Context, cliMgr Client, up string, param *types.LoginParameter, phase uint) (result loginToken, nerr error) {
c.Lock()
curPhase := c.proverToken[up].phase
if c.completionCtx != nil {
waitctx := c.completionCtx
c.Unlock()
select {
case <-waitctx.Done():
return c.maintainLogin(ctx, cliMgr, up, param, phase)
case <-ctx.Done():
nerr = fmt.Errorf("ctx fail")
return
}
}
if phase < curPhase {
// outdate login phase, give up
log.Debug("drop outdated proxy login attempt", "upstream", up, "cli", param.Message.ProverName, "phase", phase, "now", curPhase)
defer c.Unlock()
return c.proverToken[up], nil
}
// occupy the update slot
completeCtx, cf := context.WithCancel(ctx)
defer cf()
c.completionCtx = completeCtx
defer func() {
c.Lock()
c.completionCtx = nil
if result.LoginSchema != nil {
c.proverToken[up] = result
log.Info("maintain login status", "upstream", up, "cli", param.Message.ProverName, "phase", curPhase+1)
}
c.Unlock()
if nerr != nil {
log.Error("maintain login fail", "error", nerr, "upstream", up, "cli", param.Message.ProverName, "phase", curPhase)
}
}()
c.Unlock()
log.Debug("start proxy login process", "upstream", up, "cli", param.Message.ProverName)
cli := cliMgr.ClientAsProxy(ctx)
if cli == nil {
nerr = fmt.Errorf("get upstream cli fail")
return
}
resp, err := cli.ProxyLogin(ctx, param)
if err != nil {
nerr = fmt.Errorf("proxylogin fail: %v", err)
return
}
if resp.ErrCode == ctypes.ErrJWTTokenExpired {
log.Info("up stream has expired, renew upstream connection", "up", up)
cli.Reset()
cli = cliMgr.ClientAsProxy(ctx)
if cli == nil {
nerr = fmt.Errorf("get upstream cli fail (secondary try)")
return
}
// like SDK, we would try one more time if the upstream token is expired
resp, err = cli.ProxyLogin(ctx, param)
if err != nil {
nerr = fmt.Errorf("proxylogin fail: %v", err)
return
}
}
if resp.ErrCode != 0 {
nerr = fmt.Errorf("upstream fail: %d (%s)", resp.ErrCode, resp.ErrMsg)
return
}
var loginResult loginSchema
if err := resp.DecodeData(&loginResult); err != nil {
nerr = err
return
}
log.Debug("Proxy login done", "upstream", up, "cli", param.Message.ProverName)
result = loginToken{
LoginSchema: &types.LoginSchema{
Token: loginResult.Token,
},
phase: curPhase + 1,
}
return
}
// const expireTolerant = 10 * time.Minute
// ProxyLogin makes a POST request to /v1/proxy_login with LoginParameter
func (c *proverSession) ProxyLogin(ctx context.Context, cli Client, param *types.LoginParameter) error {
up := cli.Name()
c.RLock()
existedToken := c.proverToken[up]
c.RUnlock()
newtoken, err := c.maintainLogin(ctx, cli, up, param, math.MaxUint)
if newtoken.phase > existedToken.phase {
if err := c.persistent.Update(param.PublicKey, up, newtoken.LoginSchema); err != nil {
log.Error("Update persistent layer for prover tokens fail", "error", err)
}
}
return err
}
// GetTask makes a POST request to /v1/get_task with GetTaskParameter
func (c *proverSession) GetTask(ctx context.Context, param *types.GetTaskParameter, cliMgr Client) (*ctypes.Response, error) {
up := cliMgr.Name()
c.RLock()
log.Debug("call get task", "up", up, "tokens", c.proverToken)
token := c.proverToken[up]
c.RUnlock()
if token.LoginSchema != nil {
resp, err := cliMgr.Client(token.Token).GetTask(ctx, param)
if err != nil {
return nil, err
}
if resp.ErrCode != ctypes.ErrJWTTokenExpired {
return resp, nil
}
}
// like the SDK, retry once if the upstream token has expired
// fetch the login parameter cached on the request context
loginParam, ok := ctx.Value(LoginParamCache).(*types.LoginParameter)
if !ok {
return nil, fmt.Errorf("Unexpected error, no loginparam ctx value")
}
newToken, err := c.maintainLogin(ctx, cliMgr, up, loginParam, token.phase)
if err != nil {
return nil, fmt.Errorf("update prover token fail: %v", err)
}
return cliMgr.Client(newToken.Token).GetTask(ctx, param)
}
// SubmitProof makes a POST request to /v1/submit_proof with SubmitProofParameter
func (c *proverSession) SubmitProof(ctx context.Context, param *types.SubmitProofParameter, cliMgr Client) (*ctypes.Response, error) {
up := cliMgr.Name()
c.RLock()
token := c.proverToken[up]
c.RUnlock()
if token.LoginSchema != nil {
resp, err := cliMgr.Client(token.Token).SubmitProof(ctx, param)
if err != nil {
return nil, err
}
if resp.ErrCode != ctypes.ErrJWTTokenExpired {
return resp, nil
}
}
// like the SDK, retry once if the upstream token has expired
// fetch the login parameter cached on the request context
loginParam, ok := ctx.Value(LoginParamCache).(*types.LoginParameter)
if !ok {
return nil, fmt.Errorf("Unexpected error, no loginparam ctx value")
}
newToken, err := c.maintainLogin(ctx, cliMgr, up, loginParam, token.phase)
if err != nil {
return nil, fmt.Errorf("update prover token fail: %v", err)
}
return cliMgr.Client(newToken.Token).SubmitProof(ctx, param)
}
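// Illustrative refactor sketch (an assumption, not part of this change):
// GetTask and SubmitProof share the same expired-token recovery flow, which
// could be factored into a helper like withToken below; `call` stands for
// the concrete upstream invocation.
//
//	func (c *proverSession) withToken(ctx context.Context, cliMgr Client,
//		call func(cli Client) (*ctypes.Response, error)) (*ctypes.Response, error) {
//		up := cliMgr.Name()
//		c.RLock()
//		token := c.proverToken[up]
//		c.RUnlock()
//		if token.LoginSchema != nil {
//			resp, err := call(cliMgr.Client(token.Token))
//			if err != nil || resp.ErrCode != ctypes.ErrJWTTokenExpired {
//				return resp, err
//			}
//		}
//		loginParam, ok := ctx.Value(LoginParamCache).(*types.LoginParameter)
//		if !ok {
//			return nil, fmt.Errorf("unexpected error: no login parameter in context")
//		}
//		newToken, err := c.maintainLogin(ctx, cliMgr, up, loginParam, token.phase)
//		if err != nil {
//			return nil, fmt.Errorf("update prover token fail: %v", err)
//		}
//		return call(cliMgr.Client(newToken.Token))
//	}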

View File

@@ -0,0 +1,107 @@
package proxy
import (
"testing"
)
// TestProverManagerGetAndCreate validates basic creation and retrieval semantics.
func TestProverManagerGetAndCreate(t *testing.T) {
pm := NewProverManager(2)
if got := pm.Get("user1"); got != nil {
t.Fatalf("expected nil for non-existent key, got: %+v", got)
}
sess1 := pm.GetOrCreate("user1")
if sess1 == nil {
t.Fatalf("expected non-nil session from GetOrCreate")
}
// Should be stable on subsequent Get
if got := pm.Get("user1"); got != sess1 {
t.Fatalf("expected same session pointer on Get, got different instance: %p vs %p", got, sess1)
}
}
// TestProverManagerRolloverAndPromotion verifies rollover when sizeLimit is reached
// and that old entries are accessible and promoted back to active data map.
func TestProverManagerRolloverAndPromotion(t *testing.T) {
pm := NewProverManager(2)
s1 := pm.GetOrCreate("u1")
s2 := pm.GetOrCreate("u2")
if s1 == nil || s2 == nil {
t.Fatalf("expected sessions to be created for u1/u2")
}
// Precondition: data should contain 2 entries, no deprecated yet.
pm.RLock()
if len(pm.data) != 2 {
pm.RUnlock()
t.Fatalf("expected data len=2 before rollover, got %d", len(pm.data))
}
if len(pm.willDeprecatedData) != 0 {
pm.RUnlock()
t.Fatalf("expected willDeprecatedData len=0 before rollover, got %d", len(pm.willDeprecatedData))
}
pm.RUnlock()
// Trigger rollover by creating a third key.
s3 := pm.GetOrCreate("u3")
if s3 == nil {
t.Fatalf("expected session for u3 after rollover")
}
// After rollover: current data should only have u3, deprecated should hold u1 and u2.
pm.RLock()
if len(pm.data) != 1 {
pm.RUnlock()
t.Fatalf("expected data len=1 after rollover (only u3), got %d", len(pm.data))
}
if _, ok := pm.data["u3"]; !ok {
pm.RUnlock()
t.Fatalf("expected 'u3' to be in active data after rollover")
}
if len(pm.willDeprecatedData) != 2 {
pm.RUnlock()
t.Fatalf("expected willDeprecatedData len=2 after rollover, got %d", len(pm.willDeprecatedData))
}
pm.RUnlock()
// Accessing an old key should return the same pointer and promote it to active data map.
got1 := pm.Get("u1")
if got1 != s1 {
t.Fatalf("expected same pointer for u1 after promotion, got %p want %p", got1, s1)
}
// The promotion should add it to active data (without enforcing size limit on promotion).
pm.RLock()
if _, ok := pm.data["u1"]; !ok {
pm.RUnlock()
t.Fatalf("expected 'u1' to be present in active data after promotion")
}
if len(pm.data) != 2 {
// Now should contain u3 and u1
pm.RUnlock()
t.Fatalf("expected data len=2 after promotion of u1, got %d", len(pm.data))
}
pm.RUnlock()
// Access the other deprecated key and ensure behavior is consistent.
got2 := pm.Get("u2")
if got2 != s2 {
t.Fatalf("expected same pointer for u2 after promotion, got %p want %p", got2, s2)
}
pm.RLock()
if _, ok := pm.data["u2"]; !ok {
pm.RUnlock()
t.Fatalf("expected 'u2' to be present in active data after promotion")
}
// Note: promotion does not enforce sizeLimit, so data can grow beyond sizeLimit after promotions.
if len(pm.data) != 3 {
pm.RUnlock()
t.Fatalf("expected data len=3 after promoting both u1 and u2, got %d", len(pm.data))
}
pm.RUnlock()
}
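// Minimal sketch (an assumption reconstructed from the assertions above) of
// the two-map rollover this test exercises; newProverSession is hypothetical.
//
//	func (m *ProverManager) Get(key string) *proverSession {
//		m.Lock()
//		defer m.Unlock()
//		if s, ok := m.data[key]; ok {
//			return s
//		}
//		if s, ok := m.willDeprecatedData[key]; ok {
//			m.data[key] = s // promote back; no size-limit check here
//			return s
//		}
//		return nil
//	}
//
//	func (m *ProverManager) GetOrCreate(key string) *proverSession {
//		if s := m.Get(key); s != nil {
//			return s
//		}
//		m.Lock()
//		defer m.Unlock()
//		if len(m.data) >= m.sizeLimit {
//			m.willDeprecatedData = m.data // rollover: demote the whole active map
//			m.data = make(map[string]*proverSession)
//		}
//		s := newProverSession()
//		m.data[key] = s
//		return s
//	}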

View File

@@ -0,0 +1,94 @@
package proxy
import (
"fmt"
"strings"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types"
"scroll-tech/coordinator/internal/config"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// SubmitProofController the submit proof api controller
type SubmitProofController struct {
proverMgr *ProverManager
clients Clients
priorityUpstream *PriorityUpstreamManager
}
// NewSubmitProofController create the submit proof api controller instance
func NewSubmitProofController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager, priorityMgr *PriorityUpstreamManager, reg prometheus.Registerer) *SubmitProofController {
return &SubmitProofController{
proverMgr: proverMgr,
clients: clients,
priorityUpstream: priorityMgr,
}
}
func upstreamFromTaskName(taskID string) (string, string) {
parts, rest, found := strings.Cut(taskID, ":")
if found {
return parts, rest
}
return "", parts
}
func formUpstreamWithTaskName(upstream string, taskID string) string {
return fmt.Sprintf("%s:%s", upstream, taskID)
}
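// Round-trip example (illustrative): the proxy prefixes the upstream name
// onto the task ID it hands out, and strips it again on submission.
//
//	id := formUpstreamWithTaskName("coordinator_0", "task-abc") // "coordinator_0:task-abc"
//	up, rest := upstreamFromTaskName(id)                        // "coordinator_0", "task-abc"
//	up, rest = upstreamFromTaskName("task-abc")                 // "", "task-abc" (no prefix)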
// SubmitProof prover submit the proof to coordinator
func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
var submitParameter coordinatorType.SubmitProofParameter
if err := ctx.ShouldBind(&submitParameter); err != nil {
nerr := fmt.Errorf("prover submitProof parameter invalid, err:%w", err)
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
return
}
publicKey, proverName := getSessionData(ctx)
if publicKey == "" {
return
}
session := spc.proverMgr.Get(publicKey)
if session == nil {
nerr := fmt.Errorf("can not get session for prover %s", proverName)
types.RenderFailure(ctx, types.InternalServerError, nerr)
return
}
upstream, realTaskID := upstreamFromTaskName(submitParameter.TaskID)
cli, existed := spc.clients[upstream]
if !existed {
log.Warn("A upstream for submitting is removed or lost for some reason while running", "up", upstream)
nerr := fmt.Errorf("Invalid upstream name (%s) from taskID %s", upstream, submitParameter.TaskID)
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
return
}
log.Debug("Start submitting", "up", upstream, "cli", proverName, "id", realTaskID, "status", submitParameter.Status)
submitParameter.TaskID = realTaskID
resp, err := session.SubmitProof(ctx, &submitParameter, cli)
if err != nil {
log.Error("Upstream has error resp for submit", "error", err, "up", upstream, "cli", proverName, "taskID", realTaskID)
types.RenderFailure(ctx, types.ErrCoordinatorGetTaskFailure, err)
return
} else if resp.ErrCode != 0 {
log.Error("Upstream has error resp for get task", "code", resp.ErrCode, "msg", resp.ErrMsg, "up", upstream, "cli", proverName, "taskID", realTaskID)
// simply dispatch the error from upstream to prover
types.RenderFailure(ctx, resp.ErrCode, fmt.Errorf("%s", resp.ErrMsg))
return
} else {
log.Debug("Submit proof to upstream", "up", upstream, "cli", proverName, "taskID", realTaskID)
spc.priorityUpstream.Delete(publicKey)
types.RenderSuccess(ctx, resp.Data)
return
}
}

View File

@@ -1,6 +1,7 @@
package auth
import (
"context"
"errors"
"fmt"
"strings"
@@ -19,45 +20,72 @@ import (
// LoginLogic the auth logic
type LoginLogic struct {
cfg *config.Config
challengeOrm *orm.Challenge
cfg *config.VerifierConfig
deduplicator ChallengeDeduplicator
openVmVks map[string]struct{}
proverVersionHardForkMap map[string]string
}
type ChallengeDeduplicator interface {
InsertChallenge(ctx context.Context, challengeString string) error
}
type SimpleDeduplicator struct {
}
func (s *SimpleDeduplicator) InsertChallenge(ctx context.Context, challengeString string) error {
return nil
}
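// Illustrative alternative (not part of this change): an in-memory
// deduplicator satisfying ChallengeDeduplicator for deployments without a
// database; assumes "sync" and "errors" are imported.
//
//	type memDeduplicator struct {
//		mu   sync.Mutex
//		seen map[string]struct{}
//	}
//
//	func (m *memDeduplicator) InsertChallenge(ctx context.Context, challenge string) error {
//		m.mu.Lock()
//		defer m.mu.Unlock()
//		if _, used := m.seen[challenge]; used {
//			return errors.New("challenge already used")
//		}
//		m.seen[challenge] = struct{}{}
//		return nil
//	}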
// NewLoginLogicWithSimpleDeduplicator news a LoginLogic that does not use the db to deduplicate challenges
func NewLoginLogicWithSimpleDeduplicator(vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
return newLoginLogic(&SimpleDeduplicator{}, vcfg, vf)
}
// NewLoginLogic new a LoginLogic
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
func NewLoginLogic(db *gorm.DB, vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
return newLoginLogic(orm.NewChallenge(db), vcfg, vf)
}
func newLoginLogic(deduplicator ChallengeDeduplicator, vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
proverVersionHardForkMap := make(map[string]string)
for _, cfg := range cfg.ProverManager.Verifier.Verifiers {
for _, cfg := range vcfg.Verifiers {
proverVersionHardForkMap[cfg.ForkName] = cfg.MinProverVersion
}
return &LoginLogic{
cfg: cfg,
cfg: vcfg,
openVmVks: vf.OpenVMVkMap,
challengeOrm: orm.NewChallenge(db),
deduplicator: deduplicator,
proverVersionHardForkMap: proverVersionHardForkMap,
}
}
// InsertChallengeString inserts the challenge string and checks whether it already exists
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge)
}
func (l *LoginLogic) Check(login *types.LoginParameter) error {
// Verify the completeness of login message
func VerifyMsg(login *types.LoginParameter) error {
verify, err := login.Verify()
if err != nil || !verify {
log.Error("auth message verify failure", "prover_name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
return errors.New("auth message verify failure")
}
return nil
}
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.MinProverVersion, login.Message.ProverVersion)
// InsertChallengeString inserts the challenge string and checks whether it already exists
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
return l.deduplicator.InsertChallenge(ctx.Copy(), challenge)
}
// CompatiblityCheck checks whether the logging-in prover is compatible with the coordinator's settings
func (l *LoginLogic) CompatiblityCheck(login *types.LoginParameter) error {
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.MinProverVersion, login.Message.ProverVersion)
}
vks := make(map[string]struct{})
@@ -65,27 +93,32 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
vks[vk] = struct{}{}
}
for _, vk := range login.Message.VKs {
if _, ok := vks[vk]; !ok {
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
version.Version, login.Message.ProverVersion)
// newer coordinator/proxy versions do not check vks during login; this code remains only for backward compatibility
if len(vks) != 0 {
for _, vk := range login.Message.VKs {
if _, ok := vks[vk]; !ok {
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
version.Version, login.Message.ProverVersion)
}
// if the prover reports a same prover version
return errors.New("incompatible vk. please check your params files or config files")
}
// if the prover reports a same prover version
return errors.New("incompatible vk. please check your params files or config files")
}
}
if login.Message.ProverProviderType != types.ProverProviderTypeInternal && login.Message.ProverProviderType != types.ProverProviderTypeExternal {
switch login.Message.ProverProviderType {
case types.ProverProviderTypeInternal:
case types.ProverProviderTypeExternal:
case types.ProverProviderTypeProxy:
case types.ProverProviderTypeUndefined:
// for backward compatibility, set ProverProviderType as internal
if login.Message.ProverProviderType == types.ProverProviderTypeUndefined {
login.Message.ProverProviderType = types.ProverProviderTypeInternal
} else {
log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
return errors.New("invalid prover provider type.")
}
login.Message.ProverProviderType = types.ProverProviderTypeInternal
default:
log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
return errors.New("invalid prover provider type.")
}
return nil

View File

@@ -1,20 +1,16 @@
include ../../../../build/common.mk
.PHONY: help fmt clippy test test-ci test-all clean build
all: build
.PHONY: help fmt clippy test test-ci test-all
build:
@cargo build --release -p libzkp-c
@mkdir -p lib
@cp -f ../../../../target/release/$(LIB_ZKP_NAME) lib/
@cp -f ../../../../target/release/libzkp.so lib/
fmt:
@cargo fmt --all -- --check
clean:
@cargo clean --release -p libzkp -p libzkp-c -p l2geth
@rm -f lib/$(LIB_ZKP_NAME)
@rm -f lib/libzkp.so
clippy:
@cargo check --release --all-features

View File

@@ -1,8 +1,9 @@
//go:build !mock_verifier
package libzkp
/*
#cgo linux LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib -Wl,-rpath=${SRCDIR}/lib
#cgo darwin LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib -Wl,-rpath,${SRCDIR}/lib
#cgo LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib -Wl,-rpath=${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "libzkp.h"
@@ -14,8 +15,6 @@ import (
"os"
"strings"
"unsafe"
"scroll-tech/common/types/message"
)
func init() {
@@ -73,31 +72,6 @@ func VerifyBundleProof(proofData, forkName string) bool {
return result != 0
}
// TaskType enum values matching the Rust enum
const (
TaskTypeChunk = 0
TaskTypeBatch = 1
TaskTypeBundle = 2
)
func fromMessageTaskType(taskType int) int {
switch message.ProofType(taskType) {
case message.ProofTypeChunk:
return TaskTypeChunk
case message.ProofTypeBatch:
return TaskTypeBatch
case message.ProofTypeBundle:
return TaskTypeBundle
default:
panic(fmt.Sprintf("unsupported proof type: %d", taskType))
}
}
// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}
// Generate wrapped proof
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
cProofJSON := goToCString(proofJSON)

View File

@@ -0,0 +1,57 @@
//go:build mock_verifier
package libzkp
import (
"encoding/json"
)
// // InitVerifier is a no-op in the mock.
// func InitVerifier(configJSON string) {}
// // VerifyChunkProof returns a fixed success in the mock.
// func VerifyChunkProof(proofData, forkName string) bool {
// return true
// }
// // VerifyBatchProof returns a fixed success in the mock.
// func VerifyBatchProof(proofData, forkName string) bool {
// return true
// }
// // VerifyBundleProof returns a fixed success in the mock.
// func VerifyBundleProof(proofData, forkName string) bool {
// return true
// }
func UniversalTaskCompatibilityFix(taskJSON string) (string, error) {
panic("should not run here")
}
// GenerateWrappedProof returns a fixed dummy proof string in the mock.
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
payload := struct {
Metadata json.RawMessage `json:"metadata"`
Proof json.RawMessage `json:"proof"`
GitVersion string `json:"git_version"`
}{
Metadata: json.RawMessage(metadata),
Proof: json.RawMessage(proofJSON),
GitVersion: "mock-git-version",
}
out, err := json.Marshal(payload)
if err != nil {
panic(err)
}
return string(out)
}
// DumpVk is a no-op and returns nil in the mock.
func DumpVk(forkName, filePath string) error {
return nil
}
// SetDynamicFeature is a no-op in the mock.
func SetDynamicFeature(feats string) {}

View File

@@ -0,0 +1,27 @@
package libzkp
import (
"fmt"
"scroll-tech/common/types/message"
)
// TaskType enum values matching the Rust enum
const (
TaskTypeChunk = 0
TaskTypeBatch = 1
TaskTypeBundle = 2
)
func fromMessageTaskType(taskType int) int {
switch message.ProofType(taskType) {
case message.ProofTypeChunk:
return TaskTypeChunk
case message.ProofTypeBatch:
return TaskTypeBatch
case message.ProofTypeBundle:
return TaskTypeBundle
default:
panic(fmt.Sprintf("unsupported proof type: %d", taskType))
}
}
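// Usage example (illustrative): mapping the Go-side proof type onto the
// Rust-facing task type before crossing the FFI boundary.
//
//	t := fromMessageTaskType(int(message.ProofTypeBatch)) // == TaskTypeBatch (1)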

View File

@@ -5,6 +5,7 @@ package libzkp
import (
"encoding/json"
"fmt"
"strings"
"scroll-tech/common/types/message"
@@ -14,6 +15,10 @@ import (
func InitL2geth(configJSON string) {
}
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
fmt.Printf("call mocked generate universal task %d, taskJson %s\n", taskType, taskJSON)

View File

@@ -7,7 +7,10 @@ package libzkp
#include "libzkp.h"
*/
import "C" //nolint:typecheck
import "unsafe"
import (
"strings"
"unsafe"
)
// Initialize the handler for universal task
func InitL2geth(configJSON string) {
@@ -17,6 +20,11 @@ func InitL2geth(configJSON string) {
C.init_l2geth(cConfig)
}
// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
cTask := goToCString(taskJSON)
cForkName := goToCString(forkName)

View File

@@ -14,7 +14,7 @@ import (
)
// ChallengeMiddleware jwt challenge middleware
func ChallengeMiddleware(conf *config.Config) *jwt.GinJWTMiddleware {
func ChallengeMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
Authenticator: func(c *gin.Context) (interface{}, error) {
return nil, nil
@@ -30,8 +30,8 @@ func ChallengeMiddleware(conf *config.Config) *jwt.GinJWTMiddleware {
}
},
Unauthorized: unauthorized,
Key: []byte(conf.Auth.Secret),
Timeout: time.Second * time.Duration(conf.Auth.ChallengeExpireDurationSec),
Key: []byte(auth.Secret),
Timeout: time.Second * time.Duration(auth.ChallengeExpireDurationSec),
TokenLookup: "header: Authorization, query: token, cookie: jwt",
TokenHeadName: "Bearer",
TimeFunc: time.Now,

View File

@@ -4,22 +4,57 @@ import (
"time"
jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/proxy"
"scroll-tech/coordinator/internal/types"
)
func nonIdendityAuthorizator(data interface{}, _ *gin.Context) bool {
return data != nil
}
// LoginMiddleware jwt auth middleware
func LoginMiddleware(conf *config.Config) *jwt.GinJWTMiddleware {
func LoginMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
PayloadFunc: api.Auth.PayloadFunc,
IdentityHandler: api.Auth.IdentityHandler,
IdentityKey: types.PublicKey,
Key: []byte(conf.Auth.Secret),
Timeout: time.Second * time.Duration(conf.Auth.LoginExpireDurationSec),
Key: []byte(auth.Secret),
Timeout: time.Second * time.Duration(auth.LoginExpireDurationSec),
Authenticator: api.Auth.Login,
Authorizator: nonIdendityAuthorizator,
Unauthorized: unauthorized,
TokenLookup: "header: Authorization, query: token, cookie: jwt",
TokenHeadName: "Bearer",
TimeFunc: time.Now,
LoginResponse: loginResponse,
})
if err != nil {
log.Crit("new jwt middleware panic", "error", err)
}
if errInit := jwtMiddleware.MiddlewareInit(); errInit != nil {
log.Crit("init jwt middleware panic", "error", errInit)
}
return jwtMiddleware
}
// ProxyLoginMiddleware jwt auth middleware for proxy login
func ProxyLoginMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
PayloadFunc: proxy.Auth.PayloadFunc,
IdentityHandler: proxy.Auth.IdentityHandler,
IdentityKey: types.PublicKey,
Key: []byte(auth.Secret),
Timeout: time.Second * time.Duration(auth.LoginExpireDurationSec),
Authenticator: proxy.Auth.Login,
Authorizator: nonIdendityAuthorizator,
Unauthorized: unauthorized,
TokenLookup: "header: Authorization, query: token, cookie: jwt",
TokenHeadName: "Bearer",

View File

@@ -28,8 +28,8 @@ func TestMain(m *testing.M) {
defer func() {
if testApps != nil {
testApps.Free()
tearDownEnv(t)
}
tearDownEnv(t)
}()
m.Run()
}

View File

@@ -8,6 +8,7 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/proxy"
"scroll-tech/coordinator/internal/middleware"
)
@@ -25,16 +26,45 @@ func Route(router *gin.Engine, cfg *config.Config, reg prometheus.Registerer) {
func v1(router *gin.RouterGroup, conf *config.Config) {
r := router.Group("/v1")
challengeMiddleware := middleware.ChallengeMiddleware(conf)
challengeMiddleware := middleware.ChallengeMiddleware(conf.Auth)
r.GET("/challenge", challengeMiddleware.LoginHandler)
loginMiddleware := middleware.LoginMiddleware(conf)
loginMiddleware := middleware.LoginMiddleware(conf.Auth)
r.POST("/login", challengeMiddleware.MiddlewareFunc(), loginMiddleware.LoginHandler)
// need jwt token api
r.Use(loginMiddleware.MiddlewareFunc())
{
r.POST("/proxy_login", loginMiddleware.LoginHandler)
r.POST("/get_task", api.GetTask.GetTasks)
r.POST("/submit_proof", api.SubmitProof.SubmitProof)
}
}
// ProxyRoute registers routes for the coordinator proxy
func ProxyRoute(router *gin.Engine, cfg *config.ProxyConfig, reg prometheus.Registerer) {
router.Use(gin.Recovery())
observability.Use(router, "coordinator", reg)
r := router.Group("coordinator")
v1_proxy(r, cfg)
}
func v1_proxy(router *gin.RouterGroup, conf *config.ProxyConfig) {
r := router.Group("/v1")
challengeMiddleware := middleware.ChallengeMiddleware(conf.ProxyManager.Auth)
r.GET("/challenge", challengeMiddleware.LoginHandler)
loginMiddleware := middleware.ProxyLoginMiddleware(conf.ProxyManager.Auth)
r.POST("/login", challengeMiddleware.MiddlewareFunc(), loginMiddleware.LoginHandler)
// the APIs below require a jwt token
r.Use(loginMiddleware.MiddlewareFunc())
{
r.POST("/get_task", proxy.GetTask.GetTasks)
r.POST("/submit_proof", proxy.SubmitProof.SubmitProof)
}
}

View File

@@ -64,6 +64,8 @@ func (r ProverProviderType) String() string {
return "prover provider type internal"
case ProverProviderTypeExternal:
return "prover provider type external"
case ProverProviderTypeProxy:
return "prover provider type proxy"
default:
return fmt.Sprintf("prover provider type: %d", r)
}
@@ -76,4 +78,6 @@ const (
ProverProviderTypeInternal
// ProverProviderTypeExternal is an external prover provider type
ProverProviderTypeExternal
// ProverProviderTypeProxy is a proxy prover provider type
ProverProviderTypeProxy = 3
)
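// Illustrative note (assuming the iota block starts at Undefined = 0): the
// wire values are Undefined=0, Internal=1, External=2, and Proxy is pinned
// explicitly to 3, so provers that omit the field keep decoding as Undefined.
//
//	fmt.Println(ProverProviderTypeProxy) // "prover provider type proxy"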

View File

@@ -0,0 +1,48 @@
package types
import (
"encoding/json"
"reflect"
"testing"
"scroll-tech/common/types"
)
func TestResponseDecodeData_GetTaskSchema(t *testing.T) {
// Arrange: build a dummy payload and wrap it in Response
in := GetTaskSchema{
UUID: "uuid-123",
TaskID: "task-abc",
TaskType: 1,
UseSnark: true,
TaskData: "dummy-data",
HardForkName: "cancun",
}
resp := types.Response{
ErrCode: 0,
ErrMsg: "",
Data: in,
}
// Act: JSON round-trip the Response to simulate real HTTP encoding/decoding
b, err := json.Marshal(resp)
if err != nil {
t.Fatalf("marshal response: %v", err)
}
var decoded types.Response
if err := json.Unmarshal(b, &decoded); err != nil {
t.Fatalf("unmarshal response: %v", err)
}
var out GetTaskSchema
if err := decoded.DecodeData(&out); err != nil {
t.Fatalf("DecodeData error: %v", err)
}
// Assert: structs match after decode
if !reflect.DeepEqual(in, out) {
t.Fatalf("decoded struct mismatch:\nwant: %+v\n got: %+v", in, out)
}
}

View File

@@ -30,12 +30,14 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/cron"
"scroll-tech/coordinator/internal/controller/proxy"
"scroll-tech/coordinator/internal/orm"
"scroll-tech/coordinator/internal/route"
)
var (
conf *config.Config
conf *config.Config
proxyConf *config.ProxyConfig
testApps *testcontainers.TestcontainerApps
@@ -51,6 +53,9 @@ var (
chunk *encoding.Chunk
batch *encoding.Batch
tokenTimeout int
envSet bool
portUsed map[int64]struct{}
)
func TestMain(m *testing.M) {
@@ -63,18 +68,44 @@ func TestMain(m *testing.M) {
}
func randomURL() string {
id, _ := rand.Int(rand.Reader, big.NewInt(2000-1))
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
return randmURLBatch(1)[0]
}
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
var err error
db, err = testApps.GetGormDBClient()
// randmURLBatch generates a batch of random localhost URLs with distinct ports, similar to randomURL.
func randmURLBatch(n int) []string {
if n <= 0 {
return nil
}
urls := make([]string, 0, n)
if portUsed == nil {
portUsed = make(map[int64]struct{})
}
for len(urls) < n {
id, _ := rand.Int(rand.Reader, big.NewInt(2000-1))
port := 20000 + 2000 + id.Int64()
if _, exist := portUsed[port]; exist {
continue
}
portUsed[port] = struct{}{}
urls = append(urls, fmt.Sprintf("localhost:%d", port))
}
return urls
}
assert.NoError(t, err)
func setupCoordinatorDb(t *testing.T) {
var err error
assert.NotNil(t, db, "setEnv must be called before")
// db, err = testApps.GetGormDBClient()
// assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
}
func launchCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
assert.NotNil(t, db, "db must be set")
tokenTimeout = 60
conf = &config.Config{
@@ -114,6 +145,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
EuclidV2Time: new(uint64),
}, db, nil)
route.Route(router, conf, nil)
t.Log("coordinator server url", coordinatorURL)
srv := &http.Server{
Addr: coordinatorURL,
Handler: router,
@@ -129,7 +161,77 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
return proofCollector, srv
}
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
setupCoordinatorDb(t)
return launchCoordinator(t, proversPerSession, coordinatorURL)
}
func setupProxyDb(t *testing.T) {
assert.NotNil(t, db, "setEnv must be called before")
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetModuleDB(sqlDB, "proxy"))
}
func launchProxy(t *testing.T, proxyURL string, coordinatorURL []string, usePersistent bool) *http.Server {
var err error
assert.NoError(t, err)
coordinators := make(map[string]*config.UpStream)
for i, n := range coordinatorURL {
coordinators[fmt.Sprintf("coordinator_%d", i)] = testProxyUpStreamCfg(n)
}
tokenTimeout = 60
proxyConf = &config.ProxyConfig{
ProxyName: "test_proxy",
ProxyManager: &config.ProxyManager{
Verifier: &config.VerifierConfig{
MinProverVersion: "v4.4.89",
Verifiers: []config.AssetConfig{{
AssetsPath: "",
ForkName: "euclidV2",
}},
},
Client: testProxyClientCfg(),
Auth: &config.Auth{
Secret: "proxy",
ChallengeExpireDurationSec: tokenTimeout,
LoginExpireDurationSec: tokenTimeout,
},
},
Coordinators: coordinators,
}
router := gin.New()
if usePersistent {
proxy.InitController(proxyConf, db, nil)
} else {
proxy.InitController(proxyConf, nil, nil)
}
route.ProxyRoute(router, proxyConf, nil)
t.Log("proxy server url", proxyURL)
srv := &http.Server{
Addr: proxyURL,
Handler: router,
}
go func() {
runErr := srv.ListenAndServe()
if runErr != nil && !errors.Is(runErr, http.ErrServerClosed) {
assert.NoError(t, runErr)
}
}()
time.Sleep(time.Second * 2)
return srv
}
func setEnv(t *testing.T) {
if envSet {
t.Log("SetEnv is re-entried")
return
}
var err error
version.Version = "v4.5.45"
@@ -146,6 +248,7 @@ func setEnv(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
assert.NoError(t, migrate.MigrateModule(sqlDB, "proxy"))
batchOrm = orm.NewBatch(db)
chunkOrm = orm.NewChunk(db)
@@ -169,6 +272,7 @@ func setEnv(t *testing.T) {
assert.NoError(t, err)
batch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk}}
envSet = true
}
func TestApis(t *testing.T) {

View File

@@ -34,6 +34,8 @@ type mockProver struct {
privKey *ecdsa.PrivateKey
proofType message.ProofType
coordinatorURL string
token string
useCacheToken bool
}
func newMockProver(t *testing.T, proverName string, coordinatorURL string, proofType message.ProofType, version string) *mockProver {
@@ -50,6 +52,14 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof
return prover
}
func (r *mockProver) resetConnection(coordinatorURL string) {
r.coordinatorURL = coordinatorURL
}
func (r *mockProver) setUseCacheToken(enable bool) {
r.useCacheToken = enable
}
// connectToCoordinator sets up a websocket client to connect to the prover manager.
func (r *mockProver) connectToCoordinator(t *testing.T, proverTypes []types.ProverType) (string, int, string) {
challengeString := r.challenge(t)
@@ -115,6 +125,7 @@ func (r *mockProver) login(t *testing.T, challengeString string, proverTypes []t
assert.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode())
assert.Empty(t, result.ErrMsg)
r.token = loginData.Token
return loginData.Token, 0, ""
}
@@ -144,11 +155,14 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {
func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*types.GetTaskSchema, int, string) {
// get task from coordinator
token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
if errCode != 0 {
return nil, errCode, errMsg
if !r.useCacheToken || r.token == "" {
token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
if errCode != 0 {
return nil, errCode, errMsg
}
assert.NotEmpty(t, token)
assert.Equal(t, token, r.token)
}
assert.NotEmpty(t, token)
type response struct {
ErrCode int `json:"errcode"`
@@ -160,7 +174,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
client := resty.New()
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.token)).
SetBody(map[string]interface{}{"universal": true, "prover_height": 100, "task_types": []int{int(proofType)}}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
@@ -174,11 +188,14 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
//nolint:unparam
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) {
// get task from coordinator
token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
if errCode != 0 {
return errCode, errMsg
if !r.useCacheToken || r.token == "" {
token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
if errCode != 0 {
return errCode, errMsg
}
assert.NotEmpty(t, token)
assert.Equal(t, token, r.token)
}
assert.NotEmpty(t, token)
type response struct {
ErrCode int `json:"errcode"`
@@ -190,8 +207,8 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
client := resty.New()
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "universal": true}).
SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_types": []int{int(proofType)}, "universal": true}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
@@ -249,10 +266,13 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
Universal: true,
}
token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))})
assert.Equal(t, authErrCode, 0)
assert.Equal(t, errMsg, "")
assert.NotEmpty(t, token)
if !r.useCacheToken || r.token == "" {
token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))})
assert.Equal(t, authErrCode, 0)
assert.Equal(t, errMsg, "")
assert.NotEmpty(t, token)
assert.Equal(t, token, r.token)
}
submitProofData, err := json.Marshal(submitProof)
assert.NoError(t, err)
@@ -262,7 +282,7 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
client := resty.New()
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.token)).
SetBody(string(submitProofData)).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/submit_proof")

View File

@@ -0,0 +1,297 @@
package test
import (
"context"
"fmt"
"net/http"
"strings"
"testing"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/proxy"
)
func testProxyClientCfg() *config.ProxyClient {
return &config.ProxyClient{
Secret: "test-secret-key",
ProxyName: "test-proxy",
ProxyVersion: version.Version,
}
}
var testCompatibileMode bool
func testProxyUpStreamCfg(coordinatorURL string) *config.UpStream {
return &config.UpStream{
BaseUrl: fmt.Sprintf("http://%s", coordinatorURL),
RetryWaitTime: 3,
ConnectionTimeoutSec: 30,
CompatibileMode: testCompatibileMode,
}
}
func testProxyClient(t *testing.T) {
// Setup coordinator and http server.
coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
proofCollector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()
cliCfg := testProxyClientCfg()
upCfg := testProxyUpStreamCfg(coordinatorURL)
clientManager, err := proxy.NewClientManager("test_coordinator", cliCfg, upCfg)
assert.NoError(t, err)
assert.NotNil(t, clientManager)
// Create context with timeout
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Test Client method
client := clientManager.ClientAsProxy(ctx)
// Client should not be nil if login succeeds
// Note: This might be nil if the coordinator is not properly set up for proxy authentication
// but the test validates that the Client method completes without panic
assert.NotNil(t, client)
token1 := client.Token()
assert.NotEmpty(t, token1)
t.Logf("Client token: %s (%v)", token1, client)
if !upCfg.CompatibileMode {
time.Sleep(time.Second * 2)
client.Reset()
client = clientManager.ClientAsProxy(ctx)
assert.NotNil(t, client)
token2 := client.Token()
assert.NotEmpty(t, token2)
t.Logf("Client token (sec): %s (%v)", token2, client)
assert.NotEqual(t, token1, token2, "token should not be identical")
}
}
func testProxyHandshake(t *testing.T) {
// Setup proxy http server.
proxyURL := randomURL()
proxyHttpHandler := launchProxy(t, proxyURL, []string{}, false)
defer func() {
assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
}()
chunkProver := newMockProver(t, "prover_chunk_test", proxyURL, message.ProofTypeChunk, version.Version)
assert.True(t, chunkProver.healthCheckSuccess(t))
}
func testProxyGetTask(t *testing.T) {
// Setup coordinator and http server.
urls := randmURLBatch(2)
coordinatorURL := urls[0]
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()
proxyURL := urls[1]
proxyHttpHandler := launchProxy(t, proxyURL, []string{coordinatorURL}, false)
defer func() {
assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
}()
chunkProver := newMockProver(t, "prover_chunk_test", proxyURL, message.ProofTypeChunk, version.Version)
chunkProver.setUseCacheToken(true)
code, _ := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
assert.Equal(t, int(types.ErrCoordinatorEmptyProofData), code)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
task, code, msg := chunkProver.getProverTask(t, message.ProofTypeChunk)
assert.Empty(t, code)
if code == 0 {
t.Log("get task id", task.TaskID)
} else {
t.Log("get task error msg", msg)
}
}
func testProxyProof(t *testing.T) {
urls := randmURLBatch(3)
coordinatorURL0 := urls[0]
setupCoordinatorDb(t)
collector0, httpHandler0 := launchCoordinator(t, 3, coordinatorURL0)
defer func() {
collector0.Stop()
httpHandler0.Shutdown(context.Background())
}()
coordinatorURL1 := urls[1]
collector1, httpHandler1 := launchCoordinator(t, 3, coordinatorURL1)
defer func() {
collector1.Stop()
httpHandler1.Shutdown(context.Background())
}()
coordinators := map[string]*http.Server{
"coordinator_0": httpHandler0,
"coordinator_1": httpHandler1,
}
proxyURL := urls[2]
proxyHttpHandler := launchProxy(t, proxyURL, []string{coordinatorURL0, coordinatorURL1}, false)
defer func() {
assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
}()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
chunkProver := newMockProver(t, "prover_chunk_test", proxyURL, message.ProofTypeChunk, version.Version)
chunkProver.setUseCacheToken(true)
task, code, msg := chunkProver.getProverTask(t, message.ProofTypeChunk)
assert.Empty(t, code)
if code == 0 {
t.Log("get task", task)
parts, _, _ := strings.Cut(task.TaskID, ":")
// close the coordinators that did not dispatch the task first, so if we submit
// to the wrong target there is a chance the submit fails (hitting a closed coordinator)
for n, srv := range coordinators {
if n != parts {
t.Log("close coordinator", n)
assert.NoError(t, srv.Shutdown(context.Background()))
}
}
expectedProofStatus := verifiedSuccess
chunkProver.submitProof(t, task, expectedProofStatus, types.Success)
} else {
t.Log("get task error msg", msg)
}
// verify proof status
var (
tick = time.Tick(1500 * time.Millisecond)
tickStop = time.Tick(time.Minute)
)
var (
chunkProofStatus types.ProvingStatus
chunkActiveAttempts int16
chunkMaxAttempts int16
)
for {
select {
case <-tick:
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskVerified {
return
}
chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(chunkMaxAttempts))
assert.Equal(t, 0, int(chunkActiveAttempts))
case <-tickStop:
t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String())
return
}
}
}
func testProxyPersistent(t *testing.T) {
urls := randmURLBatch(4)
coordinatorURL0 := urls[0]
setupCoordinatorDb(t)
collector0, httpHandler0 := launchCoordinator(t, 3, coordinatorURL0)
defer func() {
collector0.Stop()
httpHandler0.Shutdown(context.Background())
}()
coordinatorURL1 := urls[1]
collector1, httpHandler1 := launchCoordinator(t, 3, coordinatorURL1)
defer func() {
collector1.Stop()
httpHandler1.Shutdown(context.Background())
}()
setupProxyDb(t)
proxyURL1 := urls[2]
proxyHttpHandler := launchProxy(t, proxyURL1, []string{coordinatorURL0, coordinatorURL1}, true)
defer func() {
assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
}()
proxyURL2 := urls[3]
proxyHttpHandler2 := launchProxy(t, proxyURL2, []string{coordinatorURL0, coordinatorURL1}, true)
defer func() {
assert.NoError(t, proxyHttpHandler2.Shutdown(context.Background()))
}()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
chunkProver := newMockProver(t, "prover_chunk_test", proxyURL1, message.ProofTypeChunk, version.Version)
chunkProver.setUseCacheToken(true)
task, _, _ := chunkProver.getProverTask(t, message.ProofTypeChunk)
assert.NotNil(t, task)
taskFrom, _, _ := strings.Cut(task.TaskID, ":")
t.Log("get task from coordinator:", taskFrom)
chunkProver.resetConnection(proxyURL2)
task, _, _ = chunkProver.getProverTask(t, message.ProofTypeChunk)
assert.NotNil(t, task)
taskFrom2, _, _ := strings.Cut(task.TaskID, ":")
assert.Equal(t, taskFrom, taskFrom2)
}
func TestProxyClient(t *testing.T) {
testCompatibileMode = false
// Set up the test environment.
setEnv(t)
t.Run("TestProxyClient", testProxyClient)
t.Run("TestProxyHandshake", testProxyHandshake)
t.Run("TestProxyGetTask", testProxyGetTask)
t.Run("TestProxyValidProof", testProxyProof)
t.Run("testProxyPersistent", testProxyPersistent)
}
func TestProxyClientCompatibleMode(t *testing.T) {
testCompatibileMode = true
// Set up the test environment.
setEnv(t)
t.Run("TestProxyClient", testProxyClient)
t.Run("TestProxyHandshake", testProxyHandshake)
t.Run("TestProxyGetTask", testProxyGetTask)
t.Run("TestProxyValidProof", testProxyProof)
t.Run("testProxyPersistent", testProxyPersistent)
}

View File

@@ -5,7 +5,7 @@ use alloy::{
};
use eyre::Result;
use libzkp::tasks::ChunkInterpreter;
use sbv_primitives::types::{Network, consensus::TxL1Message};
use sbv_primitives::types::{consensus::TxL1Message, Network};
use serde::{Deserialize, Serialize};
fn default_max_retry() -> u32 {

View File

@@ -3,15 +3,12 @@ name = "libzkp"
version.workspace = true
edition.workspace = true
[lib]
crate-type = ["rlib", "cdylib"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
scroll-zkvm-types = { workspace = true, features = ["scroll"] }
scroll-zkvm-verifier.workspace = true
alloy-primitives.workspace = true # suppress the effect of "native-keccak"
alloy-primitives.workspace = true #suppress the effect of "native-keccak"
sbv-primitives = {workspace = true, features = ["scroll-compress-info", "scroll"]}
sbv-core = { workspace = true, features = ["scroll"] }
base64.workspace = true

View File

@@ -138,10 +138,7 @@ pub fn gen_universal_task(
// always respect the fork_name_str (which has been normalized) being passed
// if the fork_name wrapped in task is not match, consider it a malformed task
if fork_name_str != task.fork_name.as_str() {
eyre::bail!(
"fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}",
task.fork_name
);
eyre::bail!("fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
if fork_name_str != version.fork.as_str() {
eyre::bail!(
@@ -159,10 +156,7 @@ pub fn gen_universal_task(
task.fork_name = task.fork_name.to_lowercase();
let version = Version::from(task.version);
if fork_name_str != task.fork_name.as_str() {
eyre::bail!(
"fork name in batch task not match the calling arg, expected {fork_name_str}, get {}",
task.fork_name
);
eyre::bail!("fork name in batch task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
if fork_name_str != version.fork.as_str() {
eyre::bail!(
@@ -180,10 +174,7 @@ pub fn gen_universal_task(
task.fork_name = task.fork_name.to_lowercase();
let version = Version::from(task.version);
if fork_name_str != task.fork_name.as_str() {
eyre::bail!(
"fork name in bundle task not match the calling arg, expected {fork_name_str}, get {}",
task.fork_name
);
eyre::bail!("fork name in bundle task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
if fork_name_str != version.fork.as_str() {
eyre::bail!(

View File

@@ -13,7 +13,7 @@ use scroll_zkvm_types::{
utils::{serialize_vk, vec_as_base64},
version,
};
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
/// A wrapper around the actual inner proof.
#[derive(Clone, Serialize, Deserialize)]
@@ -213,7 +213,7 @@ impl<Metadata: ProofMetadata> PersistableProof for WrappedProof<Metadata> {
#[cfg(test)]
mod tests {
use base64::{Engine, prelude::BASE64_STANDARD};
use base64::{prelude::BASE64_STANDARD, Engine};
use sbv_primitives::B256;
use scroll_zkvm_types::{bundle::BundleInfo, proof::EvmProof};

View File

@@ -3,14 +3,14 @@ use eyre::Result;
use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
batch::{
BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderValidium, BatchInfo, BatchWitness,
Envelope, EnvelopeV6, EnvelopeV7, LegacyBatchWitness, N_BLOB_BYTES, ReferenceHeader,
build_point_eval_witness,
build_point_eval_witness, BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderValidium,
BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, LegacyBatchWitness,
ReferenceHeader, N_BLOB_BYTES,
},
chunk::ChunkInfo,
public_inputs::{ForkName, MultiVersionPublicInputs, Version},
task::ProvingTask,
utils::{RancorError, to_rkyv_bytes},
utils::{to_rkyv_bytes, RancorError},
version::{Codec, Domain, STFVersion},
};
@@ -150,32 +150,18 @@ impl BatchProvingTask {
match &self.batch_header {
BatchHeaderV::Validium(_) => assert!(
version.is_validium(),
"version {:?} is not match with parsed header, get validium header but version is not validium",
version,
"version {:?} is not match with parsed header, get validium header but version is not validium", version,
),
BatchHeaderV::V6(_) => assert_eq!(
version.fork,
ForkName::EuclidV1,
BatchHeaderV::V6(_) => assert_eq!(version.fork, ForkName::EuclidV1,
"hardfork mismatch for da-codec@v6 header: found={:?}, expected={:?}",
version.fork,
ForkName::EuclidV1,
),
BatchHeaderV::V7_to_V10(_) => assert!(
matches!(
version.fork,
ForkName::EuclidV2
| ForkName::Feynman
| ForkName::Galileo
| ForkName::GalileoV2
),
matches!(version.fork, ForkName::EuclidV2 | ForkName::Feynman | ForkName::Galileo | ForkName::GalileoV2),
"hardfork mismatch for da-codec@v7/8/9/10 header: found={}, expected={:?}",
version.fork,
[
ForkName::EuclidV2,
ForkName::Feynman,
ForkName::Galileo,
ForkName::GalileoV2
],
[ForkName::EuclidV2, ForkName::Feynman, ForkName::Galileo, ForkName::GalileoV2],
),
}

View File

@@ -17,7 +17,7 @@ pub mod base64 {
pub mod point_eval {
use c_kzg;
use sbv_primitives::{B256 as H256, U256, types::eips::eip4844::BLS_MODULUS};
use sbv_primitives::{types::eips::eip4844::BLS_MODULUS, B256 as H256, U256};
use scroll_zkvm_types::utils::sha256_rv32;
/// Given the blob-envelope, translate it to a fixed size EIP-4844 blob.

View File

@@ -4,7 +4,7 @@ use scroll_zkvm_types::{
bundle::{BundleInfo, BundleWitness, LegacyBundleWitness},
public_inputs::{MultiVersionPublicInputs, Version},
task::ProvingTask,
utils::{RancorError, to_rkyv_bytes},
utils::{to_rkyv_bytes, RancorError},
};
use crate::proofs::BatchProof;

View File

@@ -1,11 +1,11 @@
use eyre::Result;
use sbv_core::BlockWitness;
use sbv_primitives::{B256, types::consensus::BlockHeader};
use sbv_primitives::{types::consensus::BlockHeader, B256};
use scroll_zkvm_types::{
chunk::{ChunkInfo, ChunkWitness, LegacyChunkWitness, ValidiumInputs, execute},
chunk::{execute, ChunkInfo, ChunkWitness, LegacyChunkWitness, ValidiumInputs},
public_inputs::{MultiVersionPublicInputs, Version},
task::ProvingTask,
utils::{RancorError, to_rkyv_bytes},
utils::{to_rkyv_bytes, RancorError},
};
use super::chunk_interpreter::*;
@@ -224,8 +224,8 @@ impl ChunkProvingTask {
attempts += 1;
if attempts >= MAX_FETCH_NODES_ATTEMPTS {
return Err(eyre!(
"failed to fetch nodes after {MAX_FETCH_NODES_ATTEMPTS} attempts: {e}"
));
"failed to fetch nodes after {MAX_FETCH_NODES_ATTEMPTS} attempts: {e}"
));
}
let node_hash =

View File

@@ -1,6 +1,6 @@
use eyre::Result;
use sbv_core::BlockWitness;
use sbv_primitives::{B256, Bytes, types::consensus::TxL1Message};
use sbv_primitives::{types::consensus::TxL1Message, Bytes, B256};
/// An interpreter which is critical in translating chunk data,
/// since we need to fetch block witness and storage node data

View File

@@ -1,12 +1,12 @@
use std::{
panic::{AssertUnwindSafe, catch_unwind},
panic::{catch_unwind, AssertUnwindSafe},
path::Path,
};
use git_version::git_version;
use serde::{
Serialize,
de::{Deserialize, DeserializeOwned},
Serialize,
};
use eyre::Result;

View File

@@ -11,5 +11,5 @@ crate-type = ["cdylib"]
[dependencies]
libzkp = { path = "../libzkp" }
l2geth = { path = "../l2geth"}
tracing-subscriber.workspace = true
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tracing.workspace = true

View File

@@ -1,6 +1,6 @@
mod utils;
use std::ffi::{CString, c_char};
use std::ffi::{c_char, CString};
use libzkp::TaskType;
use utils::{c_char_to_str, c_char_to_vec};
@@ -20,7 +20,7 @@ fn enable_dump() -> bool {
}
/// # Safety
#[unsafe(no_mangle)]
#[no_mangle]
pub unsafe extern "C" fn init_tracing() {
use tracing_subscriber::filter::{EnvFilter, LevelFilter};
@@ -47,14 +47,14 @@ pub unsafe extern "C" fn init_tracing() {
}
/// # Safety
#[unsafe(no_mangle)]
#[no_mangle]
pub unsafe extern "C" fn init_verifier(config: *const c_char) {
let config_str = c_char_to_str(config);
libzkp::verifier_init(config_str).unwrap();
}
/// # Safety
#[unsafe(no_mangle)]
#[no_mangle]
pub unsafe extern "C" fn init_l2geth(config: *const c_char) {
let config_str = c_char_to_str(config);
l2geth::init(config_str).unwrap();
@@ -92,7 +92,7 @@ fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskT
}
/// # Safety
#[unsafe(no_mangle)]
#[no_mangle]
pub unsafe extern "C" fn verify_chunk_proof(
proof: *const c_char,
fork_name: *const c_char,
@@ -101,7 +101,7 @@ pub unsafe extern "C" fn verify_chunk_proof(
}
/// # Safety
#[unsafe(no_mangle)]
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
@@ -110,7 +110,7 @@ pub unsafe extern "C" fn verify_batch_proof(
}
/// # Safety
#[unsafe(no_mangle)]
#[no_mangle]
pub unsafe extern "C" fn verify_bundle_proof(
proof: *const c_char,
fork_name: *const c_char,
@@ -119,7 +119,7 @@ pub unsafe extern "C" fn verify_bundle_proof(
}
/// # Safety
#[unsafe(no_mangle)]
#[no_mangle]
pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
let fork_name_str = c_char_to_str(fork_name);
let file_str = c_char_to_str(file);
@@ -145,7 +145,7 @@ fn failed_handling_result() -> HandlingResult {
}
/// # Safety
#[unsafe(no_mangle)]
#[no_mangle]
pub unsafe extern "C" fn gen_universal_task(
task_type: i32,
task: *const c_char,
@@ -166,7 +166,10 @@ pub unsafe extern "C" fn gen_universal_task(
);
return failed_handling_result();
}
Some(unsafe { std::slice::from_raw_parts(decryption_key, decryption_key_len) })
Some(std::slice::from_raw_parts(
decryption_key,
decryption_key_len,
))
} else {
None
};
@@ -182,7 +185,7 @@ pub unsafe extern "C" fn gen_universal_task(
};
let expected_vk = if expected_vk_len > 0 {
unsafe { std::slice::from_raw_parts(expected_vk, expected_vk_len) }
std::slice::from_raw_parts(expected_vk, expected_vk_len)
} else {
&[]
};
@@ -221,18 +224,18 @@ pub unsafe extern "C" fn gen_universal_task(
}
/// # Safety
#[unsafe(no_mangle)]
#[no_mangle]
pub unsafe extern "C" fn release_task_result(result: HandlingResult) {
if !result.universal_task.is_null() {
let _ = unsafe { CString::from_raw(result.universal_task) };
let _ = CString::from_raw(result.universal_task);
}
if !result.metadata.is_null() {
let _ = unsafe { CString::from_raw(result.metadata) };
let _ = CString::from_raw(result.metadata);
}
}
/// # Safety
#[unsafe(no_mangle)]
#[no_mangle]
pub unsafe extern "C" fn gen_wrapped_proof(
proof: *const c_char,
metadata: *const c_char,
@@ -241,7 +244,7 @@ pub unsafe extern "C" fn gen_wrapped_proof(
) -> *mut c_char {
let proof_str = c_char_to_str(proof);
let metadata_str = c_char_to_str(metadata);
let vk_data = unsafe { std::slice::from_raw_parts(vk as *const u8, vk_len) };
let vk_data = std::slice::from_raw_parts(vk as *const u8, vk_len);
match libzkp::gen_wrapped_proof(proof_str, metadata_str, vk_data) {
Ok(result) => CString::new(result).unwrap().into_raw(),
@@ -253,7 +256,7 @@ pub unsafe extern "C" fn gen_wrapped_proof(
}
/// # Safety
#[unsafe(no_mangle)]
#[no_mangle]
pub unsafe extern "C" fn univ_task_compatibility_fix(task_json: *const c_char) -> *mut c_char {
let task_json_str = c_char_to_str(task_json);
match libzkp::univ_task_compatibility_fix(task_json_str) {
@@ -266,9 +269,9 @@ pub unsafe extern "C" fn univ_task_compatibility_fix(task_json: *const c_char) -
}
/// # Safety
#[unsafe(no_mangle)]
#[no_mangle]
pub unsafe extern "C" fn release_string(ptr: *mut c_char) {
if !ptr.is_null() {
let _ = unsafe { CString::from_raw(ptr) };
let _ = CString::from_raw(ptr);
}
}

View File

@@ -6,28 +6,35 @@ edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
axiom-sdk.workspace = true
scroll-zkvm-types.workspace = true
scroll-zkvm-prover.workspace = true
libzkp = { path = "../libzkp"}
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "504e71f" }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "05648db" }
serde.workspace = true
serde_json.workspace = true
once_cell.workspace =true
base64.workspace = true
tiny-keccak = { workspace = true, features = ["sha3", "keccak"] }
eyre.workspace = true
tracing.workspace = true
futures = "0.3.30"
futures-util = "0.3"
reqwest = { version = "0.12", features = ["gzip", "stream"] }
reqwest = { version = "0.12.4", features = ["gzip", "stream"] }
reqwest-middleware = "0.3"
reqwest-retry = "0.5"
hex = "0.4.3"
jiff.workspace = true
tokio = { workspace = true, features = ["full"] }
rand = "0.8.5"
tokio = "1.37.0"
async-trait = "0.1"
sled = "0.34.7"
http = "1.1.0"
clap = { version = "4.5", features = ["derive"] }
url = { version = "2.5.4", features = ["serde"] }
tempfile = "3.24"
ctor = "0.2.8"
url = { version = "2.5.4", features = ["serde",] }
serde_bytes = "0.11.15"
[features]
default = []
cuda = ["scroll-zkvm-prover/cuda"]
cuda = ["scroll-zkvm-prover/cuda"]

View File

@@ -1,32 +1,21 @@
#[macro_use]
extern crate tracing;
mod prover;
mod types;
mod zk_circuits_handler;
use crate::prover::ProverKind;
use clap::{ArgAction, Parser, Subcommand};
use prover::{LocalProver, LocalProverConfig};
use scroll_proving_sdk::{
prover::{ProverBuilder, types::ProofType},
utils::{VERSION, init_tracing},
};
use std::{
fs::File,
io::BufReader,
path::{Path, PathBuf},
prover::{types::ProofType, ProverBuilder},
utils::{get_version, init_tracing},
};
use std::{fs::File, io::BufReader, path::Path};
#[derive(Parser, Debug)]
#[command(disable_version_flag = true)]
struct Args {
/// Prover kind
#[arg(long = "prover.kind", value_enum, default_value_t = ProverKind::Local)]
prover_kind: ProverKind,
/// Path of config file
#[arg(long = "config", default_value = "conf/config.json")]
config_file: PathBuf,
config_file: String,
#[arg(long = "forkname")]
fork_name: Option<String>,
@@ -53,11 +42,8 @@ enum Commands {
#[derive(Debug, serde::Deserialize)]
struct HandleSet {
#[serde(default)]
chunks: Vec<String>,
#[serde(default)]
batches: Vec<String>,
#[serde(default)]
bundles: Vec<String>,
}
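
A minimal sketch of the handle-set file this struct deserializes (referenced later as `testset.json` in the Makefile); the entries below are placeholders, and any missing key defaults to an empty list via `#[serde(default)]`:

```json
{
  "chunks": ["<chunk task>", "<chunk task>"],
  "batches": ["<batch task>"],
  "bundles": []
}
```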
@@ -68,13 +54,13 @@ async fn main() -> eyre::Result<()> {
let args = Args::parse();
if args.version {
println!("version is {VERSION}");
println!("version is {}", get_version());
std::process::exit(0);
}
info!(version = %VERSION, "Starting prover");
let (sdk_config, prover) = args.prover_kind.create_from_file(&args.config_file)?;
info!(prover = ?prover, "Loaded prover");
let cfg = LocalProverConfig::from_file(args.config_file)?;
let sdk_config = cfg.sdk_config.clone();
let local_prover = LocalProver::new(cfg.clone());
match args.command {
Some(Commands::Handle { task_path }) => {
@@ -82,37 +68,37 @@ async fn main() -> eyre::Result<()> {
let reader = BufReader::new(file);
let handle_set: HandleSet = serde_json::from_reader(reader)?;
let prover = ProverBuilder::new(sdk_config, prover)
let prover = ProverBuilder::new(sdk_config, local_prover)
.build()
.await
.map_err(|e| eyre::eyre!("build prover fail: {e}"))?;
let prover = std::sync::Arc::new(prover);
info!("Handling task set 1: chunks ...");
println!("Handling task set 1: chunks ...");
assert!(
prover
.clone()
.one_shot(&handle_set.chunks, ProofType::Chunk)
.await
);
info!("Done! Handling task set 2: batches ...");
println!("Done! Handling task set 2: batches ...");
assert!(
prover
.clone()
.one_shot(&handle_set.batches, ProofType::Batch)
.await
);
info!("Done! Handling task set 3: bundles ...");
println!("Done! Handling task set 3: bundles ...");
assert!(
prover
.clone()
.one_shot(&handle_set.bundles, ProofType::Bundle)
.await
);
info!("All done!");
println!("All done!");
}
None => {
let prover = ProverBuilder::new(sdk_config, prover)
let prover = ProverBuilder::new(sdk_config, local_prover)
.build()
.await
.map_err(|e| eyre::eyre!("build prover fail: {e}"))?;

View File

@@ -1,96 +1,329 @@
use crate::zk_circuits_handler::{universal::UniversalHandler, CircuitsHandler};
use async_trait::async_trait;
use eyre::Result;
use scroll_proving_sdk::{
config::Config as SdkConfig,
prover::{
ProvingService,
proving_service::{
GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
QueryTaskResponse,
QueryTaskResponse, TaskStatus,
},
types::ProofType,
ProvingService,
},
};
use scroll_zkvm_types::ProvingTask;
use serde::{Deserialize, Serialize};
use std::path::Path;
use std::{
collections::HashMap,
fs::File,
path::{Path, PathBuf},
sync::{Arc, LazyLock},
time::{SystemTime, UNIX_EPOCH},
};
use tokio::{runtime::Handle, sync::Mutex, task::JoinHandle};
mod local;
pub use local::{LocalProver, LocalProverConfig};
mod axiom;
pub use axiom::{AxiomProver, AxiomProverConfig};
#[derive(Debug)]
pub enum Prover {
Local(LocalProver),
Axiom(AxiomProver),
#[derive(Clone, Serialize, Deserialize)]
pub struct AssetsLocationData {
/// the base url used to form a download url for an asset, MUST HAVE A TRAILING SLASH
pub base_url: url::Url,
#[serde(default)]
/// an altered url for a specified vk
pub asset_detours: HashMap<String, url::Url>,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, clap::ValueEnum)]
pub enum ProverKind {
Local,
Axiom,
}
impl ProverKind {
pub fn create_from_file<P: AsRef<Path>>(
&self,
file_name: P,
) -> eyre::Result<(SdkConfig, Prover)> {
match self {
ProverKind::Local => {
let config = LocalProverConfig::from_file(file_name)?;
let sdk_config = config.sdk_config.clone();
let prover = LocalProver::new(config);
Ok((sdk_config, Prover::Local(prover)))
impl AssetsLocationData {
pub fn gen_asset_url(&self, vk_as_path: &str, proof_type: ProofType) -> Result<url::Url> {
Ok(self.base_url.join(
match proof_type {
ProofType::Chunk => format!("chunk/{vk_as_path}/"),
ProofType::Batch => format!("batch/{vk_as_path}/"),
ProofType::Bundle => format!("bundle/{vk_as_path}/"),
t => eyre::bail!("unrecognized proof type: {}", t as u8),
}
ProverKind::Axiom => {
let config = AxiomProverConfig::from_file(file_name)?;
let sdk_config = config.sdk_config.clone();
let prover = AxiomProver::new(config);
Ok((sdk_config, Prover::Axiom(prover)))
.as_str(),
)?)
}
pub fn validate(&self) -> Result<()> {
if !self.base_url.path().ends_with('/') {
eyre::bail!(
"base_url must have a trailing slash, got: {}",
self.base_url
);
}
Ok(())
}
pub async fn get_asset(
&self,
vk: &str,
url_base: &url::Url,
base_path: impl AsRef<Path>,
) -> Result<PathBuf> {
let download_files = ["app.vmexe", "openvm.toml"];
// Step 1: Create a local path for storage
let storage_path = base_path.as_ref().join(vk);
std::fs::create_dir_all(&storage_path)?;
// Step 2 & 3: Download each file if needed
let client = reqwest::Client::new();
for filename in download_files.iter() {
let local_file_path = storage_path.join(filename);
let download_url = url_base.join(filename)?;
// Check if file already exists
if local_file_path.exists() {
// Get file metadata to check size
if let Ok(metadata) = std::fs::metadata(&local_file_path) {
// Make a HEAD request to get remote file size
if let Ok(head_resp) = client.head(download_url.clone()).send().await {
if let Some(content_length) = head_resp.headers().get("content-length") {
if let Ok(remote_size) =
content_length.to_str().unwrap_or("0").parse::<u64>()
{
// If sizes match, skip download
if metadata.len() == remote_size {
println!("File {} already exists with matching size, skipping download", filename);
continue;
}
}
}
}
}
}
println!("Downloading {} from {}", filename, download_url);
let response = client.get(download_url).send().await?;
if !response.status().is_success() {
eyre::bail!(
"Failed to download {}: HTTP status {}",
filename,
response.status()
);
}
// Stream the content directly to file instead of loading into memory
let mut file = std::fs::File::create(&local_file_path)?;
let mut stream = response.bytes_stream();
use futures_util::StreamExt;
while let Some(chunk) = stream.next().await {
std::io::Write::write_all(&mut file, &chunk?)?;
}
}
// Step 4: Return the storage path
Ok(storage_path)
}
}
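
The trailing-slash requirement that `validate()` enforces exists because `url::Url::join` resolves relative references the way a browser does. A standalone sketch of the difference, assuming only the `url` crate:

```rust
fn main() -> Result<(), url::ParseError> {
    // With a trailing slash, the joined path nests under the base.
    let with_slash = url::Url::parse("https://example.com/releases/")?;
    assert_eq!(
        with_slash.join("chunk/abcd/")?.as_str(),
        "https://example.com/releases/chunk/abcd/"
    );

    // Without one, "releases" is treated as a file and replaced,
    // silently pointing the download at the wrong location.
    let without_slash = url::Url::parse("https://example.com/releases")?;
    assert_eq!(
        without_slash.join("chunk/abcd/")?.as_str(),
        "https://example.com/chunk/abcd/"
    );
    Ok(())
}
```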
#[derive(Clone, Serialize, Deserialize)]
pub struct LocalProverConfig {
pub sdk_config: SdkConfig,
pub circuits: HashMap<String, CircuitConfig>,
}
impl LocalProverConfig {
pub fn from_reader<R>(reader: R) -> Result<Self>
where
R: std::io::Read,
{
serde_json::from_reader(reader).map_err(|e| eyre::eyre!(e))
}
pub fn from_file(file_name: String) -> Result<Self> {
let file = File::open(file_name)?;
Self::from_reader(&file)
}
}
#[derive(Clone, Serialize, Deserialize)]
pub struct CircuitConfig {
/// The path to save assets for a specified hard fork phase
pub workspace_path: String,
#[serde(flatten)]
/// The location data for dynamic loading
pub location_data: AssetsLocationData,
/// cached vk value to save some initial cost, for debugging only
#[serde(default)]
pub vks: HashMap<ProofType, String>,
}
pub struct LocalProver {
config: LocalProverConfig,
next_task_id: u64,
current_task: Option<JoinHandle<Result<String>>>,
handlers: HashMap<String, Arc<dyn CircuitsHandler>>,
}
#[async_trait]
impl ProvingService for Prover {
impl ProvingService for LocalProver {
fn is_local(&self) -> bool {
match self {
Prover::Local(p) => p.is_local(),
Prover::Axiom(p) => p.is_local(),
true
}
async fn get_vks(&self, _: GetVkRequest) -> GetVkResponse {
// get vk has been deprecated in new prover with dynamic asset loading scheme
GetVkResponse {
vks: vec![],
error: None,
}
}
async fn get_vks(&self, req: GetVkRequest) -> GetVkResponse {
match self {
Prover::Local(p) => p.get_vks(req).await,
Prover::Axiom(p) => p.get_vks(req).await,
}
}
async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
match self {
Prover::Local(p) => p.prove(req).await,
Prover::Axiom(p) => p.prove(req).await,
match self.do_prove(req).await {
Ok(resp) => resp,
Err(e) => ProveResponse {
status: TaskStatus::Failed,
error: Some(format!("failed to request proof: {}", e)),
..Default::default()
},
}
}
async fn query_task(&mut self, req: QueryTaskRequest) -> QueryTaskResponse {
match self {
Prover::Local(p) => p.query_task(req).await,
Prover::Axiom(p) => p.query_task(req).await,
if let Some(handle) = &mut self.current_task {
if handle.is_finished() {
return match handle.await {
Ok(Ok(proof)) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Success,
proof: Some(proof),
..Default::default()
},
Ok(Err(e)) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task failed: {}", e)),
..Default::default()
},
Err(e) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task panicked: {}", e)),
..Default::default()
},
};
} else {
return QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Proving,
..Default::default()
};
}
}
// If no handle is found
QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some("no proving task is running".to_string()),
..Default::default()
}
}
}
impl From<LocalProver> for Prover {
fn from(p: LocalProver) -> Self {
Prover::Local(p)
}
}
static GLOBAL_ASSET_URLS: LazyLock<HashMap<String, HashMap<String, url::Url>>> =
LazyLock::new(|| {
const ASSETS_JSON: &str = include_str!("../assets_url_preset.json");
serde_json::from_str(ASSETS_JSON).expect("Failed to parse assets_url_preset.json")
});
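
The preset file itself is not part of this diff; judging from the declared type, `assets_url_preset.json` maps a fork name to per-vk urls. A hypothetical round-trip (the fork name, vk, and url below are made up; deserializing `url::Url` relies on the `serde` feature enabled in the Cargo.toml above):

```rust
use std::collections::HashMap;

fn main() {
    let preset = r#"{ "galileo": { "00aabb": "https://example.com/galileo/00aabb/" } }"#;
    let parsed: HashMap<String, HashMap<String, url::Url>> =
        serde_json::from_str(preset).expect("parse preset");
    assert!(parsed["galileo"].contains_key("00aabb"));
}
```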
impl From<AxiomProver> for Prover {
fn from(p: AxiomProver) -> Self {
Prover::Axiom(p)
impl LocalProver {
pub fn new(mut config: LocalProverConfig) -> Self {
for (fork_name, circuit_config) in config.circuits.iter_mut() {
// validate each base url
circuit_config.location_data.validate().unwrap();
let mut template_url_mapping = GLOBAL_ASSET_URLS
.get(&fork_name.to_lowercase())
.cloned()
.unwrap_or_default();
// apply default settings in template
for (key, url) in circuit_config.location_data.asset_detours.drain() {
template_url_mapping.insert(key, url);
}
circuit_config.location_data.asset_detours = template_url_mapping;
// validate each detour url
for url in circuit_config.location_data.asset_detours.values() {
assert!(
url.path().ends_with('/'),
"url {} must end with /",
url.as_str()
);
}
}
Self {
config,
next_task_id: 0,
current_task: None,
handlers: HashMap::new(),
}
}
async fn do_prove(&mut self, req: ProveRequest) -> Result<ProveResponse> {
self.next_task_id += 1;
let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;
let prover_task = UniversalHandler::get_task_from_input(&req.input)?;
let is_openvm_13 = prover_task.use_openvm_13;
let prover_task: ProvingTask = prover_task.into();
let vk = hex::encode(&prover_task.vk);
let handler = if let Some(handler) = self.handlers.get(&vk) {
handler.clone()
} else {
let base_config = self
.config
.circuits
.get(&req.hard_fork_name)
.ok_or_else(|| {
eyre::eyre!(
"coordinator sent unexpected forkname {}",
req.hard_fork_name
)
})?;
let url_base = if let Some(url) = base_config.location_data.asset_detours.get(&vk) {
url.clone()
} else {
base_config
.location_data
.gen_asset_url(&vk, req.proof_type)?
};
let asset_path = base_config
.location_data
.get_asset(&vk, &url_base, &base_config.workspace_path)
.await?;
let circuits_handler = Arc::new(Mutex::new(UniversalHandler::new(
&asset_path,
is_openvm_13,
)?));
self.handlers.insert(vk, circuits_handler.clone());
circuits_handler
};
let handle = Handle::current();
let is_evm = req.proof_type == ProofType::Bundle;
let task_handle = tokio::task::spawn_blocking(move || {
handle.block_on(handler.get_proof_data(&prover_task, is_evm))
});
self.current_task = Some(task_handle);
Ok(ProveResponse {
task_id: self.next_task_id.to_string(),
proof_type: req.proof_type,
circuit_version: req.circuit_version,
hard_fork_name: req.hard_fork_name,
status: TaskStatus::Proving,
created_at,
input: Some(req.input),
..Default::default()
})
}
}

View File

@@ -1,329 +0,0 @@
use crate::zk_circuits_handler::universal::UniversalHandler;
use async_trait::async_trait;
use axiom_sdk::{
AxiomSdk, ProofType as AxiomProofType,
build::BuildSdk,
input::Input as AxiomInput,
prove::{ProveArgs, ProveSdk},
};
use eyre::Context;
use jiff::Timestamp;
use scroll_proving_sdk::{
config::Config as SdkConfig,
prover::{
ProofType, ProvingService,
proving_service::{
GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
QueryTaskResponse, TaskStatus,
},
},
};
use scroll_zkvm_types::{
ProvingTask,
proof::{OpenVmEvmProof, OpenVmVersionedVmStarkProof, ProofEnum},
};
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, fs::File, io::Write, path::Path};
use tempfile::NamedTempFile;
use tracing::Level;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AxiomProverConfig {
pub axiom: AxiomConfig,
pub sdk_config: SdkConfig,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AxiomConfig {
pub api_key: String,
// vk to program mapping
pub programs: HashMap<String, AxiomProgram>,
pub num_gpus: Option<usize>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AxiomProgram {
pub program_id: String,
pub config_id: String,
}
#[derive(Debug)]
pub struct AxiomProver {
config: AxiomProverConfig,
}
impl AxiomProverConfig {
pub fn from_reader<R>(reader: R) -> eyre::Result<Self>
where
R: std::io::Read,
{
serde_json::from_reader(reader).map_err(|e| eyre::eyre!(e))
}
pub fn from_file<P: AsRef<Path>>(file_name: P) -> eyre::Result<Self> {
let file = File::open(file_name)?;
Self::from_reader(&file)
}
}
#[async_trait]
impl ProvingService for AxiomProver {
fn is_local(&self) -> bool {
false
}
async fn get_vks(&self, _: GetVkRequest) -> GetVkResponse {
// get vk has been deprecated in new prover with dynamic asset loading scheme
GetVkResponse {
vks: vec![],
error: None,
}
}
#[instrument(skip(self), ret, level = Level::DEBUG)]
async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
self.prove_inner(req)
.await
.unwrap_or_else(|e| ProveResponse {
status: TaskStatus::Failed,
error: Some(format!("failed to submit proof task to axiom: {}", e)),
..Default::default()
})
}
#[instrument(skip(self), ret, level = Level::DEBUG)]
async fn query_task(&mut self, req: QueryTaskRequest) -> QueryTaskResponse {
let task_id = req.task_id.clone();
self.query_task_inner(req)
.await
.unwrap_or_else(|e| QueryTaskResponse {
task_id,
status: TaskStatus::Failed,
error: Some(format!("failed to query axiom task: {}", e)),
..Default::default()
})
}
}
impl AxiomProver {
pub fn new(config: AxiomProverConfig) -> Self {
Self { config }
}
async fn make_axiom_request<R: Send + 'static>(
&self,
config_id: Option<String>,
req: impl FnOnce(AxiomSdk) -> eyre::Result<R> + Send + 'static,
) -> eyre::Result<R> {
let api_key = self.config.axiom.api_key.clone();
tokio::task::spawn_blocking(move || {
let config = axiom_sdk::AxiomConfig {
api_key: Some(api_key),
config_id,
..Default::default()
};
let sdk = AxiomSdk::new(config);
req(sdk)
})
.await
.context("failed to join axiom request")
.flatten()
}
#[instrument(skip_all, ret, err, level = Level::DEBUG)]
fn get_program(&self, vk: &[u8]) -> eyre::Result<AxiomProgram> {
let vk = hex::encode(vk);
debug!(vk = %vk);
self.config
.axiom
.programs
.get(vk.as_str())
.cloned()
.ok_or_else(|| eyre::eyre!("no axiom program configured for vk: {vk}"))
}
#[instrument(skip_all, err, level = Level::DEBUG)]
async fn prove_inner(&mut self, req: ProveRequest) -> eyre::Result<ProveResponse> {
let prover_task = UniversalHandler::get_task_from_input(&req.input)?;
if prover_task.use_openvm_13 {
eyre::bail!("axiom prover does not support openvm v1.3 tasks");
}
let prover_task: ProvingTask = prover_task.into();
let program = self.get_program(&prover_task.vk)?;
let num_gpus = self.config.axiom.num_gpus;
let mut input_file = NamedTempFile::new()?;
let input = prover_task.build_openvm_input();
serde_json::to_writer(&mut input_file, &input)?;
input_file.flush()?;
let proof_type = if req.proof_type == ProofType::Bundle {
AxiomProofType::Evm
} else {
AxiomProofType::Stark
};
let mut response = ProveResponse {
proof_type: req.proof_type,
created_at: Timestamp::now().as_duration().as_secs_f64(),
status: TaskStatus::Proving,
..Default::default()
};
response.task_id = self
.make_axiom_request(Some(program.config_id), move |sdk| {
sdk.generate_new_proof(ProveArgs {
program_id: Some(program.program_id.clone()),
input: Some(AxiomInput::FilePath(input_file.path().to_path_buf())),
proof_type: Some(proof_type),
num_gpus,
priority: None,
})
})
.await?;
info!(
proof_type = ?req.proof_type,
identifier = %prover_task.identifier,
task_id = %response.task_id,
"submitted axiom proving task"
);
Ok(response)
}
#[instrument(skip_all, err, level = Level::DEBUG)]
async fn query_task_inner(&mut self, req: QueryTaskRequest) -> eyre::Result<QueryTaskResponse> {
let mut response = QueryTaskResponse {
task_id: req.task_id.clone(),
..Default::default()
};
let task_id = req.task_id.clone();
let (status, proof_type, proof) = self
.make_axiom_request(None, move |sdk| {
let status = sdk.get_proof_status(&task_id)?;
debug!(status = ?status, "fetched axiom task status");
let program_status = sdk.get_build_status(&status.program_uuid)?;
let proof_type = match program_status.name.as_str() {
"chunk" => ProofType::Chunk,
"batch" => ProofType::Batch,
"bundle" => ProofType::Bundle,
_ => {
return Err(eyre::eyre!("unrecognized program in: {program_status:#?}",));
}
};
let axiom_proof_type: AxiomProofType = status.proof_type.parse()?;
let proof = if status.state == "Succeeded" {
let file = NamedTempFile::new()?;
sdk.get_generated_proof(
&status.id,
&axiom_proof_type,
Some(file.path().to_path_buf()),
)?;
Some(file)
} else {
None
};
Ok((status, proof_type, proof))
})
.await?;
// Queued, Executing, Executed, AppProving, AppProvingDone, PostProcessing, Failed,
// Succeeded
response.status = match status.state.as_str() {
"Queued" => TaskStatus::Queued,
"Executing" | "Executed" | "AppProving" | "AppProvingDone" | "PostProcessing" => {
TaskStatus::Proving
}
"Succeeded" => TaskStatus::Success,
"Failed" => TaskStatus::Failed,
other => {
return Err(eyre::eyre!("unrecognized axiom task status: {other}"));
}
};
debug!(status = ?response.status, "mapped axiom task status");
if response.status == TaskStatus::Failed {
response.error = Some(
status
.error_message
.unwrap_or_else(|| "unknown error".to_string()),
);
}
response.proof_type = proof_type;
let created_at: Timestamp = status.created_at.parse()?;
response.created_at = created_at.as_duration().as_secs_f64();
if let Some(launched_at) = status.launched_at
&& !launched_at.is_empty()
{
let started_at: Timestamp = launched_at.parse()?;
let started_at = started_at.as_duration();
response.started_at = Some(started_at.as_secs_f64());
if let Some(terminated_at) = status.terminated_at
&& !terminated_at.is_empty()
{
let finished_at: Timestamp = terminated_at.parse()?;
let finished_at = finished_at.as_duration();
response.finished_at = Some(finished_at.as_secs_f64());
let duration = finished_at.checked_sub(started_at).ok_or_else(|| {
eyre::eyre!(
"invalid timestamps: started_at={:?}, finished_at={:?}",
started_at,
finished_at
)
})?;
response.compute_time_sec = Some(duration.as_secs_f64());
info!(
task_id = %req.task_id,
launched_at = %format_args!("{launched_at:#}"),
terminated_at = %format_args!("{terminated_at:#}"),
duration = %format_args!("{duration:#}"),
priority = %status.priority,
"completed"
);
info!(
task_id = %req.task_id,
cells_used = %status.cells_used,
num_gpus = %status.num_gpus,
"resource usage"
);
if let Some(num_instructions) = status.num_instructions {
let mhz = num_instructions as f64 / (duration.as_secs_f64() * 1_000_000.0);
info!(
task_id = %req.task_id,
cycles = %num_instructions,
MHz = %format_args!("{mhz:.2}"),
"performance"
);
}
}
}
if let Some(proof_file) = proof {
let proof = match proof_type {
ProofType::Bundle => {
let proof: OpenVmEvmProof = serde_json::from_reader(proof_file)?;
ProofEnum::Evm(proof.into())
}
_ => {
let proof: OpenVmVersionedVmStarkProof = serde_json::from_reader(proof_file)?;
ProofEnum::Stark(proof.try_into()?)
}
};
response.proof = Some(serde_json::to_string(&proof)?);
}
Ok(response)
}
}

View File

@@ -1,342 +0,0 @@
use crate::zk_circuits_handler::{CircuitsHandler, universal::UniversalHandler};
use async_trait::async_trait;
use eyre::Result;
use scroll_proving_sdk::{
config::Config as SdkConfig,
prover::{
ProvingService,
proving_service::{
GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
QueryTaskResponse, TaskStatus,
},
types::ProofType,
},
};
use scroll_zkvm_types::ProvingTask;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
fmt,
fs::File,
path::{Path, PathBuf},
sync::{Arc, LazyLock},
time::{SystemTime, UNIX_EPOCH},
};
use tokio::{runtime::Handle, sync::Mutex, task::JoinHandle};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AssetsLocationData {
/// the base url used to form a download url for an asset, MUST HAVE A TRAILING SLASH
pub base_url: url::Url,
#[serde(default)]
/// an altered url for a specified vk
pub asset_detours: HashMap<String, url::Url>,
}
impl AssetsLocationData {
pub fn gen_asset_url(&self, vk_as_path: &str, proof_type: ProofType) -> Result<url::Url> {
Ok(self.base_url.join(
match proof_type {
ProofType::Chunk => format!("chunk/{vk_as_path}/"),
ProofType::Batch => format!("batch/{vk_as_path}/"),
ProofType::Bundle => format!("bundle/{vk_as_path}/"),
t => eyre::bail!("unrecognized proof type: {}", t as u8),
}
.as_str(),
)?)
}
pub fn validate(&self) -> Result<()> {
if !self.base_url.path().ends_with('/') {
eyre::bail!(
"base_url must have a trailing slash, got: {}",
self.base_url
);
}
Ok(())
}
pub async fn get_asset(
&self,
vk: &str,
url_base: &url::Url,
base_path: impl AsRef<Path>,
) -> Result<PathBuf> {
let download_files = ["app.vmexe", "openvm.toml"];
// Step 1: Create a local path for storage
let storage_path = base_path.as_ref().join(vk);
std::fs::create_dir_all(&storage_path)?;
// Step 2 & 3: Download each file if needed
let client = reqwest::Client::new();
for filename in download_files.iter() {
let local_file_path = storage_path.join(filename);
let download_url = url_base.join(filename)?;
// Check if file already exists
if local_file_path.exists() {
// Get file metadata to check size
if let Ok(metadata) = std::fs::metadata(&local_file_path) {
// Make a HEAD request to get remote file size
if let Ok(head_resp) = client.head(download_url.clone()).send().await {
if let Some(content_length) = head_resp.headers().get("content-length") {
if let Ok(remote_size) =
content_length.to_str().unwrap_or("0").parse::<u64>()
{
// If sizes match, skip download
if metadata.len() == remote_size {
println!(
"File {} already exists with matching size, skipping download",
filename
);
continue;
}
}
}
}
}
}
println!("Downloading {} from {}", filename, download_url);
let response = client.get(download_url).send().await?;
if !response.status().is_success() {
eyre::bail!(
"Failed to download {}: HTTP status {}",
filename,
response.status()
);
}
// Stream the content directly to file instead of loading into memory
let mut file = std::fs::File::create(&local_file_path)?;
let mut stream = response.bytes_stream();
use futures_util::StreamExt;
while let Some(chunk) = stream.next().await {
std::io::Write::write_all(&mut file, &chunk?)?;
}
}
// Step 4: Return the storage path
Ok(storage_path)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LocalProverConfig {
pub sdk_config: SdkConfig,
pub circuits: HashMap<String, CircuitConfig>,
}
impl LocalProverConfig {
pub fn from_reader<R>(reader: R) -> Result<Self>
where
R: std::io::Read,
{
serde_json::from_reader(reader).map_err(|e| eyre::eyre!(e))
}
pub fn from_file<P: AsRef<Path>>(file_name: P) -> Result<Self> {
let file = File::open(file_name)?;
Self::from_reader(&file)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CircuitConfig {
/// The path to save assets for a specified hard fork phase
pub workspace_path: String,
#[serde(flatten)]
/// The location data for dynamic loading
pub location_data: AssetsLocationData,
/// cached vk value to save some initial cost, for debugging only
#[serde(default)]
pub vks: HashMap<ProofType, String>,
}
pub struct LocalProver {
config: LocalProverConfig,
next_task_id: u64,
current_task: Option<JoinHandle<Result<String>>>,
handlers: HashMap<String, Arc<dyn CircuitsHandler>>,
}
impl fmt::Debug for LocalProver {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("LocalProver")
.field("config", &self.config)
.field("next_task_id", &self.next_task_id)
.finish()
}
}
#[async_trait]
impl ProvingService for LocalProver {
fn is_local(&self) -> bool {
true
}
async fn get_vks(&self, _: GetVkRequest) -> GetVkResponse {
// get vk has been deprecated in new prover with dynamic asset loading scheme
GetVkResponse {
vks: vec![],
error: None,
}
}
async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
match self.do_prove(req).await {
Ok(resp) => resp,
Err(e) => ProveResponse {
status: TaskStatus::Failed,
error: Some(format!("failed to request proof: {}", e)),
..Default::default()
},
}
}
async fn query_task(&mut self, req: QueryTaskRequest) -> QueryTaskResponse {
if let Some(handle) = &mut self.current_task {
if handle.is_finished() {
return match handle.await {
Ok(Ok(proof)) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Success,
proof: Some(proof),
..Default::default()
},
Ok(Err(e)) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task failed: {}", e)),
..Default::default()
},
Err(e) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task panicked: {}", e)),
..Default::default()
},
};
} else {
return QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Proving,
..Default::default()
};
}
}
// If no handle is found
QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some("no proving task is running".to_string()),
..Default::default()
}
}
}
static GLOBAL_ASSET_URLS: LazyLock<HashMap<String, HashMap<String, url::Url>>> =
LazyLock::new(|| {
const ASSETS_JSON: &str = include_str!("../../assets_url_preset.json");
serde_json::from_str(ASSETS_JSON).expect("Failed to parse assets_url_preset.json")
});
impl LocalProver {
pub fn new(mut config: LocalProverConfig) -> Self {
for (fork_name, circuit_config) in config.circuits.iter_mut() {
// validate each base url
circuit_config.location_data.validate().unwrap();
let mut template_url_mapping = GLOBAL_ASSET_URLS
.get(&fork_name.to_lowercase())
.cloned()
.unwrap_or_default();
// apply default settings in template
for (key, url) in circuit_config.location_data.asset_detours.drain() {
template_url_mapping.insert(key, url);
}
circuit_config.location_data.asset_detours = template_url_mapping;
// validate each detour url
for url in circuit_config.location_data.asset_detours.values() {
assert!(
url.path().ends_with('/'),
"url {} must end with /",
url.as_str()
);
}
}
Self {
config,
next_task_id: 0,
current_task: None,
handlers: HashMap::new(),
}
}
async fn do_prove(&mut self, req: ProveRequest) -> Result<ProveResponse> {
self.next_task_id += 1;
let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;
let prover_task = UniversalHandler::get_task_from_input(&req.input)?;
let is_openvm_13 = prover_task.use_openvm_13;
let prover_task: ProvingTask = prover_task.into();
let vk = hex::encode(&prover_task.vk);
let handler = if let Some(handler) = self.handlers.get(&vk) {
handler.clone()
} else {
let base_config = self
.config
.circuits
.get(&req.hard_fork_name)
.ok_or_else(|| {
eyre::eyre!(
"coordinator sent unexpected forkname {}",
req.hard_fork_name
)
})?;
let url_base = if let Some(url) = base_config.location_data.asset_detours.get(&vk) {
url.clone()
} else {
base_config
.location_data
.gen_asset_url(&vk, req.proof_type)?
};
let asset_path = base_config
.location_data
.get_asset(&vk, &url_base, &base_config.workspace_path)
.await?;
let circuits_handler = Arc::new(Mutex::new(UniversalHandler::new(
&asset_path,
is_openvm_13,
)?));
self.handlers.insert(vk, circuits_handler.clone());
circuits_handler
};
let handle = Handle::current();
let is_evm = req.proof_type == ProofType::Bundle;
let task_handle = tokio::task::spawn_blocking(move || {
handle.block_on(handler.get_proof_data(&prover_task, is_evm))
});
self.current_task = Some(task_handle);
Ok(ProveResponse {
task_id: self.next_task_id.to_string(),
proof_type: req.proof_type,
circuit_version: req.circuit_version,
hard_fork_name: req.hard_fork_name,
status: TaskStatus::Proving,
created_at,
input: Some(req.input),
..Default::default()
})
}
}

View File

@@ -9,13 +9,14 @@ import (
"github.com/pressly/goose/v3"
)
//go:embed migrations/*.sql
//go:embed migrations
var embedMigrations embed.FS
// MigrationsDir migration dir
const MigrationsDir string = "migrations"
func init() {
// note: goose ignores non-sql files by default, so we do not need to specify *.sql
goose.SetBaseFS(embedMigrations)
goose.SetSequential(true)
goose.SetTableName("scroll_migrations")
@@ -24,6 +25,41 @@ func init() {
goose.SetVerbose(verbose)
}
// MigrateModule migrates the db used by another module, with a module-specific goose table name
// sql files for that module must be put in a sub-directory under `MigrationsDir`
func MigrateModule(db *sql.DB, moduleName string) error {
goose.SetTableName(moduleName + "_migrations")
defer func() {
goose.SetTableName("scroll_migrations")
}()
return goose.Up(db, MigrationsDir+"/"+moduleName, goose.WithAllowMissing())
}
// RollbackModule rolls back the specified module to the given version
func RollbackModule(db *sql.DB, moduleName string, version *int64) error {
goose.SetTableName(moduleName + "_migrations")
defer func() {
goose.SetTableName("scroll_migrations")
}()
moduleDir := MigrationsDir + "/" + moduleName
if version != nil {
return goose.DownTo(db, moduleDir, *version)
}
return goose.Down(db, moduleDir)
}
// ResetModuleDB cleans and migrates the db for a module.
func ResetModuleDB(db *sql.DB, moduleName string) error {
if err := RollbackModule(db, moduleName, new(int64)); err != nil {
return err
}
return MigrateModule(db, moduleName)
}
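A hedged usage sketch of these helpers; the import path, driver, and module name below are assumptions for illustration, not taken from this diff:
```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // postgres driver, assumed

	"scroll-tech/database/migrate" // hypothetical import path
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost:5432/scroll?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	// Applies migrations/proxy/*.sql and tracks versions in the
	// "proxy_migrations" goose table.
	if err := migrate.MigrateModule(db, "proxy"); err != nil {
		log.Fatal(err)
	}
}
```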
// Migrate migrates the db
func Migrate(db *sql.DB) error {
//return goose.Up(db, MIGRATIONS_DIR, goose.WithAllowMissing())

View File

@@ -0,0 +1,30 @@
-- +goose Up
-- +goose StatementBegin
create table prover_sessions
(
public_key TEXT NOT NULL,
upstream TEXT NOT NULL,
up_token TEXT NOT NULL,
expired TIMESTAMP(0) NOT NULL,
constraint uk_prover_sessions_public_key_upstream unique (public_key, upstream)
);
create index idx_prover_sessions_expired on prover_sessions (expired);
create table priority_upstream
(
public_key TEXT NOT NULL,
upstream TEXT NOT NULL,
update_time TIMESTAMP(0) NOT NULL DEFAULT now()
);
create unique index idx_priority_upstream_public_key on priority_upstream (public_key);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop index if exists idx_prover_sessions_expired;
drop table if exists prover_sessions;
drop table if exists priority_upstream;
-- +goose StatementEnd

View File

@@ -1,5 +1,3 @@
include ../../build/common.mk
.PHONY: clean setup_db test_tool all check_vars
include conf/.make.env
@@ -59,7 +57,6 @@ reset_db:
test_tool:
go build -o $(PWD)/build/bin/e2e_tool ../../rollup/tests/integration_tool
$(call macos_codesign,$(PWD)/build/bin/e2e_tool)
build/bin/e2e_tool: test_tool
@@ -70,4 +67,4 @@ reimport_data: reset_db import_data
coordinator_setup:
SCROLL_FORK_NAME=${SCROLL_FORK_NAME} $(MAKE) -C ../../coordinator localsetup
cp -f conf/genesis.json ../../coordinator/build/bin/conf
cp -f conf/genesis.json ../../coordinator/build/bin/conf

View File

@@ -2,34 +2,15 @@
It contains data from some blocks in a specified testnet, and helps to generate a series of chunks/batches/bundles from these blocks, filling the DB for the coordinator, so an e2e test (from chunk to bundle) can be run completely locally
### Prerequisites
1. install [goose](https://github.com/pressly/goose)
```bash
go install github.com/pressly/goose/v3/cmd/goose@latest
```
Prepare:
link the staff dir as "conf" from one of the dirs with a staff set; currently we have the following staff sets:
- sepolia: with blocks from scroll sepolia forking, e.g. `ln -s sepolia-galileo conf`
- galileo: with blocks from scroll galileo forking
- cloak-xen: with blocks from xen sepolia, which is a cloak network
+ sepolia: with blocks from scroll sepolia
+ cloak-xen: with blocks from xen sepolia, which is a cloak network
Steps:
1. run `make all` under `tests/prover-e2e`; it launches a PostgreSQL db in a local docker container, ready to be used by the coordinator (including some chunks/batches/bundles waiting to be proven)
2. set up assets by running `make coordinator_setup`; `SCROLL_ZKVM_VERSION` must be specified, and if we run the e2e test for a fork other than `Galileo`, `SCROLL_FORK_NAME` is also required, for example:
```bash
SCROLL_FORK_NAME=feynman SCROLL_ZKVM_VERSION=v0.7.0 make coordinator_setup
```
2. set up assets by running `make coordinator_setup`
3. in `coordinator/build/bin/conf`, update the necessary items in `config.template.json` and rename it to `config.json`
4. build and launch the `coordinator_api` service locally
5. set up the `config.json` for the zkvm prover to connect to the locally launched coordinator api:
- set the `sdk_config.coordinator.base_url` field to "http://localhost:8390",
6. in `zkvm-prover`, launch `make test_e2e_run`, which runs the prover locally, connects to the local coordinator api service according to the `config.json`, and proves all tasks injected into the db in step 1.
5. set up the `config.json` for the zkvm prover to connect to the locally launched coordinator api (see the sketch below)
6. in `zkvm-prover`, launch `make test_e2e_run`, which runs the prover locally, connects to the local coordinator api service according to the `config.json`, and proves all tasks injected into the db in step 1.
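For step 5, a minimal sketch of the relevant `config.json` fragment, using the field name and url given in the earlier revision of this step (all other fields omitted):
```json
{
  "sdk_config": {
    "coordinator": {
      "base_url": "http://localhost:8390"
    }
  }
}
```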

View File

@@ -1,3 +1,3 @@
BEGIN_BLOCK?=15206785
END_BLOCK?=15206794
SCROLL_FORK_NAME=galileo
SCROLL_FORK_NAME=galileo

View File

@@ -1,16 +0,0 @@
FROM scrolltech/cuda-go-rust-builder:cuda-12.9.1-go-1.22.12-rust-nightly-2025-08-18 AS builder
WORKDIR /app
COPY . .
RUN cd /app/zkvm-prover && make cpu_prover
FROM debian:trixie-slim AS runtime
WORKDIR app
RUN apt-get update \
&& apt-get install -y --no-install-recommends ca-certificates \
&& update-ca-certificates \
&& rm -rf /var/lib/apt/lists/*
ENV RUST_LOG='off,scroll_proving_sdk=info,prover=info'
COPY --from=builder /app/target/release/prover ./prover
ENTRYPOINT ["./prover"]

View File

@@ -35,9 +35,6 @@ ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_VERSION}
E2E_HANDLE_SET ?= ../tests/prover-e2e/testset.json
DUMP_DIR ?= .work
cpu_prover:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --locked --release -p prover
prover:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --locked --release --features cuda -p prover
@@ -63,11 +60,8 @@ test_run:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json
test_e2e_run: ${E2E_HANDLE_SET}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --prover.kind local --config ./config.json handle ${E2E_HANDLE_SET}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json handle ${E2E_HANDLE_SET}
test_e2e_run_gpu: ${E2E_HANDLE_SET}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release --features cuda -p prover -- --config ./config.json handle ${E2E_HANDLE_SET}
test_axiom_e2e_run: ${E2E_HANDLE_SET}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --prover.kind axiom --config ./config.json handle ${E2E_HANDLE_SET}

View File

@@ -6,8 +6,7 @@
"base_url": "<the url of coordinator>",
"retry_count": 10,
"retry_wait_time_sec": 10,
"connection_timeout_sec": 1800,
"suppress_empty_task_error": false
"connection_timeout_sec": 1800
},
"prover": {
"supported_proof_types": [
@@ -15,10 +14,7 @@
2,
3
],
"circuit_version": "v0.13.1",
"n_workers": 1,
"poll_interval_sec": 20,
"randomized_delay_sec": 0
"circuit_version": "v0.13.1"
},
"health_listener_addr": "127.0.0.1:10080",
"db_path": ".work/db"
@@ -35,13 +31,6 @@
"galileoV2": {
"base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/galileov2/",
"workspace_path": ".work/galileo"
}
},
"axiom_api_key": "<axiom api key>",
"axiom_programs": {
"<vk hex string>": {
"program_id": "prg_<axiom program id>",
"config_id": "cfg_<axiom config id>"
}
}
}
}
}