Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 23:18:07 -05:00)

Compare commits: coordinato...feat/axiom (52 commits)
Commits (52; only the SHA1 column was captured):
42221c9b2a  6b4c106eb5  43f1895ecf  d169990168  d7d29f52a9  b5c398a711
5dba980cea  ea275cbb8a  763d2b7d7d  1f263bb730  d3837daf3a  69a20b610a
48ecc66de8  9923856c9d  612212da4d  b9156ab149  056326ee3e  54c9e278c8
7d6aea89aa  8a05300ab3  5d378a015d  88066d72e8  226d32f9bf  c950ecc213
4cc4cc1064  ee8e4a39be  a450fad09d  3f95ffc3d7  49c0b1a844  970f8d488e
bf60d16ea8  e962e713d8  785ce615d5  b64015c54d  4e0573a820  4bc57bb95e
4bed6845c3  b9846293b2  397f327776  fe9ce35249  4062c554a3  2b38078e02
ab6490ef35  d5f9d55075  ecfdbb342d  98775e0bbb  6a17d2c715  22b8ac7204
8f3346c738  2c7117ebc3  3b3bd5f9ee  2d12839a8c
.dockerignore (new file, 1 line)
@@ -0,0 +1 @@
+target/
.github/workflows/docker.yml (vendored, 43 lines changed): the coordinator-proxy job is removed.
@@ -360,49 +360,6 @@ jobs:
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-
-  coordinator-proxy:
-    runs-on:
-      group: scroll-reth-runner-group
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Login to Docker Hub
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v4
-        with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
-          aws-region: ${{ env.AWS_REGION }}
-      - name: Login to Amazon ECR
-        id: login-ecr
-        uses: aws-actions/amazon-ecr-login@v2
-      - name: check repo and create it if not exist
-        env:
-          REPOSITORY: coordinator-proxy
-        run: |
-          aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
-      - name: Build and push
-        uses: docker/build-push-action@v3
-        env:
-          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
-          REPOSITORY: coordinator-proxy
-          IMAGE_TAG: ${{ github.ref_name }}
-        with:
-          context: .
-          file: ./build/dockerfiles/coordinator-proxy.Dockerfile
-          push: true
-          tags: |
-            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
-            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}

   coordinator-cron:
     runs-on:
       group: scroll-reth-runner-group
Cargo.lock (generated, 3228 lines changed): diff suppressed because it is too large.
Cargo.toml (17 lines changed)
@@ -10,7 +10,7 @@ resolver = "2"

 [workspace.package]
 authors = ["Scroll developers"]
-edition = "2021"
+edition = "2024"
 homepage = "https://scroll.io"
 readme = "README.md"
 repository = "https://github.com/scroll-tech/scroll"
@@ -24,26 +24,19 @@ scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag =
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll", "rkyv"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2" }
sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll"] }
axiom-sdk = { git = "https://github.com/axiom-crypto/axiom-api-cli.git", tag = "v1.0.9" }

metrics = "0.23.0"
metrics-util = "0.17"
metrics-tracing-context = "0.16.0"

anyhow = "1.0"
alloy = { version = "1", default-features = false }
alloy-primitives = { version = "1.4.1", default-features = false, features = ["tiny-keccak"] }
# also use this to trigger "serde" feature for primitives
alloy-serde = { version = "1", default-features = false }

jiff = "0.2"
serde = { version = "1", default-features = false, features = ["derive"] }
serde_json = { version = "1.0" }
serde_derive = "1.0"
serde_with = "3"
itertools = "0.14"
tiny-keccak = "2.0"
tokio = "1"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
eyre = "0.6"
once_cell = "1.20"
base64 = "0.22"
build/common.mk (new file, 16 lines)
@@ -0,0 +1,16 @@
+UNAME_S := $(shell uname -s)
+IS_DARWIN := $(findstring Darwin,$(UNAME_S))
+
+SHLIB_EXT := so
+ifeq ($(UNAME_S),Darwin)
+SHLIB_EXT := dylib
+endif
+
+LIB_ZKP_NAME := libzkp.$(SHLIB_EXT)
+
+define macos_codesign
+	@if [ -n "$(IS_DARWIN)" ]; then \
+		codesign --force --sign - '$(1)'; \
+		codesign --verify --deep --verbose '$(1)'; \
+	fi
+endef
Deleted file (name not captured in this view; by its content, the coordinator-proxy Dockerfile that the workflow above referenced as ./build/dockerfiles/coordinator-proxy.Dockerfile):
@@ -1,26 +0,0 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x


# Build coordinator proxy
FROM base as builder
COPY . .
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_proxy && mv ./build/bin/coordinator_proxy /bin/coordinator_proxy

# Pull coordinator proxy into a second stage deploy ubuntu container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install vim netcat-openbsd net-tools curl jq -y
COPY --from=builder /bin/coordinator_proxy /bin/
RUN /bin/coordinator_proxy --version
WORKDIR /app
ENTRYPOINT ["/bin/coordinator_proxy"]
Deleted file (name not captured; a .dockerignore-style ignore list):
@@ -1,8 +0,0 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*

permissionless-batches/conf/
go.mod changes (file path not captured): github.com/mitchellh/mapstructure moves from a direct to an indirect dependency.
@@ -12,7 +12,6 @@ require (
 	github.com/gin-gonic/gin v1.9.1
 	github.com/mattn/go-colorable v0.1.13
 	github.com/mattn/go-isatty v0.0.20
-	github.com/mitchellh/mapstructure v1.5.0
 	github.com/modern-go/reflect2 v1.0.2
 	github.com/orcaman/concurrent-map v1.0.0
 	github.com/prometheus/client_golang v1.19.0
@@ -148,6 +147,7 @@ require (
 	github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mitchellh/pointerstructure v1.2.0 // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/mmcloughlin/addchain v0.4.0 // indirect
Changes to the shared response type (file path not captured): the mapstructure-based DecodeData helper is removed along with its import.
@@ -4,7 +4,6 @@ import (
 	"net/http"

 	"github.com/gin-gonic/gin"
-	"github.com/mitchellh/mapstructure"
 )

 // Response the response schema
@@ -14,19 +13,6 @@ type Response struct {
 	Data    interface{} `json:"data"`
 }
-
-func (resp *Response) DecodeData(out interface{}) error {
-	// Decode generically unmarshaled JSON (map[string]any, []any) into a typed struct
-	// honoring `json` tags and allowing weak type conversions.
-	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
-		TagName: "json",
-		Result:  out,
-	})
-	if err != nil {
-		return err
-	}
-	return dec.Decode(resp.Data)
-}

 // RenderJSON renders response with json
 func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
 	var errMsg string
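For reference, a minimal sketch of how the removed DecodeData helper was used: a response body is first unmarshaled generically, then re-decoded into a typed struct via mapstructure. The ErrCode/ErrMsg field names follow the struct above; the JSON tag spellings and the sample payload are illustrative assumptions.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Response mirrors the schema above; the tag names here are assumed.
type Response struct {
	ErrCode int         `json:"errcode"`
	ErrMsg  string      `json:"errmsg"`
	Data    interface{} `json:"data"`
}

type loginSchema struct {
	Time  string `json:"time"`
	Token string `json:"token"`
}

func main() {
	raw := []byte(`{"errcode":0,"errmsg":"","data":{"time":"2025-01-01T00:00:00Z","token":"abc"}}`)
	var resp Response
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}

	// resp.Data is a map[string]interface{}; re-decode it into a typed
	// struct, as the removed DecodeData method did.
	var out loginSchema
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		TagName: "json",
		Result:  &out,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(resp.Data); err != nil {
		panic(err)
	}
	fmt.Println(out.Token) // prints: abc
}
```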
coordinator/.gitignore (vendored, 3 lines changed)
@@ -1,4 +1,5 @@
 /build/bin
 .idea
 internal/logic/verifier/lib
-internal/libzkp/lib/libzkp.so
+libzkp.so
+libzkp.dylib
Makefile changes (file path not captured; evidently the coordinator Makefile): platform logic moves into build/common.mk, binaries are codesigned on macOS, and the coordinator_proxy target is removed.
@@ -1,8 +1,10 @@
+include ../build/common.mk
+
 .PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator libzkp

 IMAGE_VERSION=latest
 REPO_ROOT_DIR=./..
-LIBZKP_PATH=./internal/logic/libzkp/lib/libzkp.so
+LIBZKP_PATH=./internal/logic/libzkp/lib/$(LIB_ZKP_NAME)

 ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
 ZKVM_VERSION=$(shell grep -m 1 "zkvm-prover?" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
@@ -27,6 +29,7 @@ libzkp: clean_libzkp $(LIBZKP_PATH)

 coordinator_api: $(LIBZKP_PATH) ## Builds the Coordinator api instance.
 	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
+	$(call macos_codesign,$(PWD)/build/bin/coordinator_api)

 coordinator_cron:
 	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron
@@ -34,10 +37,6 @@ coordinator_cron:
 coordinator_tool:
 	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool

-coordinator_proxy:
-	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_proxy ./cmd/proxy
-
 localsetup: coordinator_api ## Local setup: build coordinator_api, copy config, and setup releases
 	mkdir -p build/bin/conf
 	@echo "Copying configuration files..."
@@ -50,6 +49,8 @@ localsetup: coordinator_api ## Local setup: build coordinator_api, copy config,
 	@echo "Setting up releases..."
 	cd $(CURDIR)/build && bash setup_releases.sh

+run_coordinator_api: coordinator_api
+	cd build/bin && ./coordinator_api

 #coordinator_api_skip_libzkp:
 #	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
README changes (file path not captured; the coordinator README):
@@ -10,6 +10,8 @@ See [monorepo prerequisites](../README.md#prerequisites).

 ## Build

+Using Go version 1.22
+
 ```bash
 make clean
 make coordinator_api
Shell script changes (file path not captured; the verifier asset download script): the per-fork loop now also fetches axiom_program_ids.json.
@@ -64,9 +64,10 @@ for ((i=0; i<$VERIFIER_COUNT; i++)); do
   wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/verifier.bin -O ${ASSET_DIR}/verifier.bin
   wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/root_verifier_vk -O ${ASSET_DIR}/root_verifier_vk
   wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/openVmVk.json -O ${ASSET_DIR}/openVmVk.json

+  wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/axiom_program_ids.json -O ${ASSET_DIR}/axiom_program_ids.json

   echo "Completed downloading assets for $FORK_NAME"
   echo "---"
 done

-echo "All verifier assets downloaded successfully"
+echo "All verifier assets downloaded successfully"
Deleted file (path not captured; the coordinator proxy app entrypoint, scroll-tech/coordinator/cmd/proxy/app by the main.go import below):
@@ -1,122 +0,0 @@
package app

import (
    "context"
    "errors"
    "fmt"
    "net/http"
    "os"
    "os/signal"
    "time"

    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/urfave/cli/v2"
    "gorm.io/gorm"

    "scroll-tech/common/database"
    "scroll-tech/common/observability"
    "scroll-tech/common/utils"
    "scroll-tech/common/version"

    "scroll-tech/coordinator/internal/config"
    "scroll-tech/coordinator/internal/controller/proxy"
    "scroll-tech/coordinator/internal/route"
)

var app *cli.App

func init() {
    // Set up coordinator app info.
    app = cli.NewApp()
    app.Action = action
    app.Name = "coordinator proxy"
    app.Usage = "Proxy for multiple Scroll L2 Coordinators"
    app.Version = version.Version
    app.Flags = append(app.Flags, utils.CommonFlags...)
    app.Flags = append(app.Flags, apiFlags...)
    app.Before = func(ctx *cli.Context) error {
        return utils.LogSetup(ctx)
    }
    // Register `coordinator-test` app for integration-test.
    utils.RegisterSimulation(app, utils.CoordinatorAPIApp)
}

func action(ctx *cli.Context) error {
    cfgFile := ctx.String(utils.ConfigFileFlag.Name)
    cfg, err := config.NewProxyConfig(cfgFile)
    if err != nil {
        log.Crit("failed to load config file", "config file", cfgFile, "error", err)
    }

    var db *gorm.DB
    if dbCfg := cfg.ProxyManager.DB; dbCfg != nil {
        log.Info("Apply persistent storage", "via", cfg.ProxyManager.DB.DSN)
        db, err = database.InitDB(cfg.ProxyManager.DB)
        if err != nil {
            log.Crit("failed to init db connection", "err", err)
        }
        defer func() {
            if err = database.CloseDB(db); err != nil {
                log.Error("can not close db connection", "error", err)
            }
        }()
        observability.Server(ctx, db)
    }
    registry := prometheus.DefaultRegisterer

    apiSrv := server(ctx, cfg, db, registry)

    log.Info(
        "Start coordinator api successfully.",
        "version", version.Version,
    )

    // Catch CTRL-C to ensure a graceful shutdown.
    interrupt := make(chan os.Signal, 1)
    signal.Notify(interrupt, os.Interrupt)

    // Wait until the interrupt signal is received from an OS signal.
    <-interrupt
    log.Info("start shutdown coordinator proxy server ...")

    closeCtx, cancelExit := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancelExit()
    if err = apiSrv.Shutdown(closeCtx); err != nil {
        log.Warn("shutdown coordinator proxy server failure", "error", err)
        return nil
    }

    <-closeCtx.Done()
    log.Info("coordinator proxy server exiting success")
    return nil
}

func server(ctx *cli.Context, cfg *config.ProxyConfig, db *gorm.DB, reg prometheus.Registerer) *http.Server {
    router := gin.New()
    proxy.InitController(cfg, db, reg)
    route.ProxyRoute(router, cfg, reg)
    port := ctx.String(httpPortFlag.Name)
    srv := &http.Server{
        Addr:              fmt.Sprintf(":%s", port),
        Handler:           router,
        ReadHeaderTimeout: time.Minute,
    }

    go func() {
        if runServerErr := srv.ListenAndServe(); runServerErr != nil && !errors.Is(runServerErr, http.ErrServerClosed) {
            log.Crit("run coordinator proxy http server failure", "error", runServerErr)
        }
    }()
    return srv
}

// Run coordinator.
func Run() {
    // RunApp the coordinator.
    if err := app.Run(os.Args); err != nil {
        _, _ = fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}
Deleted file (path not captured; the proxy app's CLI flags):
@@ -1,30 +0,0 @@
package app

import "github.com/urfave/cli/v2"

var (
    apiFlags = []cli.Flag{
        // http flags
        &httpEnabledFlag,
        &httpListenAddrFlag,
        &httpPortFlag,
    }
    // httpEnabledFlag enable rpc server.
    httpEnabledFlag = cli.BoolFlag{
        Name:  "http",
        Usage: "Enable the HTTP-RPC server",
        Value: false,
    }
    // httpListenAddrFlag set the http address.
    httpListenAddrFlag = cli.StringFlag{
        Name:  "http.addr",
        Usage: "HTTP-RPC server listening interface",
        Value: "localhost",
    }
    // httpPortFlag set http.port.
    httpPortFlag = cli.IntFlag{
        Name:  "http.port",
        Usage: "HTTP-RPC server listening port",
        Value: 8590,
    }
)
Deleted file (path not captured; the proxy's main package):
@@ -1,7 +0,0 @@
package main

import "scroll-tech/coordinator/cmd/proxy/app"

func main() {
    app.Run()
}
Deleted file (path not captured; the proxy's example JSON config):
@@ -1,31 +0,0 @@
{
  "proxy_manager": {
    "proxy_cli": {
      "proxy_name": "proxy_name",
      "secret": "client private key"
    },
    "auth": {
      "secret": "proxy secret key",
      "challenge_expire_duration_sec": 3600,
      "login_expire_duration_sec": 3600
    },
    "verifier": {
      "min_prover_version": "v4.4.45",
      "verifiers": []
    },
    "db": {
      "driver_name": "postgres",
      "dsn": "postgres://localhost/scroll?sslmode=disable",
      "maxOpenNum": 200,
      "maxIdleNum": 20
    }
  },
  "coordinators": {
    "sepolia": {
      "base_url": "http://localhost:8555",
      "retry_count": 10,
      "retry_wait_time_sec": 10,
      "connection_timeout_sec": 30
    }
  }
}
Deleted file (path not captured; the proxy configuration loader):
@@ -1,74 +0,0 @@
package config

import (
    "encoding/json"
    "os"
    "path/filepath"

    "scroll-tech/common/database"
    "scroll-tech/common/utils"
)

// ProxyManager loads proxy configuration items.
type ProxyManager struct {
    // Zk verifier config, used to confine which provers may connect.
    Verifier *VerifierConfig  `json:"verifier"`
    Client   *ProxyClient     `json:"proxy_cli"`
    Auth     *Auth            `json:"auth"`
    DB       *database.Config `json:"db,omitempty"`
}

func (m *ProxyManager) Normalize() {
    if m.Client.Secret == "" {
        m.Client.Secret = m.Auth.Secret
    }

    if m.Client.ProxyVersion == "" {
        m.Client.ProxyVersion = m.Verifier.MinProverVersion
    }
}

// ProxyClient is the client configuration for connecting to an upstream as a client
type ProxyClient struct {
    ProxyName    string `json:"proxy_name"`
    ProxyVersion string `json:"proxy_version,omitempty"`
    Secret       string `json:"secret,omitempty"`
}

// UpStream is the configuration for one upstream coordinator
type UpStream struct {
    BaseUrl              string `json:"base_url"`
    RetryCount           uint   `json:"retry_count"`
    RetryWaitTime        uint   `json:"retry_wait_time_sec"`
    ConnectionTimeoutSec uint   `json:"connection_timeout_sec"`
    CompatibileMode      bool   `json:"compatible_mode,omitempty"`
}

// ProxyConfig loads configuration items.
type ProxyConfig struct {
    ProxyManager *ProxyManager        `json:"proxy_manager"`
    ProxyName    string               `json:"proxy_name"`
    Coordinators map[string]*UpStream `json:"coordinators"`
}

// NewProxyConfig returns a new instance of ProxyConfig.
func NewProxyConfig(file string) (*ProxyConfig, error) {
    buf, err := os.ReadFile(filepath.Clean(file))
    if err != nil {
        return nil, err
    }

    cfg := &ProxyConfig{}
    err = json.Unmarshal(buf, cfg)
    if err != nil {
        return nil, err
    }

    // Override config with environment variables
    err = utils.OverrideConfigWithEnv(cfg, "SCROLL_COORDINATOR_PROXY")
    if err != nil {
        return nil, err
    }

    return cfg, nil
}
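A brief usage sketch for the deleted loader above: the JSON config shown earlier is read from disk, then environment variables may override fields. The exact variable-name mapping is decided by utils.OverrideConfigWithEnv and is assumed here, as is the config path.

```go
// Hypothetical wiring; the env var name and config path are illustrative.
os.Setenv("SCROLL_COORDINATOR_PROXY_PROXY_NAME", "my-proxy")

cfg, err := config.NewProxyConfig("./conf/config.json")
if err != nil {
    log.Crit("failed to load proxy config", "error", err)
}
cfg.ProxyManager.Normalize() // fill client secret/version defaults from auth/verifier
fmt.Println(len(cfg.Coordinators), "upstream coordinators configured")
```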
Changes to the coordinator API auth controller (file path not captured): proxy-specific login handling is removed and challenge bookkeeping is reordered.
@@ -19,56 +19,28 @@ type AuthController struct {
 	loginLogic *auth.LoginLogic
 }

-func NewAuthControllerWithLogic(loginLogic *auth.LoginLogic) *AuthController {
-	return &AuthController{
-		loginLogic: loginLogic,
-	}
-}
-
 // NewAuthController returns an AuthController instance
 func NewAuthController(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *AuthController {
 	return &AuthController{
-		loginLogic: auth.NewLoginLogic(db, cfg.ProverManager.Verifier, vf),
+		loginLogic: auth.NewLoginLogic(db, cfg, vf),
 	}
 }

-// Login the api controller for login, used as the Authenticator in JWT
-// It can work in two mode: full process for normal login, or if login request
-// is posted from proxy, run a simpler process to login a client
+// Login the api controller for login
 func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
-	// check if the login is post by proxy
-	var viaProxy bool
-	if proverType, proverTypeExist := c.Get(types.ProverProviderTypeKey); proverTypeExist {
-		proverType := uint8(proverType.(float64))
-		viaProxy = proverType == types.ProverProviderTypeProxy
-	}
-
 	var login types.LoginParameter
 	if err := c.ShouldBind(&login); err != nil {
 		return "", fmt.Errorf("missing the public_key, err:%w", err)
 	}

-	// if not, process with normal login
-	if !viaProxy {
-		// check login parameter's token is equal to bearer token, the Authorization must be existed
-		// if not exist, the jwt token will intercept it
-		brearToken := c.GetHeader("Authorization")
-		if brearToken != "Bearer "+login.Message.Challenge {
-			return "", errors.New("check challenge failure for the not equal challenge string")
-		}
-
-		if err := auth.VerifyMsg(&login); err != nil {
-			return "", err
-		}
-
-		// check the challenge is used, if used, return failure
-		if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
-			return "", fmt.Errorf("login insert challenge string failure:%w", err)
-		}
-	}
+	// check login parameter's token is equal to bearer token, the Authorization must be existed
+	// if not exist, the jwt token will intercept it
+	brearToken := c.GetHeader("Authorization")
+	if brearToken != "Bearer "+login.Message.Challenge {
+		return "", errors.New("check challenge failure for the not equal challenge string")
+	}

-	if err := a.loginLogic.CompatiblityCheck(&login); err != nil {
+	if err := a.loginLogic.Check(&login); err != nil {
 		return "", fmt.Errorf("check the login parameter failure: %w", err)
 	}
@@ -77,6 +49,11 @@ func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
 		return "", fmt.Errorf("prover hard fork name failure:%w", err)
 	}

+	// check the challenge is used, if used, return failure
+	if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
+		return "", fmt.Errorf("login insert challenge string failure:%w", err)
+	}
+
 	returnData := types.LoginParameterWithHardForkName{
 		HardForkName: hardForkNames,
 		LoginParameter: login,
@@ -108,6 +85,10 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
 		c.Set(types.ProverName, proverName)
 	}

+	if publicKey, ok := claims[types.PublicKey]; ok {
+		c.Set(types.PublicKey, publicKey)
+	}
+
 	if proverVersion, ok := claims[types.ProverVersion]; ok {
 		c.Set(types.ProverVersion, proverVersion)
 	}
@@ -120,9 +101,5 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
 		c.Set(types.ProverProviderTypeKey, providerType)
 	}

-	if publicKey, ok := claims[types.PublicKey]; ok {
-		return publicKey
-	}
-
 	return nil
 }
Deleted file (path not captured; the proxy's auth controller):
@@ -1,150 +0,0 @@
package proxy

import (
    "context"
    "fmt"
    "sync"
    "time"

    jwt "github.com/appleboy/gin-jwt/v2"
    "github.com/gin-gonic/gin"
    "github.com/scroll-tech/go-ethereum/log"

    "scroll-tech/coordinator/internal/config"
    "scroll-tech/coordinator/internal/controller/api"
    "scroll-tech/coordinator/internal/logic/auth"
    "scroll-tech/coordinator/internal/logic/verifier"
    "scroll-tech/coordinator/internal/types"
)

// AuthController is login API
type AuthController struct {
    apiLogin  *api.AuthController
    clients   Clients
    proverMgr *ProverManager
}

const upstreamConnTimeout = time.Second * 5
const LoginParamCache = "login_param"
const ProverTypesKey = "prover_types"
const SignatureKey = "prover_signature"

// NewAuthController returns an AuthController instance
func NewAuthController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager) *AuthController {
    // use a dummy Verifier to create login logic (we do not use any information in verifier)
    dummyVf := verifier.Verifier{
        OpenVMVkMap: make(map[string]struct{}),
    }
    loginLogic := auth.NewLoginLogicWithSimpleDeduplicator(cfg.ProxyManager.Verifier, &dummyVf)

    authController := &AuthController{
        apiLogin:  api.NewAuthControllerWithLogic(loginLogic),
        clients:   clients,
        proverMgr: proverMgr,
    }

    return authController
}

// Login extends the Login handler in the api controller
func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
    loginRes, err := a.apiLogin.Login(c)
    if err != nil {
        return nil, err
    }
    loginParam := loginRes.(types.LoginParameterWithHardForkName)

    if loginParam.LoginParameter.Message.ProverProviderType == types.ProverProviderTypeProxy {
        return nil, fmt.Errorf("proxy does not support recursive login")
    }

    session := a.proverMgr.GetOrCreate(loginParam.PublicKey)
    log.Debug("start handling login", "cli", loginParam.Message.ProverName)

    loginCtx, cf := context.WithTimeout(context.Background(), upstreamConnTimeout)
    var wg sync.WaitGroup
    for _, cli := range a.clients {
        wg.Add(1)
        go func(cli Client) {
            defer wg.Done()
            if err := session.ProxyLogin(loginCtx, cli, &loginParam.LoginParameter); err != nil {
                log.Error("proxy login failed during token cache update",
                    "userKey", loginParam.PublicKey,
                    "upstream", cli.Name(),
                    "error", err)
            }
        }(cli)
    }
    go func(cliName string) {
        wg.Wait()
        cf()
        log.Debug("first login attempt has completed", "cli", cliName)
    }(loginParam.Message.ProverName)

    return loginParam.LoginParameter, nil
}

// PayloadFunc returns jwt.MapClaims with {public key, prover name}.
func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
    v, ok := data.(types.LoginParameter)
    if !ok {
        log.Error("PayloadFunc received unexpected type", "type", fmt.Sprintf("%T", data))
        return jwt.MapClaims{}
    }

    return jwt.MapClaims{
        types.PublicKey:             v.PublicKey,
        types.ProverName:            v.Message.ProverName,
        types.ProverVersion:         v.Message.ProverVersion,
        types.ProverProviderTypeKey: v.Message.ProverProviderType,
        SignatureKey:                v.Signature,
        ProverTypesKey:              v.Message.ProverTypes,
    }
}

// IdentityHandler replies to client for /login
func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
    claims := jwt.ExtractClaims(c)
    loginParam := &types.LoginParameter{}

    if proverName, ok := claims[types.ProverName]; ok {
        loginParam.Message.ProverName, _ = proverName.(string)
    }

    if proverVersion, ok := claims[types.ProverVersion]; ok {
        loginParam.Message.ProverVersion, _ = proverVersion.(string)
    }

    if providerType, ok := claims[types.ProverProviderTypeKey]; ok {
        num, _ := providerType.(float64)
        loginParam.Message.ProverProviderType = types.ProverProviderType(num)
    }

    if signature, ok := claims[SignatureKey]; ok {
        loginParam.Signature, _ = signature.(string)
    }

    if proverTypes, ok := claims[ProverTypesKey]; ok {
        arr, _ := proverTypes.([]any)
        for _, elm := range arr {
            num, _ := elm.(float64)
            loginParam.Message.ProverTypes = append(loginParam.Message.ProverTypes, types.ProverType(num))
        }
    }

    if publicKey, ok := claims[types.PublicKey]; ok {
        loginParam.PublicKey, _ = publicKey.(string)
    }

    if loginParam.PublicKey != "" {
        c.Set(LoginParamCache, loginParam)
        c.Set(types.ProverName, loginParam.Message.ProverName)
        // public key will also be set since we have specified public_key as the identity key
        return loginParam.PublicKey
    }

    return nil
}
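A small sketch of the round trip implemented above: PayloadFunc flattens a verified LoginParameter into JWT claims, which gin-jwt signs into the token returned by /login; IdentityHandler later rebuilds the parameter from jwt.ExtractClaims on each request. The values below are hypothetical.

```go
a := &AuthController{} // dependencies not needed for this round trip
param := types.LoginParameter{PublicKey: "02abc..."} // hypothetical key
param.Message.ProverName = "prover-1"
param.Message.ProverVersion = "v4.5.0"

claims := a.PayloadFunc(param)
fmt.Println(claims[types.ProverName]) // prints: prover-1
// On a later request, IdentityHandler reads these same claim keys back
// and caches the rebuilt parameter under LoginParamCache.
```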
Deleted file (path not captured; the proxy's upstream HTTP client):
@@ -1,246 +0,0 @@
//nolint:errcheck,bodyclose // body is closed in the following handleHttpResp call
package proxy

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "net/http"
    "time"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"

    ctypes "scroll-tech/common/types"

    "scroll-tech/coordinator/internal/config"
    "scroll-tech/coordinator/internal/types"
)

type ProxyCli interface {
    Login(ctx context.Context, genLogin func(string) (*types.LoginParameter, error)) (*ctypes.Response, error)
    ProxyLogin(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error)
    Token() string
    Reset()
}

type ProverCli interface {
    GetTask(ctx context.Context, param *types.GetTaskParameter) (*ctypes.Response, error)
    SubmitProof(ctx context.Context, param *types.SubmitProofParameter) (*ctypes.Response, error)
}

// upClient wraps an http client with a preset host for coordinator API calls
type upClient struct {
    httpClient      *http.Client
    baseURL         string
    loginToken      string
    compatibileMode bool
    resetFromMgr    func()
}

// newUpClient creates a new upClient for the given upstream configuration
func newUpClient(cfg *config.UpStream) *upClient {
    return &upClient{
        httpClient: &http.Client{
            Timeout: time.Duration(cfg.ConnectionTimeoutSec) * time.Second,
        },
        baseURL:         cfg.BaseUrl,
        compatibileMode: cfg.CompatibileMode,
    }
}

func (c *upClient) Reset() {
    if c.resetFromMgr != nil {
        c.resetFromMgr()
    }
}

func (c *upClient) Token() string {
    return c.loginToken
}

// need a parsable schema definition
type loginSchema struct {
    Time  string `json:"time"`
    Token string `json:"token"`
}

// Login performs the complete login process: get challenge then login
func (c *upClient) Login(ctx context.Context, genLogin func(string) (*types.LoginParameter, error)) (*ctypes.Response, error) {
    // Step 1: Get challenge
    url := fmt.Sprintf("%s/coordinator/v1/challenge", c.baseURL)

    req, err := http.NewRequest("GET", url, nil)
    if err != nil {
        return nil, fmt.Errorf("failed to create challenge request: %w", err)
    }

    challengeResp, err := c.httpClient.Do(req)
    if err != nil {
        return nil, fmt.Errorf("failed to get challenge: %w", err)
    }

    parsedResp, err := handleHttpResp(challengeResp)
    if err != nil {
        return nil, err
    } else if parsedResp.ErrCode != 0 {
        return nil, fmt.Errorf("challenge failed: %d (%s)", parsedResp.ErrCode, parsedResp.ErrMsg)
    }

    // Step 2: Parse challenge response
    var challengeSchema loginSchema
    if err := parsedResp.DecodeData(&challengeSchema); err != nil {
        return nil, fmt.Errorf("failed to parse challenge response: %w", err)
    }

    // Step 3: Use the token from challenge as Bearer token for login
    url = fmt.Sprintf("%s/coordinator/v1/login", c.baseURL)

    param, err := genLogin(challengeSchema.Token)
    if err != nil {
        return nil, fmt.Errorf("failed to setup login parameter: %w", err)
    }

    jsonData, err := json.Marshal(param)
    if err != nil {
        return nil, fmt.Errorf("failed to marshal login parameter: %w", err)
    }

    req, err = http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
    if err != nil {
        return nil, fmt.Errorf("failed to create login request: %w", err)
    }

    req.Header.Set("Content-Type", "application/json")
    req.Header.Set("Authorization", "Bearer "+challengeSchema.Token)

    loginResp, err := c.httpClient.Do(req)
    if err != nil {
        return nil, fmt.Errorf("failed to perform login request: %w", err)
    }
    return handleHttpResp(loginResp)
}

func handleHttpResp(resp *http.Response) (*ctypes.Response, error) {
    if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusUnauthorized {
        defer resp.Body.Close()
        var respWithData ctypes.Response
        // Note: Body is consumed after decoding, caller should not read it again
        if err := json.NewDecoder(resp.Body).Decode(&respWithData); err == nil {
            return &respWithData, nil
        } else {
            return nil, fmt.Errorf("login parsing expected response failed: %v", err)
        }
    }
    return nil, fmt.Errorf("login request failed with status: %d", resp.StatusCode)
}

func (c *upClient) proxyLoginCompatibleMode(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error) {
    mimePrivK, err := buildPrivateKey([]byte(param.PublicKey))
    if err != nil {
        return nil, err
    }
    mimePkHex := common.Bytes2Hex(crypto.CompressPubkey(&mimePrivK.PublicKey))

    genLoginParam := func(challenge string) (*types.LoginParameter, error) {
        // Create login parameter with proxy settings
        loginParam := &types.LoginParameter{
            Message:   param.Message,
            PublicKey: mimePkHex,
        }
        loginParam.Message.Challenge = challenge

        // Sign the message with the private key
        if err := loginParam.SignWithKey(mimePrivK); err != nil {
            return nil, fmt.Errorf("failed to sign login parameter: %w", err)
        }

        return loginParam, nil
    }

    return c.Login(ctx, genLoginParam)
}

// ProxyLogin makes a POST request to /v1/proxy_login with LoginParameter
func (c *upClient) ProxyLogin(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error) {
    if c.compatibileMode {
        return c.proxyLoginCompatibleMode(ctx, param)
    }

    url := fmt.Sprintf("%s/coordinator/v1/proxy_login", c.baseURL)

    jsonData, err := json.Marshal(param)
    if err != nil {
        return nil, fmt.Errorf("failed to marshal proxy login parameter: %w", err)
    }

    req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
    if err != nil {
        return nil, fmt.Errorf("failed to create proxy login request: %w", err)
    }

    req.Header.Set("Content-Type", "application/json")
    req.Header.Set("Authorization", "Bearer "+c.loginToken)

    proxyLoginResp, err := c.httpClient.Do(req)
    if err != nil {
        return nil, fmt.Errorf("failed to perform proxy login request: %w", err)
    }
    return handleHttpResp(proxyLoginResp)
}

// GetTask makes a POST request to /v1/get_task with GetTaskParameter
func (c *upClient) GetTask(ctx context.Context, param *types.GetTaskParameter) (*ctypes.Response, error) {
    url := fmt.Sprintf("%s/coordinator/v1/get_task", c.baseURL)

    jsonData, err := json.Marshal(param)
    if err != nil {
        return nil, fmt.Errorf("failed to marshal get task parameter: %w", err)
    }

    req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
    if err != nil {
        return nil, fmt.Errorf("failed to create get task request: %w", err)
    }

    req.Header.Set("Content-Type", "application/json")
    if c.loginToken != "" {
        req.Header.Set("Authorization", "Bearer "+c.loginToken)
    }

    resp, err := c.httpClient.Do(req)
    if err != nil {
        return nil, err
    }
    return handleHttpResp(resp)
}

// SubmitProof makes a POST request to /v1/submit_proof with SubmitProofParameter
func (c *upClient) SubmitProof(ctx context.Context, param *types.SubmitProofParameter) (*ctypes.Response, error) {
    url := fmt.Sprintf("%s/coordinator/v1/submit_proof", c.baseURL)

    jsonData, err := json.Marshal(param)
    if err != nil {
        return nil, fmt.Errorf("failed to marshal submit proof parameter: %w", err)
    }

    req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
    if err != nil {
        return nil, fmt.Errorf("failed to create submit proof request: %w", err)
    }

    req.Header.Set("Content-Type", "application/json")
    if c.loginToken != "" {
        req.Header.Set("Authorization", "Bearer "+c.loginToken)
    }

    resp, err := c.httpClient.Do(req)
    if err != nil {
        return nil, err
    }
    return handleHttpResp(resp)
}
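To make the two-step flow concrete, a hedged sketch of driving the deleted Login method: the caller supplies a callback that turns the challenge token into a signed LoginParameter. Names and values below are illustrative.

```go
cli := newUpClient(&config.UpStream{
    BaseUrl:              "http://localhost:8555",
    ConnectionTimeoutSec: 30,
})

resp, err := cli.Login(context.Background(), func(challenge string) (*types.LoginParameter, error) {
    p := &types.LoginParameter{PublicKey: "02abc..."} // hypothetical identity
    p.Message.Challenge = challenge
    p.Message.ProverName = "example-proxy"
    // ...sign p with the matching private key, as genLoginParam does below...
    return p, nil
})
if err == nil && resp.ErrCode == 0 {
    var result loginSchema
    if derr := resp.DecodeData(&result); derr == nil {
        cli.loginToken = result.Token // Bearer token for get_task/submit_proof
    }
}
```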
Deleted file (path not captured; the proxy's upstream client manager):
@@ -1,220 +0,0 @@
package proxy

import (
    "context"
    "crypto/ecdsa"
    "fmt"
    "sync"
    "time"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/scroll-tech/go-ethereum/log"

    "scroll-tech/common/version"

    "scroll-tech/coordinator/internal/config"
    "scroll-tech/coordinator/internal/types"
)

type Client interface {
    // a client to access upstream coordinator with specified identity
    // so prover can contact with coordinator as itself
    Client(string) ProverCli
    // the client to access upstream as proxy itself
    ClientAsProxy(context.Context) ProxyCli
    Name() string
}

type ClientManager struct {
    name    string
    cliCfg  *config.ProxyClient
    cfg     *config.UpStream
    privKey *ecdsa.PrivateKey

    cachedCli struct {
        sync.RWMutex
        cli           *upClient
        completionCtx context.Context
    }
}

// buildPrivateKey safely transforms arbitrary bytes into a valid private key
func buildPrivateKey(inputBytes []byte) (*ecdsa.PrivateKey, error) {
    // Try appending bytes from 0x0 to 0x20 until we get a valid private key
    for appendByte := byte(0x0); appendByte <= 0x20; appendByte++ {
        // Append the byte to input
        extendedBytes := append(inputBytes, appendByte)

        // Calculate 256-bit hash
        hash := crypto.Keccak256(extendedBytes)

        // Try to create private key from hash
        if k, err := crypto.ToECDSA(hash); err == nil {
            return k, nil
        }
    }

    return nil, fmt.Errorf("failed to generate valid private key from input bytes")
}

func NewClientManager(name string, cliCfg *config.ProxyClient, cfg *config.UpStream) (*ClientManager, error) {
    log.Info("init client", "name", name, "upcfg", cfg.BaseUrl, "compatible mode", cfg.CompatibileMode)
    privKey, err := buildPrivateKey([]byte(cliCfg.Secret))
    if err != nil {
        return nil, err
    }

    return &ClientManager{
        name:    name,
        privKey: privKey,
        cfg:     cfg,
        cliCfg:  cliCfg,
    }, nil
}

type ctxKeyType string

const loginCliKey ctxKeyType = "cli"

func (cliMgr *ClientManager) doLogin(ctx context.Context, loginCli *upClient) {
    if cliMgr.cfg.CompatibileMode {
        loginCli.loginToken = "dummy"
        log.Info("Skip login process for compatible mode")
        return
    }

    // Calculate wait time between 2 seconds and cfg.RetryWaitTime
    minWait := 2 * time.Second
    waitDuration := time.Duration(cliMgr.cfg.RetryWaitTime) * time.Second
    if waitDuration < minWait {
        waitDuration = minWait
    }

    for {
        log.Info("proxy attempting login to upstream coordinator", "name", cliMgr.name)
        loginResp, err := loginCli.Login(ctx, cliMgr.genLoginParam)
        if err == nil && loginResp.ErrCode == 0 {
            var loginResult loginSchema
            err = loginResp.DecodeData(&loginResult)
            if err != nil {
                log.Error("login parsing data fail", "error", err)
            } else {
                loginCli.loginToken = loginResult.Token
                log.Info("login to upstream coordinator successful", "name", cliMgr.name, "time", loginResult.Time)
                // TODO: we need to parse time if we start making use of it
                return
            }
        } else if err != nil {
            log.Error("login process fail", "error", err)
        } else {
            log.Error("login get fail resp", "code", loginResp.ErrCode, "msg", loginResp.ErrMsg)
        }

        log.Info("login to upstream coordinator failed, retrying", "name", cliMgr.name, "error", err, "waitDuration", waitDuration)

        timer := time.NewTimer(waitDuration)
        select {
        case <-ctx.Done():
            timer.Stop()
            return
        case <-timer.C:
            // Continue to next retry
        }
    }
}

func (cliMgr *ClientManager) Name() string {
    return cliMgr.name
}

func (cliMgr *ClientManager) Client(token string) ProverCli {
    loginCli := newUpClient(cliMgr.cfg)
    loginCli.loginToken = token
    return loginCli
}

func (cliMgr *ClientManager) ClientAsProxy(ctx context.Context) ProxyCli {
    cliMgr.cachedCli.RLock()
    if cliMgr.cachedCli.cli != nil {
        defer cliMgr.cachedCli.RUnlock()
        return cliMgr.cachedCli.cli
    }
    cliMgr.cachedCli.RUnlock()

    cliMgr.cachedCli.Lock()
    if cliMgr.cachedCli.cli != nil {
        defer cliMgr.cachedCli.Unlock()
        return cliMgr.cachedCli.cli
    }

    var completionCtx context.Context
    // Check if completion context is set
    if cliMgr.cachedCli.completionCtx != nil {
        completionCtx = cliMgr.cachedCli.completionCtx
    } else {
        // Set new completion context and launch login goroutine
        ctx, completionDone := context.WithCancel(context.TODO())
        loginCli := newUpClient(cliMgr.cfg)
        loginCli.resetFromMgr = func() {
            cliMgr.cachedCli.Lock()
            if cliMgr.cachedCli.cli == loginCli {
                log.Info("cached client cleared", "name", cliMgr.name)
                cliMgr.cachedCli.cli = nil
            }
            cliMgr.cachedCli.Unlock()
        }
        completionCtx = context.WithValue(ctx, loginCliKey, loginCli)
        cliMgr.cachedCli.completionCtx = completionCtx

        // Launch keep-login goroutine
        go func() {
            defer completionDone()
            cliMgr.doLogin(context.Background(), loginCli)

            cliMgr.cachedCli.Lock()
            cliMgr.cachedCli.cli = loginCli
            cliMgr.cachedCli.completionCtx = nil
            cliMgr.cachedCli.Unlock()
        }()
    }
    cliMgr.cachedCli.Unlock()

    // Wait for completion or request cancellation
    select {
    case <-ctx.Done():
        return nil
    case <-completionCtx.Done():
        cli := completionCtx.Value(loginCliKey).(*upClient)
        return cli
    }
}

func (cliMgr *ClientManager) genLoginParam(challenge string) (*types.LoginParameter, error) {
    // Generate public key string
    publicKeyHex := common.Bytes2Hex(crypto.CompressPubkey(&cliMgr.privKey.PublicKey))

    // Create login parameter with proxy settings
    loginParam := &types.LoginParameter{
        Message: types.Message{
            Challenge:          challenge,
            ProverName:         cliMgr.cliCfg.ProxyName,
            ProverVersion:      version.Version,
            ProverProviderType: types.ProverProviderTypeProxy,
            ProverTypes:        []types.ProverType{}, // Default empty
            VKs:                []string{},           // Default empty
        },
        PublicKey: publicKeyHex,
    }

    // Sign the message with the private key
    if err := loginParam.SignWithKey(cliMgr.privKey); err != nil {
        return nil, fmt.Errorf("failed to sign login parameter: %w", err)
    }

    return loginParam, nil
}
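The derivation in buildPrivateKey is deterministic: the same configured secret always yields the same key, and therefore the same proxy identity across restarts. A quick sketch (same package, using the go-ethereum crypto helpers already imported above; the secret string is illustrative):

```go
k1, _ := buildPrivateKey([]byte("proxy secret key"))
k2, _ := buildPrivateKey([]byte("proxy secret key"))

pk1 := common.Bytes2Hex(crypto.CompressPubkey(&k1.PublicKey))
pk2 := common.Bytes2Hex(crypto.CompressPubkey(&k2.PublicKey))
fmt.Println(pk1 == pk2) // prints: true; identity is stable for a fixed secret
```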
Deleted file (path not captured; the proxy's controller bootstrap):
@@ -1,44 +0,0 @@
package proxy

import (
    "github.com/prometheus/client_golang/prometheus"
    "gorm.io/gorm"

    "scroll-tech/coordinator/internal/config"
)

var (
    // GetTask the prover task controller
    GetTask *GetTaskController
    // SubmitProof the submit proof controller
    SubmitProof *SubmitProofController
    // Auth the auth controller
    Auth *AuthController
)

// Clients manages a set of thread-safe clients for requesting upstream
// coordinators
type Clients map[string]Client

// InitController inits Controller with database
func InitController(cfg *config.ProxyConfig, db *gorm.DB, reg prometheus.Registerer) {
    // normalize cfg
    cfg.ProxyManager.Normalize()

    clients := make(map[string]Client)

    for nm, upCfg := range cfg.Coordinators {
        cli, err := NewClientManager(nm, cfg.ProxyManager.Client, upCfg)
        if err != nil {
            panic("create new client fail")
        }
        clients[cli.Name()] = cli
    }

    proverManager := NewProverManagerWithPersistent(100, db)
    priorityManager := NewPriorityUpstreamManagerPersistent(db)

    Auth = NewAuthController(cfg, clients, proverManager)
    GetTask = NewGetTaskController(cfg, clients, proverManager, priorityManager, reg)
    SubmitProof = NewSubmitProofController(cfg, clients, proverManager, priorityManager, reg)
}
Deleted file (path not captured; the proxy's get_task controller):
@@ -1,229 +0,0 @@
package proxy

import (
    "fmt"
    "math/rand"
    "sync"

    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/scroll-tech/go-ethereum/log"
    "gorm.io/gorm"

    "scroll-tech/common/types"

    "scroll-tech/coordinator/internal/config"
    coordinatorType "scroll-tech/coordinator/internal/types"
)

func getSessionData(ctx *gin.Context) (string, string) {
    publicKeyData, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
    publicKey, castOk := publicKeyData.(string)
    if !publicKeyExist || !castOk {
        nerr := fmt.Errorf("no public key binding: %v", publicKeyData)
        log.Warn("get_task parameter fail", "error", nerr)

        types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
        return "", ""
    }

    publicNameData, publicNameExist := ctx.Get(coordinatorType.ProverName)
    publicName, castOk := publicNameData.(string)
    if !publicNameExist || !castOk {
        log.Error("no public name binding for unknown reason, but we still forward with name = 'unknown'", "data", publicNameData)
        publicName = "unknown"
    }

    return publicKey, publicName
}

// PriorityUpstreamManager manages priority upstream mappings with thread safety
type PriorityUpstreamManager struct {
    sync.RWMutex
    *proverPriorityPersist
    data map[string]string
}

// NewPriorityUpstreamManager creates a new PriorityUpstreamManager
func NewPriorityUpstreamManager() *PriorityUpstreamManager {
    return &PriorityUpstreamManager{
        data: make(map[string]string),
    }
}

// NewPriorityUpstreamManagerPersistent creates a new PriorityUpstreamManager backed by persistent storage
func NewPriorityUpstreamManagerPersistent(db *gorm.DB) *PriorityUpstreamManager {
    return &PriorityUpstreamManager{
        data:                  make(map[string]string),
        proverPriorityPersist: NewProverPriorityPersist(db),
    }
}

// Get retrieves the priority upstream for a given key
func (p *PriorityUpstreamManager) Get(key string) (string, bool) {
    p.RLock()
    value, exists := p.data[key]
    p.RUnlock()

    if !exists {
        if v, err := p.proverPriorityPersist.Get(key); err != nil {
            log.Error("persistent priority record read failure", "error", err, "key", key)
        } else if v != "" {
            log.Debug("restore record from persistent layer", "key", key, "value", v)
            return v, true
        }
    }

    return value, exists
}

// Set sets the priority upstream for a given key
func (p *PriorityUpstreamManager) Set(key, value string) {
    defer func() {
        if err := p.proverPriorityPersist.Update(key, value); err != nil {
            log.Error("update priority record failure", "error", err, "key", key, "value", value)
        }
    }()
    p.Lock()
    defer p.Unlock()
    p.data[key] = value
}

// Delete removes the priority upstream for a given key
func (p *PriorityUpstreamManager) Delete(key string) {
    defer func() {
        if err := p.proverPriorityPersist.Del(key); err != nil {
            log.Error("delete priority record failure", "error", err, "key", key)
        }
    }()
    p.Lock()
    defer p.Unlock()
    delete(p.data, key)
}

// GetTaskController the get prover task api controller
type GetTaskController struct {
    proverMgr        *ProverManager
    clients          Clients
    priorityUpstream *PriorityUpstreamManager

    //workingRnd *rand.Rand
    //getTaskAccessCounter *prometheus.CounterVec
}

// NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager, priorityMgr *PriorityUpstreamManager, reg prometheus.Registerer) *GetTaskController {
    // TODO: implement proxy get task controller initialization
    return &GetTaskController{
        priorityUpstream: priorityMgr,
        proverMgr:        proverMgr,
        clients:          clients,
    }
}

// func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
// 	// TODO: implement proxy get task access counter
// 	return nil
// }

// GetTasks get assigned chunk/batch task
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
    var getTaskParameter coordinatorType.GetTaskParameter
    if err := ctx.ShouldBind(&getTaskParameter); err != nil {
        nerr := fmt.Errorf("prover task parameter invalid, err:%w", err)
        types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
        return
    }

    publicKey, proverName := getSessionData(ctx)
    if publicKey == "" {
        return
    }

    session := ptc.proverMgr.Get(publicKey)
    if session == nil {
        nerr := fmt.Errorf("can not get session for prover %s", proverName)
        types.RenderFailure(ctx, types.InternalServerError, nerr)
        return
    }

    getTask := func(cli Client) (error, int) {
        log.Debug("Start get task", "up", cli.Name(), "cli", proverName)
        upStream := cli.Name()
        resp, err := session.GetTask(ctx, &getTaskParameter, cli)
        if err != nil {
            log.Error("Upstream error for get task", "error", err, "up", upStream, "cli", proverName)
            return err, types.ErrCoordinatorGetTaskFailure
        } else if resp.ErrCode != types.ErrCoordinatorEmptyProofData {
            if resp.ErrCode != 0 {
                // simply dispatch the error from upstream to prover
                log.Error("Upstream has error resp for get task", "code", resp.ErrCode, "msg", resp.ErrMsg, "up", upStream, "cli", proverName)
                return fmt.Errorf("upstream failure %s:", resp.ErrMsg), resp.ErrCode
            }

            var task coordinatorType.GetTaskSchema
            if err = resp.DecodeData(&task); err == nil {
                task.TaskID = formUpstreamWithTaskName(upStream, task.TaskID)
                ptc.priorityUpstream.Set(publicKey, upStream)
                log.Debug("Upstream get task", "up", upStream, "cli", proverName, "taskID", task.TaskID, "taskType", task.TaskType)
                types.RenderSuccess(ctx, &task)
                return nil, 0
            } else {
                log.Error("Upstream has wrong data for get task", "error", err, "up", upStream, "cli", proverName)
                return fmt.Errorf("decode task fail: %v", err), types.InternalServerError
            }
        }

        return nil, resp.ErrCode
    }

    // if the priority upstream is set, we try that upstream first until we get either a task resp or a no-task resp
    priorityUpstream, exist := ptc.priorityUpstream.Get(publicKey)
    if exist {
        cli := ptc.clients[priorityUpstream]
        log.Debug("Try get task from priority stream", "up", priorityUpstream, "cli", proverName)
        if cli != nil {
            err, code := getTask(cli)
            if err != nil {
                types.RenderFailure(ctx, code, err)
                return
            } else if code == 0 {
                // get task done and rendered, return
                return
            }
            // only continue if we got an empty task (the task has been removed in upstream)
            log.Debug("can not get priority task from upstream", "up", priorityUpstream, "cli", proverName)
        } else {
            log.Warn("An upstream is removed or lost for some reason while running", "up", priorityUpstream, "cli", proverName)
        }
    }
    ptc.priorityUpstream.Delete(publicKey)

    // Create a slice to hold the keys
    keys := make([]string, 0, len(ptc.clients))
    for k := range ptc.clients {
        keys = append(keys, k)
    }

    // Shuffle the keys using a local RNG (avoid deprecated rand.Seed)
    rand.Shuffle(len(keys), func(i, j int) {
        keys[i], keys[j] = keys[j], keys[i]
    })

    // Iterate over the shuffled keys
    for _, n := range keys {
        if err, code := getTask(ptc.clients[n]); err == nil && code == 0 {
            // get task done
            return
        }
    }

    log.Debug("get no task from upstream", "cli", proverName)
    // if all get task failed, throw empty proof resp
    types.RenderFailure(ctx, types.ErrCoordinatorEmptyProofData, fmt.Errorf("get empty prover task"))
}
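A sketch of the sticky-routing behavior these helpers give GetTasks: after a successful get_task, the upstream is remembered per prover key and tried first next time, and it is dropped once it stops yielding tasks. The in-memory variant is shown; key and upstream names are hypothetical.

```go
pm := NewPriorityUpstreamManager() // no persistence; Get/Set remain nil-safe
pm.Set("prover-pubkey", "sepolia") // remembered after a successful get_task

if up, ok := pm.Get("prover-pubkey"); ok {
    fmt.Println("try first:", up) // prints: try first: sepolia
}

pm.Delete("prover-pubkey") // dropped once the priority upstream has no task
```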
@@ -1,125 +0,0 @@
package proxy

import (
	"time"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"

	"scroll-tech/coordinator/internal/types"
)

type proverDataPersist struct {
	db *gorm.DB
}

// NewProverDataPersist creates a persistence instance backed by a gorm DB.
func NewProverDataPersist(db *gorm.DB) *proverDataPersist {
	return &proverDataPersist{db: db}
}

// gorm model mapping to table `prover_sessions`
type proverSessionRecord struct {
	PublicKey string    `gorm:"column:public_key;not null"`
	Upstream  string    `gorm:"column:upstream;not null"`
	UpToken   string    `gorm:"column:up_token;not null"`
	Expired   time.Time `gorm:"column:expired;not null"`
}

func (proverSessionRecord) TableName() string { return "prover_sessions" }

// gorm model mapping to table `priority_upstream`
type priorityUpstreamRecord struct {
	PublicKey string `gorm:"column:public_key;not null"`
	Upstream  string `gorm:"column:upstream;not null"`
}

func (priorityUpstreamRecord) TableName() string { return "priority_upstream" }

// Get retrieves the proverSession for a given user key; it returns nil if no record exists yet
func (p *proverDataPersist) Get(userKey string) (*proverSession, error) {
	if p == nil || p.db == nil {
		return nil, nil
	}

	var rows []proverSessionRecord
	if err := p.db.Where("public_key = ?", userKey).Find(&rows).Error; err != nil || len(rows) == 0 {
		return nil, err
	}

	ret := &proverSession{
		proverToken: make(map[string]loginToken),
	}
	for _, r := range rows {
		ls := &types.LoginSchema{
			Token: r.UpToken,
			Time:  r.Expired,
		}
		ret.proverToken[r.Upstream] = loginToken{LoginSchema: ls}
	}
	return ret, nil
}

func (p *proverDataPersist) Update(userKey, up string, login *types.LoginSchema) error {
	if p == nil || p.db == nil || login == nil {
		return nil
	}

	rec := proverSessionRecord{
		PublicKey: userKey,
		Upstream:  up,
		UpToken:   login.Token,
		Expired:   login.Time,
	}

	return p.db.Clauses(
		clause.OnConflict{
			Columns:   []clause.Column{{Name: "public_key"}, {Name: "upstream"}},
			DoUpdates: clause.AssignmentColumns([]string{"up_token", "expired"}),
		},
	).Create(&rec).Error
}

type proverPriorityPersist struct {
	db *gorm.DB
}

func NewProverPriorityPersist(db *gorm.DB) *proverPriorityPersist {
	return &proverPriorityPersist{db: db}
}

func (p *proverPriorityPersist) Get(userKey string) (string, error) {
	if p == nil || p.db == nil {
		return "", nil
	}
	var rec priorityUpstreamRecord
	if err := p.db.Where("public_key = ?", userKey).First(&rec).Error; err != nil {
		if err != gorm.ErrRecordNotFound {
			return "", err
		} else {
			return "", nil
		}
	}
	return rec.Upstream, nil
}

func (p *proverPriorityPersist) Update(userKey, up string) error {
	if p == nil || p.db == nil {
		return nil
	}
	rec := priorityUpstreamRecord{PublicKey: userKey, Upstream: up}
	return p.db.Clauses(
		clause.OnConflict{
			Columns:   []clause.Column{{Name: "public_key"}},
			DoUpdates: clause.Assignments(map[string]interface{}{"upstream": up}),
		},
	).Create(&rec).Error
}

func (p *proverPriorityPersist) Del(userKey string) error {
	if p == nil || p.db == nil {
		return nil
	}
	return p.db.Where("public_key = ?", userKey).Delete(&priorityUpstreamRecord{}).Error
}
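Both Update methods above rely on gorm's clause.OnConflict to turn Create into an upsert keyed on the conflict columns. A self-contained sketch of that pattern follows, assuming the sqlite driver (gorm.io/driver/sqlite) and a hypothetical session model; on Postgres the same clause renders as INSERT ... ON CONFLICT ... DO UPDATE.

package main

import (
	"fmt"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
)

// session mirrors the shape of proverSessionRecord, with a composite
// uniqueness over (public_key, upstream) that the OnConflict clause relies on.
type session struct {
	PublicKey string `gorm:"column:public_key;uniqueIndex:idx_pk_up"`
	Upstream  string `gorm:"column:upstream;uniqueIndex:idx_pk_up"`
	UpToken   string `gorm:"column:up_token"`
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&session{}); err != nil {
		panic(err)
	}

	upsert := func(token string) error {
		rec := session{PublicKey: "pk1", Upstream: "coordinator_0", UpToken: token}
		// Insert, or update up_token when the (public_key, upstream) pair exists.
		return db.Clauses(clause.OnConflict{
			Columns:   []clause.Column{{Name: "public_key"}, {Name: "upstream"}},
			DoUpdates: clause.AssignmentColumns([]string{"up_token"}),
		}).Create(&rec).Error
	}

	_ = upsert("token-a")
	_ = upsert("token-b") // overwrites token-a instead of failing

	var got session
	db.First(&got, "public_key = ?", "pk1")
	fmt.Println(got.UpToken) // token-b
}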
@@ -1,285 +0,0 @@
package proxy

import (
	"context"
	"fmt"
	"math"
	"sync"

	"gorm.io/gorm"

	"github.com/scroll-tech/go-ethereum/log"

	ctypes "scroll-tech/common/types"

	"scroll-tech/coordinator/internal/types"
)

type ProverManager struct {
	sync.RWMutex
	data               map[string]*proverSession
	willDeprecatedData map[string]*proverSession
	sizeLimit          int
	persistent         *proverDataPersist
}

func NewProverManager(size int) *ProverManager {
	return &ProverManager{
		data:               make(map[string]*proverSession),
		willDeprecatedData: make(map[string]*proverSession),
		sizeLimit:          size,
	}
}

func NewProverManagerWithPersistent(size int, db *gorm.DB) *ProverManager {
	return &ProverManager{
		data:               make(map[string]*proverSession),
		willDeprecatedData: make(map[string]*proverSession),
		sizeLimit:          size,
		persistent:         NewProverDataPersist(db),
	}
}

// Get retrieves the proverSession for a given user key; it returns nil if no session exists yet
func (m *ProverManager) Get(userKey string) (ret *proverSession) {
	defer func() {
		if ret == nil {
			var err error
			ret, err = m.persistent.Get(userKey)
			if err != nil {
				log.Error("Get persistent layer for prover tokens fail", "error", err)
			} else if ret != nil {
				log.Debug("restore record from persistent", "key", userKey, "token", ret.proverToken)
				ret.persistent = m.persistent
			}
		}

		if ret != nil {
			m.Lock()
			m.data[userKey] = ret
			m.Unlock()
		}
	}()

	m.RLock()
	defer m.RUnlock()
	if r, existed := m.data[userKey]; existed {
		return r
	} else {
		return m.willDeprecatedData[userKey]
	}
}

func (m *ProverManager) GetOrCreate(userKey string) *proverSession {
	if ret := m.Get(userKey); ret != nil {
		return ret
	}

	m.Lock()
	defer m.Unlock()

	ret := &proverSession{
		proverToken: make(map[string]loginToken),
		persistent:  m.persistent,
	}

	if len(m.data) >= m.sizeLimit {
		m.willDeprecatedData = m.data
		m.data = make(map[string]*proverSession)
	}

	m.data[userKey] = ret
	return ret
}

type loginToken struct {
	*types.LoginSchema
	phase uint
}

// proverSession tracks the per-prover login tokens held against each upstream coordinator
type proverSession struct {
	persistent *proverDataPersist

	sync.RWMutex
	proverToken   map[string]loginToken
	completionCtx context.Context
}

func (c *proverSession) maintainLogin(ctx context.Context, cliMgr Client, up string, param *types.LoginParameter, phase uint) (result loginToken, nerr error) {
	c.Lock()
	curPhase := c.proverToken[up].phase
	if c.completionCtx != nil {
		waitctx := c.completionCtx
		c.Unlock()
		select {
		case <-waitctx.Done():
			return c.maintainLogin(ctx, cliMgr, up, param, phase)
		case <-ctx.Done():
			nerr = fmt.Errorf("ctx fail")
			return
		}
	}

	if phase < curPhase {
		// outdated login phase, give up
		log.Debug("drop outdated proxy login attempt", "upstream", up, "cli", param.Message.ProverName, "phase", phase, "now", curPhase)
		defer c.Unlock()
		return c.proverToken[up], nil
	}

	// occupy the update slot
	completeCtx, cf := context.WithCancel(ctx)
	defer cf()
	c.completionCtx = completeCtx
	defer func() {
		c.Lock()
		c.completionCtx = nil
		if result.LoginSchema != nil {
			c.proverToken[up] = result
			log.Info("maintain login status", "upstream", up, "cli", param.Message.ProverName, "phase", curPhase+1)
		}
		c.Unlock()
		if nerr != nil {
			log.Error("maintain login fail", "error", nerr, "upstream", up, "cli", param.Message.ProverName, "phase", curPhase)
		}
	}()
	c.Unlock()

	log.Debug("start proxy login process", "upstream", up, "cli", param.Message.ProverName)

	cli := cliMgr.ClientAsProxy(ctx)
	if cli == nil {
		nerr = fmt.Errorf("get upstream cli fail")
		return
	}

	resp, err := cli.ProxyLogin(ctx, param)
	if err != nil {
		nerr = fmt.Errorf("proxylogin fail: %v", err)
		return
	}

	if resp.ErrCode == ctypes.ErrJWTTokenExpired {
		log.Info("upstream token has expired, renew upstream connection", "up", up)
		cli.Reset()
		cli = cliMgr.ClientAsProxy(ctx)
		if cli == nil {
			nerr = fmt.Errorf("get upstream cli fail (secondary try)")
			return
		}

		// like the SDK, we try one more time if the upstream token is expired
		resp, err = cli.ProxyLogin(ctx, param)
		if err != nil {
			nerr = fmt.Errorf("proxylogin fail: %v", err)
			return
		}
	}

	if resp.ErrCode != 0 {
		nerr = fmt.Errorf("upstream fail: %d (%s)", resp.ErrCode, resp.ErrMsg)
		return
	}

	var loginResult loginSchema
	if err := resp.DecodeData(&loginResult); err != nil {
		nerr = err
		return
	}

	log.Debug("Proxy login done", "upstream", up, "cli", param.Message.ProverName)
	result = loginToken{
		LoginSchema: &types.LoginSchema{
			Token: loginResult.Token,
		},
		phase: curPhase + 1,
	}
	return
}

// const expireTolerant = 10 * time.Minute

// ProxyLogin makes a POST request to /v1/proxy_login with LoginParameter
func (c *proverSession) ProxyLogin(ctx context.Context, cli Client, param *types.LoginParameter) error {
	up := cli.Name()
	c.RLock()
	existedToken := c.proverToken[up]
	c.RUnlock()

	newtoken, err := c.maintainLogin(ctx, cli, up, param, math.MaxUint)
	if newtoken.phase > existedToken.phase {
		if err := c.persistent.Update(param.PublicKey, up, newtoken.LoginSchema); err != nil {
			log.Error("Update persistent layer for prover tokens fail", "error", err)
		}
	}

	return err
}

// GetTask makes a POST request to /v1/get_task with GetTaskParameter
func (c *proverSession) GetTask(ctx context.Context, param *types.GetTaskParameter, cliMgr Client) (*ctypes.Response, error) {
	up := cliMgr.Name()
	c.RLock()
	log.Debug("call get task", "up", up, "tokens", c.proverToken)
	token := c.proverToken[up]
	c.RUnlock()

	if token.LoginSchema != nil {
		resp, err := cliMgr.Client(token.Token).GetTask(ctx, param)
		if err != nil {
			return nil, err
		}
		if resp.ErrCode != ctypes.ErrJWTTokenExpired {
			return resp, nil
		}
	}

	// like the SDK, we try one more time if the upstream token is expired
	// get param from ctx
	loginParam, ok := ctx.Value(LoginParamCache).(*types.LoginParameter)
	if !ok {
		return nil, fmt.Errorf("unexpected error: no login param in ctx")
	}

	newToken, err := c.maintainLogin(ctx, cliMgr, up, loginParam, token.phase)
	if err != nil {
		return nil, fmt.Errorf("update prover token fail: %v", err)
	}

	return cliMgr.Client(newToken.Token).GetTask(ctx, param)
}

// SubmitProof makes a POST request to /v1/submit_proof with SubmitProofParameter
func (c *proverSession) SubmitProof(ctx context.Context, param *types.SubmitProofParameter, cliMgr Client) (*ctypes.Response, error) {
	up := cliMgr.Name()
	c.RLock()
	token := c.proverToken[up]
	c.RUnlock()

	if token.LoginSchema != nil {
		resp, err := cliMgr.Client(token.Token).SubmitProof(ctx, param)
		if err != nil {
			return nil, err
		}
		if resp.ErrCode != ctypes.ErrJWTTokenExpired {
			return resp, nil
		}
	}

	// like the SDK, we try one more time if the upstream token is expired
	// get param from ctx
	loginParam, ok := ctx.Value(LoginParamCache).(*types.LoginParameter)
	if !ok {
		return nil, fmt.Errorf("unexpected error: no login param in ctx")
	}

	newToken, err := c.maintainLogin(ctx, cliMgr, up, loginParam, token.phase)
	if err != nil {
		return nil, fmt.Errorf("update prover token fail: %v", err)
	}

	return cliMgr.Client(newToken.Token).SubmitProof(ctx, param)
}
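maintainLogin above implements a single-flight token refresh: the first caller occupies the update slot and performs the network login, concurrent callers block on completionCtx, and the monotonically increasing phase lets late callers detect that a fresh token already landed. The skeleton below is a hypothetical reduction of that pattern; the refresher type and fetch callback are illustrative, not from this codebase.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// refresher serializes token refreshes the way maintainLogin does: one
// caller performs the refresh while concurrent callers block on a
// completion context, then retry against the (now newer) phase.
type refresher struct {
	mu      sync.Mutex
	phase   uint
	token   string
	pending context.Context
	done    context.CancelFunc
}

func (r *refresher) get(ctx context.Context, seenPhase uint, fetch func() string) (string, error) {
	r.mu.Lock()
	if r.pending != nil { // someone else is already refreshing
		wait := r.pending
		r.mu.Unlock()
		select {
		case <-wait.Done():
			return r.get(ctx, seenPhase, fetch) // retry with fresh state
		case <-ctx.Done():
			return "", ctx.Err()
		}
	}
	if seenPhase < r.phase { // a newer token already landed; reuse it
		defer r.mu.Unlock()
		return r.token, nil
	}
	r.pending, r.done = context.WithCancel(context.Background())
	r.mu.Unlock()

	tok := fetch() // the slow network call happens outside the lock

	r.mu.Lock()
	r.token, r.phase = tok, r.phase+1
	r.done() // wake the waiters
	r.pending, r.done = nil, nil
	r.mu.Unlock()
	return tok, nil
}

func main() {
	r := &refresher{}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			tok, _ := r.get(context.Background(), 0, func() string {
				time.Sleep(10 * time.Millisecond)
				return "tok-1"
			})
			fmt.Println(tok) // all three goroutines share one fetch
		}()
	}
	wg.Wait()
}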
@@ -1,107 +0,0 @@
package proxy

import (
	"testing"
)

// TestProverManagerGetAndCreate validates basic creation and retrieval semantics.
func TestProverManagerGetAndCreate(t *testing.T) {
	pm := NewProverManager(2)

	if got := pm.Get("user1"); got != nil {
		t.Fatalf("expected nil for non-existent key, got: %+v", got)
	}

	sess1 := pm.GetOrCreate("user1")
	if sess1 == nil {
		t.Fatalf("expected non-nil session from GetOrCreate")
	}

	// Should be stable on subsequent Get
	if got := pm.Get("user1"); got != sess1 {
		t.Fatalf("expected same session pointer on Get, got different instance: %p vs %p", got, sess1)
	}
}

// TestProverManagerRolloverAndPromotion verifies rollover when sizeLimit is reached
// and that old entries are accessible and promoted back to active data map.
func TestProverManagerRolloverAndPromotion(t *testing.T) {
	pm := NewProverManager(2)

	s1 := pm.GetOrCreate("u1")
	s2 := pm.GetOrCreate("u2")
	if s1 == nil || s2 == nil {
		t.Fatalf("expected sessions to be created for u1/u2")
	}

	// Precondition: data should contain 2 entries, no deprecated yet.
	pm.RLock()
	if len(pm.data) != 2 {
		pm.RUnlock()
		t.Fatalf("expected data len=2 before rollover, got %d", len(pm.data))
	}
	if len(pm.willDeprecatedData) != 0 {
		pm.RUnlock()
		t.Fatalf("expected willDeprecatedData len=0 before rollover, got %d", len(pm.willDeprecatedData))
	}
	pm.RUnlock()

	// Trigger rollover by creating a third key.
	s3 := pm.GetOrCreate("u3")
	if s3 == nil {
		t.Fatalf("expected session for u3 after rollover")
	}

	// After rollover: current data should only have u3, deprecated should hold u1 and u2.
	pm.RLock()
	if len(pm.data) != 1 {
		pm.RUnlock()
		t.Fatalf("expected data len=1 after rollover (only u3), got %d", len(pm.data))
	}
	if _, ok := pm.data["u3"]; !ok {
		pm.RUnlock()
		t.Fatalf("expected 'u3' to be in active data after rollover")
	}
	if len(pm.willDeprecatedData) != 2 {
		pm.RUnlock()
		t.Fatalf("expected willDeprecatedData len=2 after rollover, got %d", len(pm.willDeprecatedData))
	}
	pm.RUnlock()

	// Accessing an old key should return the same pointer and promote it to active data map.
	got1 := pm.Get("u1")
	if got1 != s1 {
		t.Fatalf("expected same pointer for u1 after promotion, got %p want %p", got1, s1)
	}

	// The promotion should add it to active data (without enforcing size limit on promotion).
	pm.RLock()
	if _, ok := pm.data["u1"]; !ok {
		pm.RUnlock()
		t.Fatalf("expected 'u1' to be present in active data after promotion")
	}
	if len(pm.data) != 2 {
		// Now should contain u3 and u1
		pm.RUnlock()
		t.Fatalf("expected data len=2 after promotion of u1, got %d", len(pm.data))
	}
	pm.RUnlock()

	// Access the other deprecated key and ensure behavior is consistent.
	got2 := pm.Get("u2")
	if got2 != s2 {
		t.Fatalf("expected same pointer for u2 after promotion, got %p want %p", got2, s2)
	}

	pm.RLock()
	if _, ok := pm.data["u2"]; !ok {
		pm.RUnlock()
		t.Fatalf("expected 'u2' to be present in active data after promotion")
	}
	// Note: promotion does not enforce sizeLimit, so data can grow beyond sizeLimit after promotions.
	if len(pm.data) != 3 {
		pm.RUnlock()
		t.Fatalf("expected data len=3 after promoting both u1 and u2, got %d", len(pm.data))
	}
	pm.RUnlock()
}
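The tests above pin down the two-generation cache used by ProverManager: when the active map reaches sizeLimit it is demoted wholesale to willDeprecatedData, and a hit on a demoted entry promotes it back. A generic sketch of the same eviction scheme follows; the twoGen type is hypothetical (Go 1.18+ for generics). Note the property the final assertions rely on: promotion bypasses the limit, so the active map can temporarily exceed sizeLimit.

package main

import "fmt"

// twoGen is a minimal sketch of the ProverManager eviction scheme: once
// the active map reaches its limit, it is demoted wholesale to an "old"
// generation, and hits on old entries promote them back.
type twoGen[V any] struct {
	active, old map[string]V
	limit       int
}

func newTwoGen[V any](limit int) *twoGen[V] {
	return &twoGen[V]{active: map[string]V{}, old: map[string]V{}, limit: limit}
}

func (c *twoGen[V]) get(k string) (V, bool) {
	if v, ok := c.active[k]; ok {
		return v, true
	}
	v, ok := c.old[k]
	if ok {
		c.active[k] = v // promote on hit; note: promotion ignores the limit
	}
	return v, ok
}

func (c *twoGen[V]) put(k string, v V) {
	if len(c.active) >= c.limit {
		c.old = c.active // the previous old generation is dropped here
		c.active = map[string]V{}
	}
	c.active[k] = v
}

func main() {
	c := newTwoGen[int](2)
	c.put("u1", 1)
	c.put("u2", 2)
	c.put("u3", 3) // triggers rollover: u1/u2 move to the old generation
	_, ok := c.get("u1")
	fmt.Println(ok, len(c.active)) // true 2 (u3 plus promoted u1)
}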
@@ -1,94 +0,0 @@
package proxy

import (
	"fmt"
	"strings"

	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/scroll-tech/go-ethereum/log"

	"scroll-tech/common/types"

	"scroll-tech/coordinator/internal/config"
	coordinatorType "scroll-tech/coordinator/internal/types"
)

// SubmitProofController is the submit proof API controller
type SubmitProofController struct {
	proverMgr        *ProverManager
	clients          Clients
	priorityUpstream *PriorityUpstreamManager
}

// NewSubmitProofController creates the submit proof API controller instance
func NewSubmitProofController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager, priorityMgr *PriorityUpstreamManager, reg prometheus.Registerer) *SubmitProofController {
	return &SubmitProofController{
		proverMgr:        proverMgr,
		clients:          clients,
		priorityUpstream: priorityMgr,
	}
}

func upstreamFromTaskName(taskID string) (string, string) {
	parts, rest, found := strings.Cut(taskID, ":")
	if found {
		return parts, rest
	}
	return "", parts
}

func formUpstreamWithTaskName(upstream string, taskID string) string {
	return fmt.Sprintf("%s:%s", upstream, taskID)
}

// SubmitProof handles a prover submitting its proof to the upstream coordinator
func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
	var submitParameter coordinatorType.SubmitProofParameter
	if err := ctx.ShouldBind(&submitParameter); err != nil {
		nerr := fmt.Errorf("prover submitProof parameter invalid, err:%w", err)
		types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
		return
	}

	publicKey, proverName := getSessionData(ctx)
	if publicKey == "" {
		return
	}

	session := spc.proverMgr.Get(publicKey)
	if session == nil {
		nerr := fmt.Errorf("can not get session for prover %s", proverName)
		types.RenderFailure(ctx, types.InternalServerError, nerr)
		return
	}

	upstream, realTaskID := upstreamFromTaskName(submitParameter.TaskID)
	cli, existed := spc.clients[upstream]
	if !existed {
		log.Warn("An upstream for submitting was removed or lost while running", "up", upstream)
		nerr := fmt.Errorf("invalid upstream name (%s) from taskID %s", upstream, submitParameter.TaskID)
		types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
		return
	}
	log.Debug("Start submitting", "up", upstream, "cli", proverName, "id", realTaskID, "status", submitParameter.Status)
	submitParameter.TaskID = realTaskID

	resp, err := session.SubmitProof(ctx, &submitParameter, cli)
	if err != nil {
		log.Error("Upstream has error resp for submit", "error", err, "up", upstream, "cli", proverName, "taskID", realTaskID)
		types.RenderFailure(ctx, types.ErrCoordinatorGetTaskFailure, err)
		return
	} else if resp.ErrCode != 0 {
		log.Error("Upstream has error resp for submit proof", "code", resp.ErrCode, "msg", resp.ErrMsg, "up", upstream, "cli", proverName, "taskID", realTaskID)
		// simply dispatch the error from upstream to the prover
		types.RenderFailure(ctx, resp.ErrCode, fmt.Errorf("%s", resp.ErrMsg))
		return
	} else {
		log.Debug("Submit proof to upstream", "up", upstream, "cli", proverName, "taskID", realTaskID)
		spc.priorityUpstream.Delete(publicKey)
		types.RenderSuccess(ctx, resp.Data)
		return
	}
}
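The two helpers above give the proxy a reversible task-ID encoding: outgoing task IDs are prefixed with the upstream name, and SubmitProof uses the prefix to route the proof back. A stand-alone round trip follows; encode and decode are hypothetical mirrors of formUpstreamWithTaskName and upstreamFromTaskName. One design caveat: strings.Cut splits at the first colon, so a bare task ID that itself contains a colon would be misread as prefixed.

package main

import (
	"fmt"
	"strings"
)

// encode rewrites an upstream task ID as "<upstream>:<task id>" on the way out.
func encode(upstream, taskID string) string {
	return fmt.Sprintf("%s:%s", upstream, taskID)
}

// decode strips the prefix on the way back in.
func decode(taskID string) (upstream, rest string) {
	before, after, found := strings.Cut(taskID, ":")
	if found {
		return before, after
	}
	return "", before // no prefix: the task did not come through the proxy
}

func main() {
	id := encode("coordinator_0", "task-abc")
	up, raw := decode(id)
	fmt.Println(id, up, raw) // coordinator_0:task-abc coordinator_0 task-abc
}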
@@ -1,7 +1,6 @@
package auth

import (
	"context"
	"errors"
	"fmt"
	"strings"
@@ -20,72 +19,45 @@ import (

// LoginLogic the auth logic
type LoginLogic struct {
	cfg          *config.VerifierConfig
	deduplicator ChallengeDeduplicator
	cfg          *config.Config
	challengeOrm *orm.Challenge

	openVmVks map[string]struct{}

	proverVersionHardForkMap map[string]string
}

type ChallengeDeduplicator interface {
	InsertChallenge(ctx context.Context, challengeString string) error
}

type SimpleDeduplicator struct {
}

func (s *SimpleDeduplicator) InsertChallenge(ctx context.Context, challengeString string) error {
	return nil
}

// NewLoginLogicWithSimpleDeduplicator creates a LoginLogic that does not use the db to deduplicate challenges
func NewLoginLogicWithSimpleDeduplicator(vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
	return newLoginLogic(&SimpleDeduplicator{}, vcfg, vf)
}

// NewLoginLogic creates a new LoginLogic
func NewLoginLogic(db *gorm.DB, vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
	return newLoginLogic(orm.NewChallenge(db), vcfg, vf)
}

func newLoginLogic(deduplicator ChallengeDeduplicator, vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {

func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
	proverVersionHardForkMap := make(map[string]string)

	for _, cfg := range vcfg.Verifiers {
	for _, cfg := range cfg.ProverManager.Verifier.Verifiers {
		proverVersionHardForkMap[cfg.ForkName] = cfg.MinProverVersion
	}

	return &LoginLogic{
		cfg:                      vcfg,
		cfg:                      cfg,
		openVmVks:                vf.OpenVMVkMap,
		deduplicator:             deduplicator,
		challengeOrm:             orm.NewChallenge(db),
		proverVersionHardForkMap: proverVersionHardForkMap,
	}
}

// VerifyMsg verifies the completeness of the login message
func VerifyMsg(login *types.LoginParameter) error {
// InsertChallengeString inserts the challenge string and checks whether it already exists
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
	return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge)
}

func (l *LoginLogic) Check(login *types.LoginParameter) error {
	verify, err := login.Verify()
	if err != nil || !verify {
		log.Error("auth message verify failure", "prover_name", login.Message.ProverName,
			"prover_version", login.Message.ProverVersion, "message", login.Message)
		return errors.New("auth message verify failure")
	}
	return nil
}

// InsertChallengeString inserts the challenge string and checks whether it already exists
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
	return l.deduplicator.InsertChallenge(ctx.Copy(), challenge)
}

// CompatiblityCheck checks whether the login client is compatible with the coordinator's settings
func (l *LoginLogic) CompatiblityCheck(login *types.LoginParameter) error {
	if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.MinProverVersion) {
		return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.MinProverVersion, login.Message.ProverVersion)
	if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.MinProverVersion) {
		return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.MinProverVersion, login.Message.ProverVersion)
	}

	vks := make(map[string]struct{})
@@ -93,32 +65,27 @@ func (l *LoginLogic) CompatiblityCheck(login *types.LoginParameter) error {
		vks[vk] = struct{}{}
	}

	// new coordinator / proxy do not check vks during login; this code is kept only for backward compatibility
	if len(vks) != 0 {
		for _, vk := range login.Message.VKs {
			if _, ok := vks[vk]; !ok {
				log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
					"prover_version", login.Message.ProverVersion, "message", login.Message)
				if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
					return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
						version.Version, login.Message.ProverVersion)
				}
				// if the prover reports the same prover version
				return errors.New("incompatible vk. please check your params files or config files")
	for _, vk := range login.Message.VKs {
		if _, ok := vks[vk]; !ok {
			log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
				"prover_version", login.Message.ProverVersion, "message", login.Message)
			if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
				return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
					version.Version, login.Message.ProverVersion)
			}
			// if the prover reports the same prover version
			return errors.New("incompatible vk. please check your params files or config files")
		}
	}

	switch login.Message.ProverProviderType {
	case types.ProverProviderTypeInternal:
	case types.ProverProviderTypeExternal:
	case types.ProverProviderTypeProxy:
	case types.ProverProviderTypeUndefined:
		if login.Message.ProverProviderType != types.ProverProviderTypeInternal && login.Message.ProverProviderType != types.ProverProviderTypeExternal {
			// for backward compatibility, set ProverProviderType as internal
			login.Message.ProverProviderType = types.ProverProviderTypeInternal
	default:
		log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
		return errors.New("invalid prover provider type")
		if login.Message.ProverProviderType == types.ProverProviderTypeUndefined {
			login.Message.ProverProviderType = types.ProverProviderTypeInternal
		} else {
			log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
			return errors.New("invalid prover provider type")
		}
	}

	return nil
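CompatiblityCheck gates logins on a configured minimum prover version via version.CheckScrollRepoVersion. As a rough model of that gate, here is a hypothetical version check built on golang.org/x/mod/semver; the real implementation may treat tags and pre-release suffixes differently, so this is a sketch of the idea rather than the repository's comparison logic.

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// minVersionOK is a hypothetical stand-in for the version gate used in
// CompatiblityCheck: a login is rejected when the prover's reported
// version is older than the coordinator's configured minimum.
func minVersionOK(proverVersion, minVersion string) bool {
	// semver.Compare expects a leading "v", which Scroll version strings carry.
	return semver.Compare(proverVersion, minVersion) >= 0
}

func main() {
	fmt.Println(minVersionOK("v4.4.89", "v4.4.89")) // true
	fmt.Println(minVersionOK("v4.3.0", "v4.4.89"))  // false
}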
@@ -1,16 +1,20 @@
.PHONY: help fmt clippy test test-ci test-all
include ../../../../build/common.mk

.PHONY: help fmt clippy test test-ci test-all clean build

all: build

build:
	@cargo build --release -p libzkp-c
	@mkdir -p lib
	@cp -f ../../../../target/release/libzkp.so lib/
	@cp -f ../../../../target/release/$(LIB_ZKP_NAME) lib/

fmt:
	@cargo fmt --all -- --check

clean:
	@cargo clean --release -p libzkp -p libzkp-c -p l2geth
	@rm -f lib/libzkp.so
	@rm -f lib/$(LIB_ZKP_NAME)

clippy:
	@cargo check --release --all-features
@@ -1,9 +1,8 @@
//go:build !mock_verifier

package libzkp

/*
#cgo LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib -Wl,-rpath=${SRCDIR}/lib
#cgo linux LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib -Wl,-rpath=${SRCDIR}/lib
#cgo darwin LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib -Wl,-rpath,${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "libzkp.h"
@@ -15,6 +14,8 @@ import (
	"os"
	"strings"
	"unsafe"

	"scroll-tech/common/types/message"
)

func init() {
@@ -72,6 +73,31 @@ func VerifyBundleProof(proofData, forkName string) bool {
	return result != 0
}

// TaskType enum values matching the Rust enum
const (
	TaskTypeChunk  = 0
	TaskTypeBatch  = 1
	TaskTypeBundle = 2
)

func fromMessageTaskType(taskType int) int {
	switch message.ProofType(taskType) {
	case message.ProofTypeChunk:
		return TaskTypeChunk
	case message.ProofTypeBatch:
		return TaskTypeBatch
	case message.ProofTypeBundle:
		return TaskTypeBundle
	default:
		panic(fmt.Sprintf("unsupported proof type: %d", taskType))
	}
}

// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
	return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}

// Generate wrapped proof
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
	cProofJSON := goToCString(proofJSON)
@@ -1,57 +0,0 @@
//go:build mock_verifier

package libzkp

import (
	"encoding/json"
)

// // InitVerifier is a no-op in the mock.
// func InitVerifier(configJSON string) {}

// // VerifyChunkProof returns a fixed success in the mock.
// func VerifyChunkProof(proofData, forkName string) bool {
// 	return true
// }

// // VerifyBatchProof returns a fixed success in the mock.
// func VerifyBatchProof(proofData, forkName string) bool {
// 	return true
// }

// // VerifyBundleProof returns a fixed success in the mock.
// func VerifyBundleProof(proofData, forkName string) bool {
// 	return true
// }

func UniversalTaskCompatibilityFix(taskJSON string) (string, error) {
	panic("should not run here")
}

// GenerateWrappedProof returns a fixed dummy proof string in the mock.
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
	payload := struct {
		Metadata   json.RawMessage `json:"metadata"`
		Proof      json.RawMessage `json:"proof"`
		GitVersion string          `json:"git_version"`
	}{
		Metadata:   json.RawMessage(metadata),
		Proof:      json.RawMessage(proofJSON),
		GitVersion: "mock-git-version",
	}

	out, err := json.Marshal(payload)
	if err != nil {
		panic(err)
	}
	return string(out)
}

// DumpVk is a no-op and returns nil in the mock.
func DumpVk(forkName, filePath string) error {
	return nil
}

// SetDynamicFeature is a no-op in the mock.
func SetDynamicFeature(feats string) {}
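The mock GenerateWrappedProof above emits a JSON envelope with metadata, proof, and git_version embedded verbatim. A small consumer-side sketch decoding that envelope follows; the wrappedProof struct is reconstructed here for illustration and is not a type from this repository.

package main

import (
	"encoding/json"
	"fmt"
)

// wrappedProof mirrors the anonymous struct the mock GenerateWrappedProof
// marshals: metadata and proof are carried through as raw JSON.
type wrappedProof struct {
	Metadata   json.RawMessage `json:"metadata"`
	Proof      json.RawMessage `json:"proof"`
	GitVersion string          `json:"git_version"`
}

func main() {
	// Output shape of the mock, reproduced by hand for the example.
	raw := `{"metadata":{"task":"t1"},"proof":{"ok":true},"git_version":"mock-git-version"}`

	var wp wrappedProof
	if err := json.Unmarshal([]byte(raw), &wp); err != nil {
		panic(err)
	}
	fmt.Println(wp.GitVersion, string(wp.Proof)) // mock-git-version {"ok":true}
}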
@@ -1,27 +0,0 @@
package libzkp

import (
	"fmt"

	"scroll-tech/common/types/message"
)

// TaskType enum values matching the Rust enum
const (
	TaskTypeChunk  = 0
	TaskTypeBatch  = 1
	TaskTypeBundle = 2
)

func fromMessageTaskType(taskType int) int {
	switch message.ProofType(taskType) {
	case message.ProofTypeChunk:
		return TaskTypeChunk
	case message.ProofTypeBatch:
		return TaskTypeBatch
	case message.ProofTypeBundle:
		return TaskTypeBundle
	default:
		panic(fmt.Sprintf("unsupported proof type: %d", taskType))
	}
}
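fromMessageTaskType pins the Go-side proof types to the ordinals of the Rust TaskType enum, since the values cross the FFI boundary as plain integers. The sketch below shows the same mapping with an error return instead of a panic, a choice worth weighing at an FFI boundary; the proofType constants here are assumed values, the real ones live in scroll-tech/common/types/message.

package main

import "fmt"

// Hypothetical Go-side proof-type codes (assumed values for the example).
const (
	proofTypeChunk  = 1
	proofTypeBatch  = 2
	proofTypeBundle = 3
)

// Ordinals of the Rust TaskType enum, as in the constants above.
const (
	taskTypeChunk  = 0
	taskTypeBatch  = 1
	taskTypeBundle = 2
)

// toRustTaskType maps a proof type to its Rust enum ordinal, surfacing
// unknown values as an error rather than panicking.
func toRustTaskType(proofType int) (int, error) {
	switch proofType {
	case proofTypeChunk:
		return taskTypeChunk, nil
	case proofTypeBatch:
		return taskTypeBatch, nil
	case proofTypeBundle:
		return taskTypeBundle, nil
	default:
		return 0, fmt.Errorf("unsupported proof type: %d", proofType)
	}
}

func main() {
	v, err := toRustTaskType(proofTypeBatch)
	fmt.Println(v, err) // 1 <nil>
}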
@@ -5,7 +5,6 @@ package libzkp
import (
	"encoding/json"
	"fmt"
	"strings"

	"scroll-tech/common/types/message"

@@ -15,10 +14,6 @@ import (
func InitL2geth(configJSON string) {
}

func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
	return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}

func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
	fmt.Printf("call mocked generate universal task %d, taskJson %s\n", taskType, taskJSON)

@@ -7,10 +7,7 @@ package libzkp
#include "libzkp.h"
*/
import "C" //nolint:typecheck
import (
	"strings"
	"unsafe"
)
import "unsafe"

// Initialize the handler for universal task
func InitL2geth(configJSON string) {
@@ -20,11 +17,6 @@ func InitL2geth(configJSON string) {
	C.init_l2geth(cConfig)
}

// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
	return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}

func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
	cTask := goToCString(taskJSON)
	cForkName := goToCString(forkName)
@@ -14,7 +14,7 @@ import (
)

// ChallengeMiddleware jwt challenge middleware
func ChallengeMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
func ChallengeMiddleware(conf *config.Config) *jwt.GinJWTMiddleware {
	jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
		Authenticator: func(c *gin.Context) (interface{}, error) {
			return nil, nil
@@ -30,8 +30,8 @@ func ChallengeMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
			}
		},
		Unauthorized: unauthorized,
		Key:          []byte(auth.Secret),
		Timeout:      time.Second * time.Duration(auth.ChallengeExpireDurationSec),
		Key:          []byte(conf.Auth.Secret),
		Timeout:      time.Second * time.Duration(conf.Auth.ChallengeExpireDurationSec),
		TokenLookup:   "header: Authorization, query: token, cookie: jwt",
		TokenHeadName: "Bearer",
		TimeFunc:      time.Now,
@@ -4,57 +4,22 @@ import (
	"time"

	jwt "github.com/appleboy/gin-jwt/v2"
	"github.com/gin-gonic/gin"
	"github.com/scroll-tech/go-ethereum/log"

	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/controller/api"
	"scroll-tech/coordinator/internal/controller/proxy"
	"scroll-tech/coordinator/internal/types"
)

func nonIdendityAuthorizator(data interface{}, _ *gin.Context) bool {
	return data != nil
}

// LoginMiddleware jwt auth middleware
func LoginMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
func LoginMiddleware(conf *config.Config) *jwt.GinJWTMiddleware {
	jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
		PayloadFunc:     api.Auth.PayloadFunc,
		IdentityHandler: api.Auth.IdentityHandler,
		IdentityKey:     types.PublicKey,
		Key:             []byte(auth.Secret),
		Timeout:         time.Second * time.Duration(auth.LoginExpireDurationSec),
		Key:             []byte(conf.Auth.Secret),
		Timeout:         time.Second * time.Duration(conf.Auth.LoginExpireDurationSec),
		Authenticator:   api.Auth.Login,
		Authorizator:    nonIdendityAuthorizator,
		Unauthorized:    unauthorized,
		TokenLookup:     "header: Authorization, query: token, cookie: jwt",
		TokenHeadName:   "Bearer",
		TimeFunc:        time.Now,
		LoginResponse:   loginResponse,
	})

	if err != nil {
		log.Crit("new jwt middleware panic", "error", err)
	}

	if errInit := jwtMiddleware.MiddlewareInit(); errInit != nil {
		log.Crit("init jwt middleware panic", "error", errInit)
	}

	return jwtMiddleware
}

// ProxyLoginMiddleware jwt auth middleware for proxy login
func ProxyLoginMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
	jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
		PayloadFunc:     proxy.Auth.PayloadFunc,
		IdentityHandler: proxy.Auth.IdentityHandler,
		IdentityKey:     types.PublicKey,
		Key:             []byte(auth.Secret),
		Timeout:         time.Second * time.Duration(auth.LoginExpireDurationSec),
		Authenticator:   proxy.Auth.Login,
		Authorizator:    nonIdendityAuthorizator,
		Unauthorized:    unauthorized,
		TokenLookup:     "header: Authorization, query: token, cookie: jwt",
		TokenHeadName:   "Bearer",
@@ -28,8 +28,8 @@ func TestMain(m *testing.M) {
	defer func() {
		if testApps != nil {
			testApps.Free()
			tearDownEnv(t)
		}
		tearDownEnv(t)
	}()
	m.Run()
}
@@ -8,7 +8,6 @@ import (

	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/controller/api"
	"scroll-tech/coordinator/internal/controller/proxy"
	"scroll-tech/coordinator/internal/middleware"
)

@@ -26,45 +25,16 @@ func Route(router *gin.Engine, cfg *config.Config, reg prometheus.Registerer) {
func v1(router *gin.RouterGroup, conf *config.Config) {
	r := router.Group("/v1")

	challengeMiddleware := middleware.ChallengeMiddleware(conf.Auth)
	challengeMiddleware := middleware.ChallengeMiddleware(conf)
	r.GET("/challenge", challengeMiddleware.LoginHandler)

	loginMiddleware := middleware.LoginMiddleware(conf.Auth)
	loginMiddleware := middleware.LoginMiddleware(conf)
	r.POST("/login", challengeMiddleware.MiddlewareFunc(), loginMiddleware.LoginHandler)

	// the APIs below require a jwt token
	r.Use(loginMiddleware.MiddlewareFunc())
	{
		r.POST("/proxy_login", loginMiddleware.LoginHandler)
		r.POST("/get_task", api.GetTask.GetTasks)
		r.POST("/submit_proof", api.SubmitProof.SubmitProof)
	}
}

// ProxyRoute registers the routes for the coordinator proxy
func ProxyRoute(router *gin.Engine, cfg *config.ProxyConfig, reg prometheus.Registerer) {
	router.Use(gin.Recovery())

	observability.Use(router, "coordinator", reg)

	r := router.Group("coordinator")

	v1_proxy(r, cfg)
}

func v1_proxy(router *gin.RouterGroup, conf *config.ProxyConfig) {
	r := router.Group("/v1")

	challengeMiddleware := middleware.ChallengeMiddleware(conf.ProxyManager.Auth)
	r.GET("/challenge", challengeMiddleware.LoginHandler)

	loginMiddleware := middleware.ProxyLoginMiddleware(conf.ProxyManager.Auth)
	r.POST("/login", challengeMiddleware.MiddlewareFunc(), loginMiddleware.LoginHandler)

	// the APIs below require a jwt token
	r.Use(loginMiddleware.MiddlewareFunc())
	{
		r.POST("/get_task", proxy.GetTask.GetTasks)
		r.POST("/submit_proof", proxy.SubmitProof.SubmitProof)
	}
}
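The proxy route table above implies a three-step client flow: fetch a challenge token, exchange it for a login token, then call the authenticated endpoints. A hedged sketch of that flow with resty follows; the address and the JSON payload shapes are simplified stand-ins, not the real request and response schemas.

package main

import (
	"fmt"

	"github.com/go-resty/resty/v2"
)

func main() {
	base := "http://localhost:8390/coordinator/v1" // hypothetical address
	cli := resty.New()

	// 1. GET /challenge returns a short-lived JWT used only for login.
	var challenge struct {
		Data struct {
			Token string `json:"token"`
		} `json:"data"`
	}
	if _, err := cli.R().SetResult(&challenge).Get(base + "/challenge"); err != nil {
		panic(err)
	}

	// 2. POST /login with the challenge token in the Authorization header.
	var login struct {
		Data struct {
			Token string `json:"token"`
		} `json:"data"`
	}
	_, err := cli.R().
		SetHeader("Authorization", "Bearer "+challenge.Data.Token).
		SetBody(map[string]interface{}{"message": "..."}). // signed login payload omitted
		SetResult(&login).
		Post(base + "/login")
	if err != nil {
		panic(err)
	}

	// 3. Authenticated calls reuse the login token until it expires.
	resp, err := cli.R().
		SetHeader("Authorization", "Bearer "+login.Data.Token).
		SetBody(map[string]interface{}{"task_types": []int{1}, "universal": true}).
		Post(base + "/get_task")
	fmt.Println(resp.StatusCode(), err)
}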
@@ -64,8 +64,6 @@ func (r ProverProviderType) String() string {
		return "prover provider type internal"
	case ProverProviderTypeExternal:
		return "prover provider type external"
	case ProverProviderTypeProxy:
		return "prover provider type proxy"
	default:
		return fmt.Sprintf("prover provider type: %d", r)
	}
@@ -78,6 +76,4 @@ const (
	ProverProviderTypeInternal
	// ProverProviderTypeExternal is an external prover provider type
	ProverProviderTypeExternal
	// ProverProviderTypeProxy is a proxy prover provider type
	ProverProviderTypeProxy = 3
)
@@ -1,48 +0,0 @@
package types

import (
	"encoding/json"
	"reflect"
	"testing"

	"scroll-tech/common/types"
)

func TestResponseDecodeData_GetTaskSchema(t *testing.T) {
	// Arrange: build a dummy payload and wrap it in Response
	in := GetTaskSchema{
		UUID:         "uuid-123",
		TaskID:       "task-abc",
		TaskType:     1,
		UseSnark:     true,
		TaskData:     "dummy-data",
		HardForkName: "cancun",
	}

	resp := types.Response{
		ErrCode: 0,
		ErrMsg:  "",
		Data:    in,
	}

	// Act: JSON round-trip the Response to simulate real HTTP encoding/decoding
	b, err := json.Marshal(resp)
	if err != nil {
		t.Fatalf("marshal response: %v", err)
	}

	var decoded types.Response
	if err := json.Unmarshal(b, &decoded); err != nil {
		t.Fatalf("unmarshal response: %v", err)
	}

	var out GetTaskSchema
	if err := decoded.DecodeData(&out); err != nil {
		t.Fatalf("DecodeData error: %v", err)
	}

	// Assert: structs match after decode
	if !reflect.DeepEqual(in, out) {
		t.Fatalf("decoded struct mismatch:\nwant: %+v\n got: %+v", in, out)
	}
}
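The test above exercises DecodeData, which has to re-serialize the envelope's Data field because encoding/json decodes an interface{} payload into a map[string]interface{}. A stand-alone sketch of that round trip follows, with decodeData as a hypothetical stand-in for the real method.

package main

import (
	"encoding/json"
	"fmt"
)

// response mimics the common Response envelope: Data is decoded lazily
// into a caller-supplied struct.
type response struct {
	ErrCode int         `json:"errcode"`
	ErrMsg  string      `json:"errmsg"`
	Data    interface{} `json:"data"`
}

// decodeData re-marshals the already-decoded Data field and unmarshals
// it into the target type, the same marshal/unmarshal round trip the
// test above relies on.
func (r *response) decodeData(out interface{}) error {
	b, err := json.Marshal(r.Data) // Data arrives as map[string]interface{}
	if err != nil {
		return err
	}
	return json.Unmarshal(b, out)
}

type task struct {
	TaskID string `json:"task_id"`
}

func main() {
	var r response
	_ = json.Unmarshal([]byte(`{"errcode":0,"errmsg":"","data":{"task_id":"task-abc"}}`), &r)

	var tk task
	if err := r.decodeData(&tk); err != nil {
		panic(err)
	}
	fmt.Println(tk.TaskID) // task-abc
}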
@@ -30,14 +30,12 @@ import (
	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/controller/api"
	"scroll-tech/coordinator/internal/controller/cron"
	"scroll-tech/coordinator/internal/controller/proxy"
	"scroll-tech/coordinator/internal/orm"
	"scroll-tech/coordinator/internal/route"
)

var (
	conf      *config.Config
	proxyConf *config.ProxyConfig
	conf *config.Config

	testApps *testcontainers.TestcontainerApps

@@ -53,9 +51,6 @@
	chunk        *encoding.Chunk
	batch        *encoding.Batch
	tokenTimeout int

	envSet   bool
	portUsed map[int64]struct{}
)

func TestMain(m *testing.M) {
@@ -68,44 +63,18 @@ func TestMain(m *testing.M) {
}

func randomURL() string {
	return randmURLBatch(1)[0]
	id, _ := rand.Int(rand.Reader, big.NewInt(2000-1))
	return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}

// Generate a batch of random localhost URLs with different ports, similar to randomURL.
func randmURLBatch(n int) []string {
	if n <= 0 {
		return nil
	}
	urls := make([]string, 0, n)
	if portUsed == nil {
		portUsed = make(map[int64]struct{})
	}
	for len(urls) < n {
		id, _ := rand.Int(rand.Reader, big.NewInt(2000-1))
		port := 20000 + 2000 + id.Int64()
		if _, exist := portUsed[port]; exist {
			continue
		}
		portUsed[port] = struct{}{}
		urls = append(urls, fmt.Sprintf("localhost:%d", port))
	}
	return urls
}

func setupCoordinatorDb(t *testing.T) {
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
	var err error
	assert.NotNil(t, db, "setEnv must be called before")
	// db, err = testApps.GetGormDBClient()
	db, err = testApps.GetGormDBClient()

	// assert.NoError(t, err)
	assert.NoError(t, err)
	sqlDB, err := db.DB()
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(sqlDB))
}

func launchCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
	assert.NotNil(t, db, "db must be set")

	tokenTimeout = 60
	conf = &config.Config{
@@ -145,7 +114,6 @@ func launchCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL str
		EuclidV2Time: new(uint64),
	}, db, nil)
	route.Route(router, conf, nil)
	t.Log("coordinator server url", coordinatorURL)
	srv := &http.Server{
		Addr:    coordinatorURL,
		Handler: router,
@@ -161,77 +129,7 @@ func launchCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL str
	return proofCollector, srv
}

func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
	setupCoordinatorDb(t)
	return launchCoordinator(t, proversPerSession, coordinatorURL)
}

func setupProxyDb(t *testing.T) {
	assert.NotNil(t, db, "setEnv must be called before")
	sqlDB, err := db.DB()
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetModuleDB(sqlDB, "proxy"))
}

func launchProxy(t *testing.T, proxyURL string, coordinatorURL []string, usePersistent bool) *http.Server {
	var err error
	assert.NoError(t, err)

	coordinators := make(map[string]*config.UpStream)
	for i, n := range coordinatorURL {
		coordinators[fmt.Sprintf("coordinator_%d", i)] = testProxyUpStreamCfg(n)
	}

	tokenTimeout = 60
	proxyConf = &config.ProxyConfig{
		ProxyName: "test_proxy",
		ProxyManager: &config.ProxyManager{
			Verifier: &config.VerifierConfig{
				MinProverVersion: "v4.4.89",
				Verifiers: []config.AssetConfig{{
					AssetsPath: "",
					ForkName:   "euclidV2",
				}},
			},
			Client: testProxyClientCfg(),
			Auth: &config.Auth{
				Secret:                     "proxy",
				ChallengeExpireDurationSec: tokenTimeout,
				LoginExpireDurationSec:     tokenTimeout,
			},
		},
		Coordinators: coordinators,
	}

	router := gin.New()
	if usePersistent {
		proxy.InitController(proxyConf, db, nil)
	} else {
		proxy.InitController(proxyConf, nil, nil)
	}
	route.ProxyRoute(router, proxyConf, nil)
	t.Log("proxy server url", proxyURL)
	srv := &http.Server{
		Addr:    proxyURL,
		Handler: router,
	}
	go func() {
		runErr := srv.ListenAndServe()
		if runErr != nil && !errors.Is(runErr, http.ErrServerClosed) {
			assert.NoError(t, runErr)
		}
	}()
	time.Sleep(time.Second * 2)

	return srv
}

func setEnv(t *testing.T) {
	if envSet {
		t.Log("setEnv was re-entered")
		return
	}

	var err error

	version.Version = "v4.5.45"
@@ -248,7 +146,6 @@ func setEnv(t *testing.T) {
	sqlDB, err := db.DB()
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(sqlDB))
	assert.NoError(t, migrate.MigrateModule(sqlDB, "proxy"))

	batchOrm = orm.NewBatch(db)
	chunkOrm = orm.NewChunk(db)
@@ -272,7 +169,6 @@ func setEnv(t *testing.T) {
	assert.NoError(t, err)
	batch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk}}

	envSet = true
}

func TestApis(t *testing.T) {
@@ -34,8 +34,6 @@ type mockProver struct {
	privKey        *ecdsa.PrivateKey
	proofType      message.ProofType
	coordinatorURL string
	token          string
	useCacheToken  bool
}

func newMockProver(t *testing.T, proverName string, coordinatorURL string, proofType message.ProofType, version string) *mockProver {
@@ -52,14 +50,6 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof
	return prover
}

func (r *mockProver) resetConnection(coordinatorURL string) {
	r.coordinatorURL = coordinatorURL
}

func (r *mockProver) setUseCacheToken(enable bool) {
	r.useCacheToken = enable
}

// connectToCoordinator sets up a client connection to the prover manager.
func (r *mockProver) connectToCoordinator(t *testing.T, proverTypes []types.ProverType) (string, int, string) {
	challengeString := r.challenge(t)
@@ -125,7 +115,6 @@ func (r *mockProver) login(t *testing.T, challengeString string, proverTypes []t
	assert.NoError(t, err)
	assert.Equal(t, http.StatusOK, resp.StatusCode())
	assert.Empty(t, result.ErrMsg)
	r.token = loginData.Token
	return loginData.Token, 0, ""
}

@@ -155,14 +144,11 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {

func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*types.GetTaskSchema, int, string) {
	// get task from coordinator
	if !r.useCacheToken || r.token == "" {
		token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
		if errCode != 0 {
			return nil, errCode, errMsg
		}
		assert.NotEmpty(t, token)
		assert.Equal(t, token, r.token)
	token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
	if errCode != 0 {
		return nil, errCode, errMsg
	}
	assert.NotEmpty(t, token)

	type response struct {
		ErrCode int `json:"errcode"`
@@ -174,7 +160,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
	client := resty.New()
	resp, err := client.R().
		SetHeader("Content-Type", "application/json").
		SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.token)).
		SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
		SetBody(map[string]interface{}{"universal": true, "prover_height": 100, "task_types": []int{int(proofType)}}).
		SetResult(&result).
		Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
@@ -188,14 +174,11 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
//nolint:unparam
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) {
	// get task from coordinator
	if !r.useCacheToken || r.token == "" {
		token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
		if errCode != 0 {
			return errCode, errMsg
		}
		assert.NotEmpty(t, token)
		assert.Equal(t, token, r.token)
	token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
	if errCode != 0 {
		return errCode, errMsg
	}
	assert.NotEmpty(t, token)

	type response struct {
		ErrCode int `json:"errcode"`
@@ -207,8 +190,8 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
	client := resty.New()
	resp, err := client.R().
		SetHeader("Content-Type", "application/json").
		SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.token)).
		SetBody(map[string]interface{}{"prover_height": 100, "task_types": []int{int(proofType)}, "universal": true}).
		SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
		SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "universal": true}).
		SetResult(&result).
		Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
	assert.NoError(t, err)
@@ -266,13 +249,10 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
		Universal: true,
	}

	if !r.useCacheToken || r.token == "" {
		token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))})
		assert.Equal(t, authErrCode, 0)
		assert.Equal(t, errMsg, "")
		assert.NotEmpty(t, token)
		assert.Equal(t, token, r.token)
	}
	token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))})
	assert.Equal(t, authErrCode, 0)
	assert.Equal(t, errMsg, "")
	assert.NotEmpty(t, token)

	submitProofData, err := json.Marshal(submitProof)
	assert.NoError(t, err)
@@ -282,7 +262,7 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
	client := resty.New()
	resp, err := client.R().
		SetHeader("Content-Type", "application/json").
		SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.token)).
		SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
		SetBody(string(submitProofData)).
		SetResult(&result).
		Post("http://" + r.coordinatorURL + "/coordinator/v1/submit_proof")
@@ -1,297 +0,0 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/version"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/controller/proxy"
|
||||
)
|
||||
|
||||
func testProxyClientCfg() *config.ProxyClient {
|
||||
|
||||
return &config.ProxyClient{
|
||||
Secret: "test-secret-key",
|
||||
ProxyName: "test-proxy",
|
||||
ProxyVersion: version.Version,
|
||||
}
|
||||
}
|
||||
|
||||
var testCompatibileMode bool
|
||||
|
||||
func testProxyUpStreamCfg(coordinatorURL string) *config.UpStream {
|
||||
|
||||
return &config.UpStream{
|
||||
BaseUrl: fmt.Sprintf("http://%s", coordinatorURL),
|
||||
RetryWaitTime: 3,
|
||||
ConnectionTimeoutSec: 30,
|
||||
CompatibileMode: testCompatibileMode,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func testProxyClient(t *testing.T) {
|
||||
|
||||
// Setup coordinator and http server.
|
||||
coordinatorURL := randomURL()
|
||||
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
|
||||
defer func() {
|
||||
proofCollector.Stop()
|
||||
assert.NoError(t, httpHandler.Shutdown(context.Background()))
|
||||
}()
|
||||
|
||||
cliCfg := testProxyClientCfg()
|
||||
upCfg := testProxyUpStreamCfg(coordinatorURL)
|
||||
|
||||
clientManager, err := proxy.NewClientManager("test_coordinator", cliCfg, upCfg)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, clientManager)
|
||||
|
||||
// Create context with timeout
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Test Client method
|
||||
client := clientManager.ClientAsProxy(ctx)
|
||||
|
||||
// Client should not be nil if login succeeds
|
||||
// Note: This might be nil if the coordinator is not properly set up for proxy authentication
|
||||
// but the test validates that the Client method completes without panic
|
||||
assert.NotNil(t, client)
|
||||
token1 := client.Token()
|
||||
assert.NotEmpty(t, token1)
|
||||
t.Logf("Client token: %s (%v)", token1, client)
|
||||
|
||||
if !upCfg.CompatibileMode {
|
||||
time.Sleep(time.Second * 2)
|
||||
client.Reset()
|
||||
client = clientManager.ClientAsProxy(ctx)
|
||||
assert.NotNil(t, client)
|
||||
token2 := client.Token()
|
||||
assert.NotEmpty(t, token2)
|
||||
t.Logf("Client token (sec): %s (%v)", token2, client)
|
||||
assert.NotEqual(t, token1, token2, "token should not be identical")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func testProxyHandshake(t *testing.T) {
|
||||
// Setup proxy http server.
|
||||
proxyURL := randomURL()
|
||||
proxyHttpHandler := launchProxy(t, proxyURL, []string{}, false)
|
||||
defer func() {
|
||||
assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
|
||||
}()
|
||||
|
||||
chunkProver := newMockProver(t, "prover_chunk_test", proxyURL, message.ProofTypeChunk, version.Version)
|
||||
assert.True(t, chunkProver.healthCheckSuccess(t))
|
||||
}
|
||||
|
||||
func testProxyGetTask(t *testing.T) {
|
||||
// Setup coordinator and http server.
|
||||
urls := randmURLBatch(2)
|
||||
coordinatorURL := urls[0]
|
||||
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
|
||||
defer func() {
|
||||
collector.Stop()
|
||||
assert.NoError(t, httpHandler.Shutdown(context.Background()))
|
||||
}()
|
||||
|
||||
proxyURL := urls[1]
|
||||
proxyHttpHandler := launchProxy(t, proxyURL, []string{coordinatorURL}, false)
|
||||
defer func() {
|
||||
assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
|
||||
}()
|
||||
|
||||
chunkProver := newMockProver(t, "prover_chunk_test", proxyURL, message.ProofTypeChunk, version.Version)
|
||||
chunkProver.setUseCacheToken(true)
|
||||
code, _ := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
|
||||
assert.Equal(t, int(types.ErrCoordinatorEmptyProofData), code)
|
||||
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
|
||||
assert.NoError(t, err)
|
||||
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
|
||||
assert.NoError(t, err)
|
||||
|
||||
task, code, msg := chunkProver.getProverTask(t, message.ProofTypeChunk)
|
||||
assert.Empty(t, code)
|
||||
if code == 0 {
|
||||
t.Log("get task id", task.TaskID)
|
||||
} else {
|
||||
t.Log("get task error msg", msg)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func testProxyProof(t *testing.T) {
    urls := randmURLBatch(3)
    coordinatorURL0 := urls[0]
    setupCoordinatorDb(t)
    collector0, httpHandler0 := launchCoordinator(t, 3, coordinatorURL0)
    defer func() {
        collector0.Stop()
        httpHandler0.Shutdown(context.Background())
    }()
    coordinatorURL1 := urls[1]
    collector1, httpHandler1 := launchCoordinator(t, 3, coordinatorURL1)
    defer func() {
        collector1.Stop()
        httpHandler1.Shutdown(context.Background())
    }()
    coordinators := map[string]*http.Server{
        "coordinator_0": httpHandler0,
        "coordinator_1": httpHandler1,
    }

    proxyURL := urls[2]
    proxyHttpHandler := launchProxy(t, proxyURL, []string{coordinatorURL0, coordinatorURL1}, false)
    defer func() {
        assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
    }()

    err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
    assert.NoError(t, err)
    dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
    assert.NoError(t, err)
    err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
    assert.NoError(t, err)

    chunkProver := newMockProver(t, "prover_chunk_test", proxyURL, message.ProofTypeChunk, version.Version)
    chunkProver.setUseCacheToken(true)
    task, code, msg := chunkProver.getProverTask(t, message.ProofTypeChunk)
    assert.Empty(t, code)
    if code == 0 {
        t.Log("get task", task)
        parts, _, _ := strings.Cut(task.TaskID, ":")
        // Close the coordinators which did not dispatch the task first, so that if
        // we submit to the wrong target, the submission has a chance to fail
        // (hitting a closed coordinator).
        for n, srv := range coordinators {
            if n != parts {
                t.Log("close coordinator", n)
                assert.NoError(t, srv.Shutdown(context.Background()))
            }
        }
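        // The proxy (as exercised by this test) prefixes each TaskID with the name
        // of the upstream coordinator ("coordinator_name:task_id"), which is why the
        // strings.Cut above recovers the coordinator that must stay alive to
        // accept the proof.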
        exceptProofStatus := verifiedSuccess
        chunkProver.submitProof(t, task, exceptProofStatus, types.Success)

    } else {
        t.Log("get task error msg", msg)
    }

    // verify proof status
    var (
        tick     = time.Tick(1500 * time.Millisecond)
        tickStop = time.Tick(time.Minute)
    )

    var (
        chunkProofStatus    types.ProvingStatus
        chunkActiveAttempts int16
        chunkMaxAttempts    int16
    )

    for {
        select {
        case <-tick:
            chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
            assert.NoError(t, err)
            if chunkProofStatus == types.ProvingTaskVerified {
                return
            }

            chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
            assert.NoError(t, err)
            assert.Equal(t, 1, int(chunkMaxAttempts))
            assert.Equal(t, 0, int(chunkActiveAttempts))

        case <-tickStop:
            t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String())
            return
        }
    }
}

func testProxyPersistent(t *testing.T) {
    urls := randmURLBatch(4)
    coordinatorURL0 := urls[0]
    setupCoordinatorDb(t)
    collector0, httpHandler0 := launchCoordinator(t, 3, coordinatorURL0)
    defer func() {
        collector0.Stop()
        httpHandler0.Shutdown(context.Background())
    }()
    coordinatorURL1 := urls[1]
    collector1, httpHandler1 := launchCoordinator(t, 3, coordinatorURL1)
    defer func() {
        collector1.Stop()
        httpHandler1.Shutdown(context.Background())
    }()

    setupProxyDb(t)
    proxyURL1 := urls[2]
    proxyHttpHandler := launchProxy(t, proxyURL1, []string{coordinatorURL0, coordinatorURL1}, true)
    defer func() {
        assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
    }()

    proxyURL2 := urls[3]
    proxyHttpHandler2 := launchProxy(t, proxyURL2, []string{coordinatorURL0, coordinatorURL1}, true)
    defer func() {
        assert.NoError(t, proxyHttpHandler2.Shutdown(context.Background()))
    }()

    err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
    assert.NoError(t, err)
    dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
    assert.NoError(t, err)
    err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
    assert.NoError(t, err)

    chunkProver := newMockProver(t, "prover_chunk_test", proxyURL1, message.ProofTypeChunk, version.Version)
    chunkProver.setUseCacheToken(true)
    task, _, _ := chunkProver.getProverTask(t, message.ProofTypeChunk)
    assert.NotNil(t, task)
    taskFrom, _, _ := strings.Cut(task.TaskID, ":")
    t.Log("get task from coordinator:", taskFrom)

    chunkProver.resetConnection(proxyURL2)
    task, _, _ = chunkProver.getProverTask(t, message.ProofTypeChunk)
    assert.NotNil(t, task)
    taskFrom2, _, _ := strings.Cut(task.TaskID, ":")
    assert.Equal(t, taskFrom, taskFrom2)
}

func TestProxyClient(t *testing.T) {
    testCompatibileMode = false
    // Set up the test environment.
    setEnv(t)
    t.Run("TestProxyClient", testProxyClient)
    t.Run("TestProxyHandshake", testProxyHandshake)
    t.Run("TestProxyGetTask", testProxyGetTask)
    t.Run("TestProxyValidProof", testProxyProof)
    t.Run("testProxyPersistent", testProxyPersistent)
}

func TestProxyClientCompatibleMode(t *testing.T) {
    testCompatibileMode = true
    // Set up the test environment.
    setEnv(t)
    t.Run("TestProxyClient", testProxyClient)
    t.Run("TestProxyHandshake", testProxyHandshake)
    t.Run("TestProxyGetTask", testProxyGetTask)
    t.Run("TestProxyValidProof", testProxyProof)
    t.Run("testProxyPersistent", testProxyPersistent)
}

@@ -5,7 +5,7 @@ use alloy::{
};
use eyre::Result;
use libzkp::tasks::ChunkInterpreter;
use sbv_primitives::types::{consensus::TxL1Message, Network};
use sbv_primitives::types::{Network, consensus::TxL1Message};
use serde::{Deserialize, Serialize};

fn default_max_retry() -> u32 {

@@ -3,12 +3,15 @@ name = "libzkp"
version.workspace = true
edition.workspace = true

[lib]
crate-type = ["rlib", "cdylib"]

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
scroll-zkvm-types = { workspace = true, features = ["scroll"] }
scroll-zkvm-verifier.workspace = true

alloy-primitives.workspace = true #depress the effect of "native-keccak"
alloy-primitives.workspace = true # depress the effect of "native-keccak"
sbv-primitives = {workspace = true, features = ["scroll-compress-info", "scroll"]}
sbv-core = { workspace = true, features = ["scroll"] }
base64.workspace = true

@@ -138,7 +138,10 @@ pub fn gen_universal_task(
    // always respect the fork_name_str (which has been normalized) being passed;
    // if the fork_name wrapped in the task does not match, consider it a malformed task
    if fork_name_str != task.fork_name.as_str() {
        eyre::bail!("fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
        eyre::bail!(
            "fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}",
            task.fork_name
        );
    }
    if fork_name_str != version.fork.as_str() {
        eyre::bail!(
@@ -156,7 +159,10 @@ pub fn gen_universal_task(
    task.fork_name = task.fork_name.to_lowercase();
    let version = Version::from(task.version);
    if fork_name_str != task.fork_name.as_str() {
        eyre::bail!("fork name in batch task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
        eyre::bail!(
            "fork name in batch task not match the calling arg, expected {fork_name_str}, get {}",
            task.fork_name
        );
    }
    if fork_name_str != version.fork.as_str() {
        eyre::bail!(
@@ -174,7 +180,10 @@ pub fn gen_universal_task(
    task.fork_name = task.fork_name.to_lowercase();
    let version = Version::from(task.version);
    if fork_name_str != task.fork_name.as_str() {
        eyre::bail!("fork name in bundle task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
        eyre::bail!(
            "fork name in bundle task not match the calling arg, expected {fork_name_str}, get {}",
            task.fork_name
        );
    }
    if fork_name_str != version.fork.as_str() {
        eyre::bail!(

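The three hunks above reflow the same guard applied to chunk, batch, and bundle tasks. As a minimal standalone sketch of that shared pattern (the helper and its signature are hypothetical; only the two checks mirror the diff):

    fn check_fork_name(
        fork_name_str: &str,
        task_fork_name: &mut String,
        version: &Version,
    ) -> eyre::Result<()> {
        // Normalize, then require the task's own fork name to match the caller's.
        *task_fork_name = task_fork_name.to_lowercase();
        if fork_name_str != task_fork_name.as_str() {
            eyre::bail!("fork name in task does not match the calling arg: expected {fork_name_str}, got {task_fork_name}");
        }
        // The fork derived from the task's version field must agree as well.
        if fork_name_str != version.fork.as_str() {
            eyre::bail!("fork name derived from the task version does not match the calling arg");
        }
        Ok(())
    }
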
@@ -13,7 +13,7 @@ use scroll_zkvm_types::{
    utils::{serialize_vk, vec_as_base64},
    version,
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use serde::{Deserialize, Serialize, de::DeserializeOwned};

/// A wrapper around the actual inner proof.
#[derive(Clone, Serialize, Deserialize)]
@@ -213,7 +213,7 @@ impl<Metadata: ProofMetadata> PersistableProof for WrappedProof<Metadata> {

#[cfg(test)]
mod tests {
    use base64::{prelude::BASE64_STANDARD, Engine};
    use base64::{Engine, prelude::BASE64_STANDARD};
    use sbv_primitives::B256;
    use scroll_zkvm_types::{bundle::BundleInfo, proof::EvmProof};


@@ -3,14 +3,14 @@ use eyre::Result;
use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
    batch::{
        build_point_eval_witness, BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderValidium,
        BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, LegacyBatchWitness,
        ReferenceHeader, N_BLOB_BYTES,
        BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderValidium, BatchInfo, BatchWitness,
        Envelope, EnvelopeV6, EnvelopeV7, LegacyBatchWitness, N_BLOB_BYTES, ReferenceHeader,
        build_point_eval_witness,
    },
    chunk::ChunkInfo,
    public_inputs::{ForkName, MultiVersionPublicInputs, Version},
    task::ProvingTask,
    utils::{to_rkyv_bytes, RancorError},
    utils::{RancorError, to_rkyv_bytes},
    version::{Codec, Domain, STFVersion},
};

@@ -150,18 +150,32 @@ impl BatchProvingTask {
        match &self.batch_header {
            BatchHeaderV::Validium(_) => assert!(
                version.is_validium(),
                "version {:?} is not match with parsed header, get validium header but version is not validium", version,
                "version {:?} is not match with parsed header, get validium header but version is not validium",
                version,
            ),
            BatchHeaderV::V6(_) => assert_eq!(version.fork, ForkName::EuclidV1,
            BatchHeaderV::V6(_) => assert_eq!(
                version.fork,
                ForkName::EuclidV1,
                "hardfork mismatch for da-codec@v6 header: found={:?}, expected={:?}",
                version.fork,
                ForkName::EuclidV1,
            ),
            BatchHeaderV::V7_to_V10(_) => assert!(
                matches!(version.fork, ForkName::EuclidV2 | ForkName::Feynman | ForkName::Galileo | ForkName::GalileoV2),
                matches!(
                    version.fork,
                    ForkName::EuclidV2
                        | ForkName::Feynman
                        | ForkName::Galileo
                        | ForkName::GalileoV2
                ),
                "hardfork mismatch for da-codec@v7/8/9/10 header: found={}, expected={:?}",
                version.fork,
                [ForkName::EuclidV2, ForkName::Feynman, ForkName::Galileo, ForkName::GalileoV2],
                [
                    ForkName::EuclidV2,
                    ForkName::Feynman,
                    ForkName::Galileo,
                    ForkName::GalileoV2
                ],
            ),
        }


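The hunk above is purely a rustfmt reflow; the header-version-to-hardfork policy it encodes is unchanged. Restated as a lookup helper (variant and fork names are taken from the diff, the function itself is hypothetical):

    fn allowed_forks(header: &BatchHeaderV) -> &'static [ForkName] {
        match header {
            // da-codec@v6 headers are only valid for EuclidV1.
            BatchHeaderV::V6(_) => &[ForkName::EuclidV1],
            // da-codec@v7 through v10 headers span EuclidV2 and later forks.
            BatchHeaderV::V7_to_V10(_) => &[
                ForkName::EuclidV2,
                ForkName::Feynman,
                ForkName::Galileo,
                ForkName::GalileoV2,
            ],
            // Validium headers are gated on version.is_validium() instead of a fork list.
            BatchHeaderV::Validium(_) => &[],
        }
    }
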
@@ -17,7 +17,7 @@ pub mod base64 {

pub mod point_eval {
    use c_kzg;
    use sbv_primitives::{types::eips::eip4844::BLS_MODULUS, B256 as H256, U256};
    use sbv_primitives::{B256 as H256, U256, types::eips::eip4844::BLS_MODULUS};
    use scroll_zkvm_types::utils::sha256_rv32;

    /// Given the blob-envelope, translate it to a fixed size EIP-4844 blob.

@@ -4,7 +4,7 @@ use scroll_zkvm_types::{
    bundle::{BundleInfo, BundleWitness, LegacyBundleWitness},
    public_inputs::{MultiVersionPublicInputs, Version},
    task::ProvingTask,
    utils::{to_rkyv_bytes, RancorError},
    utils::{RancorError, to_rkyv_bytes},
};

use crate::proofs::BatchProof;

@@ -1,11 +1,11 @@
use eyre::Result;
use sbv_core::BlockWitness;
use sbv_primitives::{types::consensus::BlockHeader, B256};
use sbv_primitives::{B256, types::consensus::BlockHeader};
use scroll_zkvm_types::{
    chunk::{execute, ChunkInfo, ChunkWitness, LegacyChunkWitness, ValidiumInputs},
    chunk::{ChunkInfo, ChunkWitness, LegacyChunkWitness, ValidiumInputs, execute},
    public_inputs::{MultiVersionPublicInputs, Version},
    task::ProvingTask,
    utils::{to_rkyv_bytes, RancorError},
    utils::{RancorError, to_rkyv_bytes},
};

use super::chunk_interpreter::*;
@@ -224,8 +224,8 @@ impl ChunkProvingTask {
            attempts += 1;
            if attempts >= MAX_FETCH_NODES_ATTEMPTS {
                return Err(eyre!(
                    "failed to fetch nodes after {MAX_FETCH_NODES_ATTEMPTS} attempts: {e}"
                ));
                    "failed to fetch nodes after {MAX_FETCH_NODES_ATTEMPTS} attempts: {e}"
                ));
            }

            let node_hash =

@@ -1,6 +1,6 @@
use eyre::Result;
use sbv_core::BlockWitness;
use sbv_primitives::{types::consensus::TxL1Message, Bytes, B256};
use sbv_primitives::{B256, Bytes, types::consensus::TxL1Message};

/// An interpreter which is critical in translating chunk data
/// since we need to grep block witness and storage node data

@@ -1,12 +1,12 @@
use std::{
    panic::{catch_unwind, AssertUnwindSafe},
    panic::{AssertUnwindSafe, catch_unwind},
    path::Path,
};

use git_version::git_version;
use serde::{
    de::{Deserialize, DeserializeOwned},
    Serialize,
    de::{Deserialize, DeserializeOwned},
};

use eyre::Result;

@@ -11,5 +11,5 @@ crate-type = ["cdylib"]
[dependencies]
libzkp = { path = "../libzkp" }
l2geth = { path = "../l2geth"}
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tracing-subscriber.workspace = true
tracing.workspace = true

@@ -1,6 +1,6 @@
mod utils;

use std::ffi::{c_char, CString};
use std::ffi::{CString, c_char};

use libzkp::TaskType;
use utils::{c_char_to_str, c_char_to_vec};
@@ -20,7 +20,7 @@ fn enable_dump() -> bool {
}

/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn init_tracing() {
    use tracing_subscriber::filter::{EnvFilter, LevelFilter};

@@ -47,14 +47,14 @@ pub unsafe extern "C" fn init_tracing() {
}

/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn init_verifier(config: *const c_char) {
    let config_str = c_char_to_str(config);
    libzkp::verifier_init(config_str).unwrap();
}

/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn init_l2geth(config: *const c_char) {
    let config_str = c_char_to_str(config);
    l2geth::init(config_str).unwrap();
@@ -92,7 +92,7 @@ fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskT
}

/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn verify_chunk_proof(
    proof: *const c_char,
    fork_name: *const c_char,
@@ -101,7 +101,7 @@ pub unsafe extern "C" fn verify_chunk_proof(
}

/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn verify_batch_proof(
    proof: *const c_char,
    fork_name: *const c_char,
@@ -110,7 +110,7 @@ pub unsafe extern "C" fn verify_batch_proof(
}

/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn verify_bundle_proof(
    proof: *const c_char,
    fork_name: *const c_char,
@@ -119,7 +119,7 @@ pub unsafe extern "C" fn verify_bundle_proof(
}

/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
    let fork_name_str = c_char_to_str(fork_name);
    let file_str = c_char_to_str(file);
@@ -145,7 +145,7 @@ fn failed_handling_result() -> HandlingResult {
}

/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn gen_universal_task(
    task_type: i32,
    task: *const c_char,
@@ -166,10 +166,7 @@ pub unsafe extern "C" fn gen_universal_task(
            );
            return failed_handling_result();
        }
        Some(std::slice::from_raw_parts(
            decryption_key,
            decryption_key_len,
        ))
        Some(unsafe { std::slice::from_raw_parts(decryption_key, decryption_key_len) })
    } else {
        None
    };
@@ -185,7 +182,7 @@ pub unsafe extern "C" fn gen_universal_task(
    };

    let expected_vk = if expected_vk_len > 0 {
        std::slice::from_raw_parts(expected_vk, expected_vk_len)
        unsafe { std::slice::from_raw_parts(expected_vk, expected_vk_len) }
    } else {
        &[]
    };
@@ -224,18 +221,18 @@ pub unsafe extern "C" fn gen_universal_task(
}

/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn release_task_result(result: HandlingResult) {
    if !result.universal_task.is_null() {
        let _ = CString::from_raw(result.universal_task);
        let _ = unsafe { CString::from_raw(result.universal_task) };
    }
    if !result.metadata.is_null() {
        let _ = CString::from_raw(result.metadata);
        let _ = unsafe { CString::from_raw(result.metadata) };
    }
}

/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn gen_wrapped_proof(
    proof: *const c_char,
    metadata: *const c_char,
@@ -244,7 +241,7 @@ pub unsafe extern "C" fn gen_wrapped_proof(
) -> *mut c_char {
    let proof_str = c_char_to_str(proof);
    let metadata_str = c_char_to_str(metadata);
    let vk_data = std::slice::from_raw_parts(vk as *const u8, vk_len);
    let vk_data = unsafe { std::slice::from_raw_parts(vk as *const u8, vk_len) };

    match libzkp::gen_wrapped_proof(proof_str, metadata_str, vk_data) {
        Ok(result) => CString::new(result).unwrap().into_raw(),
@@ -256,7 +253,7 @@ pub unsafe extern "C" fn gen_wrapped_proof(
}

/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn univ_task_compatibility_fix(task_json: *const c_char) -> *mut c_char {
    let task_json_str = c_char_to_str(task_json);
    match libzkp::univ_task_compatibility_fix(task_json_str) {
@@ -269,9 +266,9 @@ pub unsafe extern "C" fn univ_task_compatibility_fix(task_json: *const c_char) -
}

/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn release_string(ptr: *mut c_char) {
    if !ptr.is_null() {
        let _ = CString::from_raw(ptr);
        let _ = unsafe { CString::from_raw(ptr) };
    }
}

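The repeated attribute and unsafe-block changes above track the Rust 2024 edition: `no_mangle` is now an unsafe attribute and must be spelled `#[unsafe(no_mangle)]`, and raw-pointer operations inside an `unsafe fn` body get their own `unsafe {}` block (the `unsafe_op_in_unsafe_fn` lint). A minimal standalone illustration of the combined pattern (the function itself is hypothetical, not part of this crate):

    use std::ffi::{CString, c_char};

    /// # Safety
    /// `ptr` must be null or a pointer previously produced by `CString::into_raw`.
    #[unsafe(no_mangle)]
    pub unsafe extern "C" fn free_owned_string(ptr: *mut c_char) {
        if !ptr.is_null() {
            // Even inside an `unsafe fn`, the 2024 edition expects the raw-pointer
            // operation to be wrapped in an explicit `unsafe` block.
            let _ = unsafe { CString::from_raw(ptr) };
        }
    }
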
@@ -6,35 +6,28 @@ edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
axiom-sdk.workspace = true
scroll-zkvm-types.workspace = true
scroll-zkvm-prover.workspace = true
libzkp = { path = "../libzkp"}
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "05648db" }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "504e71f" }
serde.workspace = true
serde_json.workspace = true
once_cell.workspace =true
base64.workspace = true
tiny-keccak = { workspace = true, features = ["sha3", "keccak"] }
eyre.workspace = true
tracing.workspace = true

futures = "0.3.30"
futures-util = "0.3"

reqwest = { version = "0.12.4", features = ["gzip", "stream"] }
reqwest-middleware = "0.3"
reqwest-retry = "0.5"
reqwest = { version = "0.12", features = ["gzip", "stream"] }
hex = "0.4.3"

rand = "0.8.5"
tokio = "1.37.0"
jiff.workspace = true
tokio = { workspace = true, features = ["full"] }
async-trait = "0.1"
sled = "0.34.7"
http = "1.1.0"
clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"
url = { version = "2.5.4", features = ["serde",] }
serde_bytes = "0.11.15"
url = { version = "2.5.4", features = ["serde"] }
tempfile = "3.24"

[features]
default = []
cuda = ["scroll-zkvm-prover/cuda"]
cuda = ["scroll-zkvm-prover/cuda"]

@@ -1,21 +1,32 @@
#[macro_use]
extern crate tracing;

mod prover;
mod types;
mod zk_circuits_handler;

use crate::prover::ProverKind;
use clap::{ArgAction, Parser, Subcommand};
use prover::{LocalProver, LocalProverConfig};
use scroll_proving_sdk::{
    prover::{types::ProofType, ProverBuilder},
    utils::{get_version, init_tracing},
    prover::{ProverBuilder, types::ProofType},
    utils::{VERSION, init_tracing},
};
use std::{
    fs::File,
    io::BufReader,
    path::{Path, PathBuf},
};
use std::{fs::File, io::BufReader, path::Path};

#[derive(Parser, Debug)]
#[command(disable_version_flag = true)]
struct Args {
    /// Prover kind
    #[arg(long = "prover.kind", value_enum, default_value_t = ProverKind::Local)]
    prover_kind: ProverKind,

    /// Path of config file
    #[arg(long = "config", default_value = "conf/config.json")]
    config_file: String,
    config_file: PathBuf,

    #[arg(long = "forkname")]
    fork_name: Option<String>,
@@ -42,8 +53,11 @@ enum Commands {

#[derive(Debug, serde::Deserialize)]
struct HandleSet {
    #[serde(default)]
    chunks: Vec<String>,
    #[serde(default)]
    batches: Vec<String>,
    #[serde(default)]
    bundles: Vec<String>,
}

@@ -54,13 +68,13 @@ async fn main() -> eyre::Result<()> {
    let args = Args::parse();

    if args.version {
        println!("version is {}", get_version());
        println!("version is {VERSION}");
        std::process::exit(0);
    }
    info!(version = %VERSION, "Starting prover");

    let cfg = LocalProverConfig::from_file(args.config_file)?;
    let sdk_config = cfg.sdk_config.clone();
    let local_prover = LocalProver::new(cfg.clone());
    let (sdk_config, prover) = args.prover_kind.create_from_file(&args.config_file)?;
    info!(prover = ?prover, "Loaded prover");

    match args.command {
        Some(Commands::Handle { task_path }) => {
@@ -68,37 +82,37 @@ async fn main() -> eyre::Result<()> {
            let reader = BufReader::new(file);
            let handle_set: HandleSet = serde_json::from_reader(reader)?;

            let prover = ProverBuilder::new(sdk_config, local_prover)
            let prover = ProverBuilder::new(sdk_config, prover)
                .build()
                .await
                .map_err(|e| eyre::eyre!("build prover fail: {e}"))?;

            let prover = std::sync::Arc::new(prover);
            println!("Handling task set 1: chunks ...");
            info!("Handling task set 1: chunks ...");
            assert!(
                prover
                    .clone()
                    .one_shot(&handle_set.chunks, ProofType::Chunk)
                    .await
            );
            println!("Done! Handling task set 2: batches ...");
            info!("Done! Handling task set 2: batches ...");
            assert!(
                prover
                    .clone()
                    .one_shot(&handle_set.batches, ProofType::Batch)
                    .await
            );
            println!("Done! Handling task set 3: bundles ...");
            info!("Done! Handling task set 3: bundles ...");
            assert!(
                prover
                    .clone()
                    .one_shot(&handle_set.bundles, ProofType::Bundle)
                    .await
            );
            println!("All done!");
            info!("All done!");
        }
        None => {
            let prover = ProverBuilder::new(sdk_config, local_prover)
            let prover = ProverBuilder::new(sdk_config, prover)
                .build()
                .await
                .map_err(|e| eyre::eyre!("build prover fail: {e}"))?;

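With the new `--prover.kind` flag (a clap `value_enum` over `Local` and `Axiom`, defaulting to `Local`), backend selection moves entirely to the command line; an invocation would look roughly like `prover --prover.kind axiom --config conf/config.json` (binary name illustrative).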
@@ -1,329 +1,96 @@
use crate::zk_circuits_handler::{universal::UniversalHandler, CircuitsHandler};
use async_trait::async_trait;
use eyre::Result;
use scroll_proving_sdk::{
    config::Config as SdkConfig,
    prover::{
        ProvingService,
        proving_service::{
            GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
            QueryTaskResponse, TaskStatus,
            QueryTaskResponse,
        },
        types::ProofType,
        ProvingService,
    },
};
use scroll_zkvm_types::ProvingTask;
use serde::{Deserialize, Serialize};
use std::{
    collections::HashMap,
    fs::File,
    path::{Path, PathBuf},
    sync::{Arc, LazyLock},
    time::{SystemTime, UNIX_EPOCH},
};
use tokio::{runtime::Handle, sync::Mutex, task::JoinHandle};
use std::path::Path;

#[derive(Clone, Serialize, Deserialize)]
pub struct AssetsLocationData {
    /// the base url to form a general downloading url for an asset, MUST HAVE A TRAILING SLASH
    pub base_url: url::Url,
    #[serde(default)]
    /// an altered url for a specified vk
    pub asset_detours: HashMap<String, url::Url>,
mod local;
pub use local::{LocalProver, LocalProverConfig};

mod axiom;
pub use axiom::{AxiomProver, AxiomProverConfig};

#[derive(Debug)]
pub enum Prover {
    Local(LocalProver),
    Axiom(AxiomProver),
}

impl AssetsLocationData {
    pub fn gen_asset_url(&self, vk_as_path: &str, proof_type: ProofType) -> Result<url::Url> {
        Ok(self.base_url.join(
            match proof_type {
                ProofType::Chunk => format!("chunk/{vk_as_path}/"),
                ProofType::Batch => format!("batch/{vk_as_path}/"),
                ProofType::Bundle => format!("bundle/{vk_as_path}/"),
                t => eyre::bail!("unrecognized proof type: {}", t as u8),
            }
            .as_str(),
        )?)
    }
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, clap::ValueEnum)]
pub enum ProverKind {
    Local,
    Axiom,
}

    pub fn validate(&self) -> Result<()> {
        if !self.base_url.path().ends_with('/') {
            eyre::bail!(
                "base_url must have a trailing slash, got: {}",
                self.base_url
            );
        }
        Ok(())
    }

    pub async fn get_asset(
impl ProverKind {
    pub fn create_from_file<P: AsRef<Path>>(
        &self,
        vk: &str,
        url_base: &url::Url,
        base_path: impl AsRef<Path>,
    ) -> Result<PathBuf> {
        let download_files = ["app.vmexe", "openvm.toml"];

        // Step 1: Create a local path for storage
        let storage_path = base_path.as_ref().join(vk);
        std::fs::create_dir_all(&storage_path)?;

        // Step 2 & 3: Download each file if needed
        let client = reqwest::Client::new();

        for filename in download_files.iter() {
            let local_file_path = storage_path.join(filename);
            let download_url = url_base.join(filename)?;

            // Check if file already exists
            if local_file_path.exists() {
                // Get file metadata to check size
                if let Ok(metadata) = std::fs::metadata(&local_file_path) {
                    // Make a HEAD request to get remote file size

                    if let Ok(head_resp) = client.head(download_url.clone()).send().await {
                        if let Some(content_length) = head_resp.headers().get("content-length") {
                            if let Ok(remote_size) =
                                content_length.to_str().unwrap_or("0").parse::<u64>()
                            {
                                // If sizes match, skip download
                                if metadata.len() == remote_size {
                                    println!("File {} already exists with matching size, skipping download", filename);
                                    continue;
                                }
                            }
                        }
                    }
                }
            }
        file_name: P,
    ) -> eyre::Result<(SdkConfig, Prover)> {
        match self {
            ProverKind::Local => {
                let config = LocalProverConfig::from_file(file_name)?;
                let sdk_config = config.sdk_config.clone();
                let prover = LocalProver::new(config);
                Ok((sdk_config, Prover::Local(prover)))
            }

            println!("Downloading {} from {}", filename, download_url);

            let response = client.get(download_url).send().await?;
            if !response.status().is_success() {
                eyre::bail!(
                    "Failed to download {}: HTTP status {}",
                    filename,
                    response.status()
                );
            }

            // Stream the content directly to file instead of loading into memory
            let mut file = std::fs::File::create(&local_file_path)?;
            let mut stream = response.bytes_stream();

            use futures_util::StreamExt;
            while let Some(chunk) = stream.next().await {
                std::io::Write::write_all(&mut file, &chunk?)?;
            ProverKind::Axiom => {
                let config = AxiomProverConfig::from_file(file_name)?;
                let sdk_config = config.sdk_config.clone();
                let prover = AxiomProver::new(config);
                Ok((sdk_config, Prover::Axiom(prover)))
            }
        }

        // Step 4: Return the storage path
        Ok(storage_path)
    }
}

#[derive(Clone, Serialize, Deserialize)]
pub struct LocalProverConfig {
    pub sdk_config: SdkConfig,
    pub circuits: HashMap<String, CircuitConfig>,
}

impl LocalProverConfig {
    pub fn from_reader<R>(reader: R) -> Result<Self>
    where
        R: std::io::Read,
    {
        serde_json::from_reader(reader).map_err(|e| eyre::eyre!(e))
    }

    pub fn from_file(file_name: String) -> Result<Self> {
        let file = File::open(file_name)?;
        Self::from_reader(&file)
    }
}

#[derive(Clone, Serialize, Deserialize)]
pub struct CircuitConfig {
    /// The path to save assets for a specified hard fork phase
    pub workspace_path: String,
    #[serde(flatten)]
    /// The location data for dynamic loading
    pub location_data: AssetsLocationData,
    /// cached vk value to save some initial cost, for debugging only
    #[serde(default)]
    pub vks: HashMap<ProofType, String>,
}

pub struct LocalProver {
    config: LocalProverConfig,
    next_task_id: u64,
    current_task: Option<JoinHandle<Result<String>>>,

    handlers: HashMap<String, Arc<dyn CircuitsHandler>>,
}

#[async_trait]
impl ProvingService for LocalProver {
impl ProvingService for Prover {
    fn is_local(&self) -> bool {
        true
    }
    async fn get_vks(&self, _: GetVkRequest) -> GetVkResponse {
        // get vk has been deprecated in new prover with dynamic asset loading scheme
        GetVkResponse {
            vks: vec![],
            error: None,
        match self {
            Prover::Local(p) => p.is_local(),
            Prover::Axiom(p) => p.is_local(),
        }
    }

    async fn get_vks(&self, req: GetVkRequest) -> GetVkResponse {
        match self {
            Prover::Local(p) => p.get_vks(req).await,
            Prover::Axiom(p) => p.get_vks(req).await,
        }
    }

    async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
        match self.do_prove(req).await {
            Ok(resp) => resp,
            Err(e) => ProveResponse {
                status: TaskStatus::Failed,
                error: Some(format!("failed to request proof: {}", e)),
                ..Default::default()
            },
        match self {
            Prover::Local(p) => p.prove(req).await,
            Prover::Axiom(p) => p.prove(req).await,
        }
    }

    async fn query_task(&mut self, req: QueryTaskRequest) -> QueryTaskResponse {
        if let Some(handle) = &mut self.current_task {
            if handle.is_finished() {
                return match handle.await {
                    Ok(Ok(proof)) => QueryTaskResponse {
                        task_id: req.task_id,
                        status: TaskStatus::Success,
                        proof: Some(proof),
                        ..Default::default()
                    },
                    Ok(Err(e)) => QueryTaskResponse {
                        task_id: req.task_id,
                        status: TaskStatus::Failed,
                        error: Some(format!("proving task failed: {}", e)),
                        ..Default::default()
                    },
                    Err(e) => QueryTaskResponse {
                        task_id: req.task_id,
                        status: TaskStatus::Failed,
                        error: Some(format!("proving task panicked: {}", e)),
                        ..Default::default()
                    },
                };
            } else {
                return QueryTaskResponse {
                    task_id: req.task_id,
                    status: TaskStatus::Proving,
                    ..Default::default()
                };
            }
        }
        // If no handle is found
        QueryTaskResponse {
            task_id: req.task_id,
            status: TaskStatus::Failed,
            error: Some("no proving task is running".to_string()),
            ..Default::default()
        match self {
            Prover::Local(p) => p.query_task(req).await,
            Prover::Axiom(p) => p.query_task(req).await,
        }
    }
}

static GLOBAL_ASSET_URLS: LazyLock<HashMap<String, HashMap<String, url::Url>>> =
    LazyLock::new(|| {
        const ASSETS_JSON: &str = include_str!("../assets_url_preset.json");
        serde_json::from_str(ASSETS_JSON).expect("Failed to parse assets_url_preset.json")
    });

impl LocalProver {
    pub fn new(mut config: LocalProverConfig) -> Self {
        for (fork_name, circuit_config) in config.circuits.iter_mut() {
            // validate each base url
            circuit_config.location_data.validate().unwrap();
            let mut template_url_mapping = GLOBAL_ASSET_URLS
                .get(&fork_name.to_lowercase())
                .cloned()
                .unwrap_or_default();

            // apply default settings in template
            for (key, url) in circuit_config.location_data.asset_detours.drain() {
                template_url_mapping.insert(key, url);
            }

            circuit_config.location_data.asset_detours = template_url_mapping;

            // validate each detours url
            for url in circuit_config.location_data.asset_detours.values() {
                assert!(
                    url.path().ends_with('/'),
                    "url {} must be end with /",
                    url.as_str()
                );
            }
        }

        Self {
            config,
            next_task_id: 0,
            current_task: None,
            handlers: HashMap::new(),
        }
    }

    async fn do_prove(&mut self, req: ProveRequest) -> Result<ProveResponse> {
        self.next_task_id += 1;
        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;

        let prover_task = UniversalHandler::get_task_from_input(&req.input)?;
        let is_openvm_13 = prover_task.use_openvm_13;
        let prover_task: ProvingTask = prover_task.into();
        let vk = hex::encode(&prover_task.vk);
        let handler = if let Some(handler) = self.handlers.get(&vk) {
            handler.clone()
        } else {
            let base_config = self
                .config
                .circuits
                .get(&req.hard_fork_name)
                .ok_or_else(|| {
                    eyre::eyre!(
                        "coordinator sent unexpected forkname {}",
                        req.hard_fork_name
                    )
                })?;
            let url_base = if let Some(url) = base_config.location_data.asset_detours.get(&vk) {
                url.clone()
            } else {
                base_config
                    .location_data
                    .gen_asset_url(&vk, req.proof_type)?
            };
            let asset_path = base_config
                .location_data
                .get_asset(&vk, &url_base, &base_config.workspace_path)
                .await?;
            let circuits_handler = Arc::new(Mutex::new(UniversalHandler::new(
                &asset_path,
                is_openvm_13,
            )?));
            self.handlers.insert(vk, circuits_handler.clone());
            circuits_handler
        };

        let handle = Handle::current();
        let is_evm = req.proof_type == ProofType::Bundle;
        let task_handle = tokio::task::spawn_blocking(move || {
            handle.block_on(handler.get_proof_data(&prover_task, is_evm))
        });
        self.current_task = Some(task_handle);

        Ok(ProveResponse {
            task_id: self.next_task_id.to_string(),
            proof_type: req.proof_type,
            circuit_version: req.circuit_version,
            hard_fork_name: req.hard_fork_name,
            status: TaskStatus::Proving,
            created_at,
            input: Some(req.input),
            ..Default::default()
        })
impl From<LocalProver> for Prover {
    fn from(p: LocalProver) -> Self {
        Prover::Local(p)
    }
}

impl From<AxiomProver> for Prover {
    fn from(p: AxiomProver) -> Self {
        Prover::Axiom(p)
    }
}

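Worth noting on the rewrite above: the new mod.rs wraps the two backends in a `Prover` enum and forwards each `ProvingService` method with a `match`. Since the trait's methods are async and take `&mut self`, static enum dispatch is arguably a simpler fit here than `Box<dyn ProvingService>`, and the `From` impls keep construction ergonomic.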
crates/prover-bin/src/prover/axiom.rs (new file, 329 lines)
@@ -0,0 +1,329 @@
use crate::zk_circuits_handler::universal::UniversalHandler;
use async_trait::async_trait;
use axiom_sdk::{
    AxiomSdk, ProofType as AxiomProofType,
    build::BuildSdk,
    input::Input as AxiomInput,
    prove::{ProveArgs, ProveSdk},
};
use eyre::Context;
use jiff::Timestamp;
use scroll_proving_sdk::{
    config::Config as SdkConfig,
    prover::{
        ProofType, ProvingService,
        proving_service::{
            GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
            QueryTaskResponse, TaskStatus,
        },
    },
};
use scroll_zkvm_types::{
    ProvingTask,
    proof::{OpenVmEvmProof, OpenVmVersionedVmStarkProof, ProofEnum},
};
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, fs::File, io::Write, path::Path};
use tempfile::NamedTempFile;
use tracing::Level;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AxiomProverConfig {
    pub axiom: AxiomConfig,
    pub sdk_config: SdkConfig,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AxiomConfig {
    pub api_key: String,
    // vk to program mapping
    pub programs: HashMap<String, AxiomProgram>,
    pub num_gpus: Option<usize>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AxiomProgram {
    pub program_id: String,
    pub config_id: String,
}

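// An illustrative JSON fragment (hypothetical values) that these structs
// deserialize from; the keys of `programs` are hex-encoded vks mapped to
// pre-built Axiom programs:
//
//   "axiom": {
//     "api_key": "<AXIOM_API_KEY>",
//     "num_gpus": 8,
//     "programs": {
//       "00aabb…": { "program_id": "prg-…", "config_id": "cfg-…" }
//     }
//   }
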
#[derive(Debug)]
pub struct AxiomProver {
    config: AxiomProverConfig,
}

impl AxiomProverConfig {
    pub fn from_reader<R>(reader: R) -> eyre::Result<Self>
    where
        R: std::io::Read,
    {
        serde_json::from_reader(reader).map_err(|e| eyre::eyre!(e))
    }

    pub fn from_file<P: AsRef<Path>>(file_name: P) -> eyre::Result<Self> {
        let file = File::open(file_name)?;
        Self::from_reader(&file)
    }
}

#[async_trait]
impl ProvingService for AxiomProver {
    fn is_local(&self) -> bool {
        false
    }

    async fn get_vks(&self, _: GetVkRequest) -> GetVkResponse {
        // get vk has been deprecated in new prover with dynamic asset loading scheme
        GetVkResponse {
            vks: vec![],
            error: None,
        }
    }

    #[instrument(skip(self), ret, level = Level::DEBUG)]
    async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
        self.prove_inner(req)
            .await
            .unwrap_or_else(|e| ProveResponse {
                status: TaskStatus::Failed,
                error: Some(format!("failed to submit proof task to axiom: {}", e)),
                ..Default::default()
            })
    }

    #[instrument(skip(self), ret, level = Level::DEBUG)]
    async fn query_task(&mut self, req: QueryTaskRequest) -> QueryTaskResponse {
        let task_id = req.task_id.clone();
        self.query_task_inner(req)
            .await
            .unwrap_or_else(|e| QueryTaskResponse {
                task_id,
                status: TaskStatus::Failed,
                error: Some(format!("failed to query axiom task: {}", e)),
                ..Default::default()
            })
    }
}

impl AxiomProver {
    pub fn new(config: AxiomProverConfig) -> Self {
        Self { config }
    }

    async fn make_axiom_request<R: Send + 'static>(
        &self,
        config_id: Option<String>,
        req: impl FnOnce(AxiomSdk) -> eyre::Result<R> + Send + 'static,
    ) -> eyre::Result<R> {
        let api_key = self.config.axiom.api_key.clone();
        tokio::task::spawn_blocking(move || {
            let config = axiom_sdk::AxiomConfig {
                api_key: Some(api_key),
                config_id,
                ..Default::default()
            };
            let sdk = AxiomSdk::new(config);
            req(sdk)
        })
        .await
        .context("failed to join axiom request")
        .flatten()
    }

    #[instrument(skip_all, ret, err, level = Level::DEBUG)]
    fn get_program(&self, vk: &[u8]) -> eyre::Result<AxiomProgram> {
        let vk = hex::encode(vk);
        debug!(vk = %vk);
        self.config
            .axiom
            .programs
            .get(vk.as_str())
            .cloned()
            .ok_or_else(|| eyre::eyre!("no axiom program configured for vk: {vk}"))
    }

    #[instrument(skip_all, err, level = Level::DEBUG)]
    async fn prove_inner(&mut self, req: ProveRequest) -> eyre::Result<ProveResponse> {
        let prover_task = UniversalHandler::get_task_from_input(&req.input)?;
        if prover_task.use_openvm_13 {
            eyre::bail!("axiom prover does not support openvm v1.3 tasks");
        }

        let prover_task: ProvingTask = prover_task.into();

        let program = self.get_program(&prover_task.vk)?;
        let num_gpus = self.config.axiom.num_gpus;

        let mut input_file = NamedTempFile::new()?;
        let input = prover_task.build_openvm_input();
        serde_json::to_writer(&mut input_file, &input)?;
        input_file.flush()?;

        let proof_type = if req.proof_type == ProofType::Bundle {
            AxiomProofType::Evm
        } else {
            AxiomProofType::Stark
        };

        let mut response = ProveResponse {
            proof_type: req.proof_type,
            created_at: Timestamp::now().as_duration().as_secs_f64(),
            status: TaskStatus::Proving,
            ..Default::default()
        };

        response.task_id = self
            .make_axiom_request(Some(program.config_id), move |sdk| {
                sdk.generate_new_proof(ProveArgs {
                    program_id: Some(program.program_id.clone()),
                    input: Some(AxiomInput::FilePath(input_file.path().to_path_buf())),
                    proof_type: Some(proof_type),
                    num_gpus,
                    priority: None,
                })
            })
            .await?;
        info!(
            proof_type = ?req.proof_type,
            identifier = %prover_task.identifier,
            task_id = %response.task_id,
            "submitted axiom proving task"
        );

        Ok(response)
    }

    #[instrument(skip_all, err, level = Level::DEBUG)]
    async fn query_task_inner(&mut self, req: QueryTaskRequest) -> eyre::Result<QueryTaskResponse> {
        let mut response = QueryTaskResponse {
            task_id: req.task_id.clone(),
            ..Default::default()
        };

        let task_id = req.task_id.clone();

        let (status, proof_type, proof) = self
            .make_axiom_request(None, move |sdk| {
                let status = sdk.get_proof_status(&task_id)?;
                debug!(status = ?status, "fetched axiom task status");

                let program_status = sdk.get_build_status(&status.program_uuid)?;
                let proof_type = match program_status.name.as_str() {
                    "chunk" => ProofType::Chunk,
                    "batch" => ProofType::Batch,
                    "bundle" => ProofType::Bundle,
                    _ => {
                        return Err(eyre::eyre!("unrecognized program in: {program_status:#?}",));
                    }
                };

                let axiom_proof_type: AxiomProofType = status.proof_type.parse()?;
                let proof = if status.state == "Succeeded" {
                    let file = NamedTempFile::new()?;
                    sdk.get_generated_proof(
                        &status.id,
                        &axiom_proof_type,
                        Some(file.path().to_path_buf()),
                    )?;
                    Some(file)
                } else {
                    None
                };

                Ok((status, proof_type, proof))
            })
            .await?;

        // Queued, Executing, Executed, AppProving, AppProvingDone, PostProcessing, Failed,
        // Succeeded
        response.status = match status.state.as_str() {
            "Queued" => TaskStatus::Queued,
            "Executing" | "Executed" | "AppProving" | "AppProvingDone" | "PostProcessing" => {
                TaskStatus::Proving
            }
            "Succeeded" => TaskStatus::Success,
            "Failed" => TaskStatus::Failed,
            other => {
                return Err(eyre::eyre!("unrecognized axiom task status: {other}"));
            }
        };
        debug!(status = ?response.status, "mapped axiom task status");

        if response.status == TaskStatus::Failed {
            response.error = Some(
                status
                    .error_message
                    .unwrap_or_else(|| "unknown error".to_string()),
            );
        }

        response.proof_type = proof_type;

        let created_at: Timestamp = status.created_at.parse()?;
        response.created_at = created_at.as_duration().as_secs_f64();
        if let Some(launched_at) = status.launched_at
            && !launched_at.is_empty()
        {
            let started_at: Timestamp = launched_at.parse()?;
            let started_at = started_at.as_duration();
            response.started_at = Some(started_at.as_secs_f64());

            if let Some(terminated_at) = status.terminated_at
                && !terminated_at.is_empty()
            {
                let finished_at: Timestamp = terminated_at.parse()?;
                let finished_at = finished_at.as_duration();
                response.finished_at = Some(finished_at.as_secs_f64());

                let duration = finished_at.checked_sub(started_at).ok_or_else(|| {
                    eyre::eyre!(
                        "invalid timestamps: started_at={:?}, finished_at={:?}",
                        started_at,
                        finished_at
                    )
                })?;
                response.compute_time_sec = Some(duration.as_secs_f64());
                info!(
                    task_id = %req.task_id,
                    launched_at = %format_args!("{launched_at:#}"),
                    terminated_at = %format_args!("{terminated_at:#}"),
                    duration = %format_args!("{duration:#}"),
                    priority = %status.priority,
                    "completed"
                );
                info!(
                    task_id = %req.task_id,
                    cells_used = %status.cells_used,
                    num_gpus = %status.num_gpus,
                    "resource usage"
                );
                if let Some(num_instructions) = status.num_instructions {
                    let mhz = num_instructions as f64 / (duration.as_secs_f64() * 1_000_000.0);
                    info!(
                        task_id = %req.task_id,
                        cycles = %num_instructions,
                        MHz = %format_args!("{mhz:.2}"),
                        "performance"
                    );
                }
            }
        }

        if let Some(proof_file) = proof {
            let proof = match proof_type {
                ProofType::Bundle => {
                    let proof: OpenVmEvmProof = serde_json::from_reader(proof_file)?;
                    ProofEnum::Evm(proof.into())
                }
                _ => {
                    let proof: OpenVmVersionedVmStarkProof = serde_json::from_reader(proof_file)?;
                    ProofEnum::Stark(proof.try_into()?)
                }
            };

            response.proof = Some(serde_json::to_string(&proof)?);
        }

        Ok(response)
    }
}
crates/prover-bin/src/prover/local.rs (new file, 342 lines)
@@ -0,0 +1,342 @@
use crate::zk_circuits_handler::{CircuitsHandler, universal::UniversalHandler};
use async_trait::async_trait;
use eyre::Result;
use scroll_proving_sdk::{
    config::Config as SdkConfig,
    prover::{
        ProvingService,
        proving_service::{
            GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
            QueryTaskResponse, TaskStatus,
        },
        types::ProofType,
    },
};
use scroll_zkvm_types::ProvingTask;
use serde::{Deserialize, Serialize};
use std::{
    collections::HashMap,
    fmt,
    fs::File,
    path::{Path, PathBuf},
    sync::{Arc, LazyLock},
    time::{SystemTime, UNIX_EPOCH},
};
use tokio::{runtime::Handle, sync::Mutex, task::JoinHandle};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AssetsLocationData {
    /// the base url to form a general downloading url for an asset, MUST HAVE A TRAILING SLASH
    pub base_url: url::Url,
    #[serde(default)]
    /// an altered url for a specified vk
    pub asset_detours: HashMap<String, url::Url>,
}

impl AssetsLocationData {
    pub fn gen_asset_url(&self, vk_as_path: &str, proof_type: ProofType) -> Result<url::Url> {
        Ok(self.base_url.join(
            match proof_type {
                ProofType::Chunk => format!("chunk/{vk_as_path}/"),
                ProofType::Batch => format!("batch/{vk_as_path}/"),
                ProofType::Bundle => format!("bundle/{vk_as_path}/"),
                t => eyre::bail!("unrecognized proof type: {}", t as u8),
            }
            .as_str(),
        )?)
    }

    pub fn validate(&self) -> Result<()> {
        if !self.base_url.path().ends_with('/') {
            eyre::bail!(
                "base_url must have a trailing slash, got: {}",
                self.base_url
            );
        }
        Ok(())
    }

    pub async fn get_asset(
        &self,
        vk: &str,
        url_base: &url::Url,
        base_path: impl AsRef<Path>,
    ) -> Result<PathBuf> {
        let download_files = ["app.vmexe", "openvm.toml"];

        // Step 1: Create a local path for storage
        let storage_path = base_path.as_ref().join(vk);
        std::fs::create_dir_all(&storage_path)?;

        // Step 2 & 3: Download each file if needed
        let client = reqwest::Client::new();

        for filename in download_files.iter() {
            let local_file_path = storage_path.join(filename);
            let download_url = url_base.join(filename)?;

            // Check if file already exists
            if local_file_path.exists() {
                // Get file metadata to check size
                if let Ok(metadata) = std::fs::metadata(&local_file_path) {
                    // Make a HEAD request to get remote file size

                    if let Ok(head_resp) = client.head(download_url.clone()).send().await {
                        if let Some(content_length) = head_resp.headers().get("content-length") {
                            if let Ok(remote_size) =
                                content_length.to_str().unwrap_or("0").parse::<u64>()
                            {
                                // If sizes match, skip download
                                if metadata.len() == remote_size {
                                    println!(
                                        "File {} already exists with matching size, skipping download",
                                        filename
                                    );
                                    continue;
                                }
                            }
                        }
                    }
                }
            }

            println!("Downloading {} from {}", filename, download_url);

            let response = client.get(download_url).send().await?;
            if !response.status().is_success() {
                eyre::bail!(
                    "Failed to download {}: HTTP status {}",
                    filename,
                    response.status()
                );
            }

            // Stream the content directly to file instead of loading into memory
            let mut file = std::fs::File::create(&local_file_path)?;
            let mut stream = response.bytes_stream();

            use futures_util::StreamExt;
            while let Some(chunk) = stream.next().await {
                std::io::Write::write_all(&mut file, &chunk?)?;
            }
        }

        // Step 4: Return the storage path
        Ok(storage_path)
    }
}

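// Resulting on-disk layout per vk (illustrative), under the configured
// workspace_path; note the HEAD size check above only skips the download when
// the cached file's length matches the remote content-length:
//
//   <workspace_path>/<vk-hex>/app.vmexe
//   <workspace_path>/<vk-hex>/openvm.toml
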
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct LocalProverConfig {
|
||||
pub sdk_config: SdkConfig,
|
||||
pub circuits: HashMap<String, CircuitConfig>,
|
||||
}
|
||||
|
||||
impl LocalProverConfig {
|
||||
pub fn from_reader<R>(reader: R) -> Result<Self>
|
||||
where
|
||||
R: std::io::Read,
|
||||
{
|
||||
serde_json::from_reader(reader).map_err(|e| eyre::eyre!(e))
|
||||
}
|
||||
|
||||
pub fn from_file<P: AsRef<Path>>(file_name: P) -> Result<Self> {
|
||||
let file = File::open(file_name)?;
|
||||
Self::from_reader(&file)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct CircuitConfig {
|
||||
/// The path to save assets for a specified hard fork phase
|
||||
pub workspace_path: String,
|
||||
#[serde(flatten)]
|
||||
/// The location data for dynamic loading
|
||||
pub location_data: AssetsLocationData,
|
||||
/// cached vk value to save some initial cost, for debugging only
|
||||
#[serde(default)]
|
||||
pub vks: HashMap<ProofType, String>,
|
||||
}
|
||||
|
||||
pub struct LocalProver {
|
||||
config: LocalProverConfig,
|
||||
next_task_id: u64,
|
||||
current_task: Option<JoinHandle<Result<String>>>,
|
||||
|
||||
handlers: HashMap<String, Arc<dyn CircuitsHandler>>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for LocalProver {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("LocalProver")
|
||||
.field("config", &self.config)
|
||||
.field("next_task_id", &self.next_task_id)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ProvingService for LocalProver {
|
||||
fn is_local(&self) -> bool {
|
||||
true
|
||||
}
|
||||
async fn get_vks(&self, _: GetVkRequest) -> GetVkResponse {
|
||||
// get vk has been deprecated in new prover with dynamic asset loading scheme
|
||||
GetVkResponse {
|
||||
vks: vec![],
|
||||
error: None,
|
||||
}
|
||||
}
|
||||
async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
|
||||
match self.do_prove(req).await {
|
||||
Ok(resp) => resp,
|
||||
Err(e) => ProveResponse {
|
||||
status: TaskStatus::Failed,
|
||||
error: Some(format!("failed to request proof: {}", e)),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
async fn query_task(&mut self, req: QueryTaskRequest) -> QueryTaskResponse {
|
||||
if let Some(handle) = &mut self.current_task {
|
||||
if handle.is_finished() {
|
||||
return match handle.await {
|
||||
Ok(Ok(proof)) => QueryTaskResponse {
|
||||
task_id: req.task_id,
|
||||
status: TaskStatus::Success,
|
||||
proof: Some(proof),
|
||||
..Default::default()
|
||||
},
|
||||
Ok(Err(e)) => QueryTaskResponse {
|
||||
task_id: req.task_id,
|
||||
status: TaskStatus::Failed,
|
||||
error: Some(format!("proving task failed: {}", e)),
|
||||
..Default::default()
|
||||
},
|
||||
Err(e) => QueryTaskResponse {
|
||||
task_id: req.task_id,
|
||||
status: TaskStatus::Failed,
|
||||
error: Some(format!("proving task panicked: {}", e)),
|
||||
..Default::default()
|
||||
},
|
||||
};
|
||||
} else {
|
||||
return QueryTaskResponse {
|
||||
task_id: req.task_id,
|
||||
status: TaskStatus::Proving,
|
||||
..Default::default()
|
||||
};
|
||||
}
|
||||
}
|
||||
// If no handle is found
|
||||
QueryTaskResponse {
|
||||
task_id: req.task_id,
|
||||
status: TaskStatus::Failed,
|
||||
error: Some("no proving task is running".to_string()),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static GLOBAL_ASSET_URLS: LazyLock<HashMap<String, HashMap<String, url::Url>>> =
|
||||
LazyLock::new(|| {
|
||||
const ASSETS_JSON: &str = include_str!("../../assets_url_preset.json");
|
||||
serde_json::from_str(ASSETS_JSON).expect("Failed to parse assets_url_preset.json")
|
||||
});
impl LocalProver {
    pub fn new(mut config: LocalProverConfig) -> Self {
        for (fork_name, circuit_config) in config.circuits.iter_mut() {
            // validate each base url
            circuit_config.location_data.validate().unwrap();
            let mut template_url_mapping = GLOBAL_ASSET_URLS
                .get(&fork_name.to_lowercase())
                .cloned()
                .unwrap_or_default();

            // overlay user-specified detours onto the preset template (user entries take precedence)
            for (key, url) in circuit_config.location_data.asset_detours.drain() {
                template_url_mapping.insert(key, url);
            }

            circuit_config.location_data.asset_detours = template_url_mapping;

            // validate each detour url
            for url in circuit_config.location_data.asset_detours.values() {
                assert!(
                    url.path().ends_with('/'),
                    "url {} must end with /",
                    url.as_str()
                );
            }
        }

        Self {
            config,
            next_task_id: 0,
            current_task: None,
            handlers: HashMap::new(),
        }
    }

    async fn do_prove(&mut self, req: ProveRequest) -> Result<ProveResponse> {
        self.next_task_id += 1;
        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;

        let prover_task = UniversalHandler::get_task_from_input(&req.input)?;
        let is_openvm_13 = prover_task.use_openvm_13;
        let prover_task: ProvingTask = prover_task.into();
        let vk = hex::encode(&prover_task.vk);
        let handler = if let Some(handler) = self.handlers.get(&vk) {
            handler.clone()
        } else {
            let base_config = self
                .config
                .circuits
                .get(&req.hard_fork_name)
                .ok_or_else(|| {
                    eyre::eyre!(
                        "coordinator sent unexpected fork name {}",
                        req.hard_fork_name
                    )
                })?;
            let url_base = if let Some(url) = base_config.location_data.asset_detours.get(&vk) {
                url.clone()
            } else {
                base_config
                    .location_data
                    .gen_asset_url(&vk, req.proof_type)?
            };
            let asset_path = base_config
                .location_data
                .get_asset(&vk, &url_base, &base_config.workspace_path)
                .await?;
            let circuits_handler = Arc::new(Mutex::new(UniversalHandler::new(
                &asset_path,
                is_openvm_13,
            )?));
            self.handlers.insert(vk, circuits_handler.clone());
            circuits_handler
        };

        let handle = Handle::current();
        let is_evm = req.proof_type == ProofType::Bundle;
        let task_handle = tokio::task::spawn_blocking(move || {
            handle.block_on(handler.get_proof_data(&prover_task, is_evm))
        });
        self.current_task = Some(task_handle);

        Ok(ProveResponse {
            task_id: self.next_task_id.to_string(),
            proof_type: req.proof_type,
            circuit_version: req.circuit_version,
            hard_fork_name: req.hard_fork_name,
            status: TaskStatus::Proving,
            created_at,
            input: Some(req.input),
            ..Default::default()
        })
    }
}
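
Taken together, `prove` and `query_task` form a submit-then-poll protocol over a single task slot: `prove` spawns the blocking proving job and immediately returns a `Proving` status, and the caller polls `query_task` until the join handle reports a terminal status. Below is a minimal caller sketch, not part of the diff; the field and type names mirror the code above, and it assumes the request/response types derive `Default`:

```rust
// Sketch only: drive one LocalProver task to completion by polling.
// Anything not visible in the code above (e.g. Default on QueryTaskRequest) is assumed.
async fn prove_and_wait(prover: &mut LocalProver, req: ProveRequest) -> Option<String> {
    let submitted = prover.prove(req).await;
    if let Some(err) = submitted.error {
        eprintln!("submission failed: {err}");
        return None;
    }
    loop {
        let resp = prover
            .query_task(QueryTaskRequest {
                task_id: submitted.task_id.clone(),
                ..Default::default()
            })
            .await;
        match resp.status {
            TaskStatus::Success => return resp.proof, // the proof payload
            TaskStatus::Failed => {
                eprintln!("proving failed: {:?}", resp.error);
                return None;
            }
            // Still proving: back off before the next poll.
            _ => tokio::time::sleep(std::time::Duration::from_secs(5)).await,
        }
    }
}
```
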
@@ -9,14 +9,13 @@ import (
	"github.com/pressly/goose/v3"
)

//go:embed migrations
//go:embed migrations/*.sql
var embedMigrations embed.FS

// MigrationsDir migration dir
const MigrationsDir string = "migrations"

func init() {
	// note: goose ignores non-sql files by default so we do not need to specify *.sql
	goose.SetBaseFS(embedMigrations)
	goose.SetSequential(true)
	goose.SetTableName("scroll_migrations")
@@ -25,41 +24,6 @@ func init() {
	goose.SetVerbose(verbose)
}

// MigrateModule migrates the db used by another module with the specified goose table name;
// sql files for that module must be put in a sub-directory under `MigrationsDir`
func MigrateModule(db *sql.DB, moduleName string) error {
	goose.SetTableName(moduleName + "_migrations")
	defer func() {
		goose.SetTableName("scroll_migrations")
	}()

	return goose.Up(db, MigrationsDir+"/"+moduleName, goose.WithAllowMissing())
}

// RollbackModule rolls back the specified module to the given version
func RollbackModule(db *sql.DB, moduleName string, version *int64) error {
	goose.SetTableName(moduleName + "_migrations")
	defer func() {
		goose.SetTableName("scroll_migrations")
	}()
	moduleDir := MigrationsDir + "/" + moduleName

	if version != nil {
		return goose.DownTo(db, moduleDir, *version)
	}
	return goose.Down(db, moduleDir)
}

// ResetModuleDB cleans and migrates the db for a module.
func ResetModuleDB(db *sql.DB, moduleName string) error {
	if err := RollbackModule(db, moduleName, new(int64)); err != nil {
		return err
	}
	return MigrateModule(db, moduleName)
}

// Migrate migrates the db
func Migrate(db *sql.DB) error {
	//return goose.Up(db, MIGRATIONS_DIR, goose.WithAllowMissing())

@@ -1,30 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table prover_sessions
(
    public_key TEXT NOT NULL,
    upstream TEXT NOT NULL,
    up_token TEXT NOT NULL,
    expired TIMESTAMP(0) NOT NULL,
    constraint uk_prover_sessions_public_key_upstream unique (public_key, upstream)
);

create index idx_prover_sessions_expired on prover_sessions (expired);

create table priority_upstream
(
    public_key TEXT NOT NULL,
    upstream TEXT NOT NULL,
    update_time TIMESTAMP(0) NOT NULL DEFAULT now()
);

create unique index idx_priority_upstream_public_key on priority_upstream (public_key);
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop index if exists idx_prover_sessions_expired;

drop table if exists prover_sessions;
drop table if exists priority_upstream;
-- +goose StatementEnd
@@ -1,3 +1,5 @@
include ../../build/common.mk

.PHONY: clean setup_db test_tool all check_vars

include conf/.make.env
@@ -57,6 +59,7 @@ reset_db:

test_tool:
	go build -o $(PWD)/build/bin/e2e_tool ../../rollup/tests/integration_tool
	$(call macos_codesign,$(PWD)/build/bin/e2e_tool)

build/bin/e2e_tool: test_tool

@@ -67,4 +70,4 @@ reimport_data: reset_db import_data

coordinator_setup:
	SCROLL_FORK_NAME=${SCROLL_FORK_NAME} $(MAKE) -C ../../coordinator localsetup
	cp -f conf/genesis.json ../../coordinator/build/bin/conf
	cp -f conf/genesis.json ../../coordinator/build/bin/conf

@@ -2,15 +2,34 @@

It contains data from some blocks in a specified testnet, and helps to generate a series of chunks/batches/bundles from these blocks, filling the DB for the coordinator, so an e2e test (from chunk to bundle) can be run completely locally

### Pre

1. install [goose](https://github.com/pressly/goose)

```bash
go install github.com/pressly/goose/v3/cmd/goose@latest
```

Prepare:
link one of the dirs with a prepared data set as "conf"; currently we have the following sets:
+ sepolia: with blocks from scroll sepolia
+ cloak-xen: with blocks from xen sepolia, which is a cloak network

- sepolia: with blocks from the scroll sepolia fork, e.g. `ln -s sepolia-galileo conf`
- galileo: with blocks from the scroll galileo fork
- cloak-xen: with blocks from xen sepolia, which is a cloak network

Steps:

1. run `make all` under `tests/prover-e2e`; it would launch a postgreSql db in a local docker container, which is ready to be used by the coordinator (including some chunks/batches/bundles waiting to be proven)
2. setup assets by running `make coordinator_setup`
2. setup assets by running `make coordinator_setup`; `SCROLL_ZKVM_VERSION` must be specified, and if we run the e2e test for a fork other than `Galileo`, `SCROLL_FORK_NAME` is also required, example:

```bash
SCROLL_FORK_NAME=feynman SCROLL_ZKVM_VERSION=v0.7.0 make coordinator_setup
```

3. in `coordinator/build/bin/conf`, update necessary items in `config.template.json` and rename it as `config.json`
4. build and launch the `coordinator_api` service locally
5. setup the `config.json` for the zkvm prover to connect with the locally launched coordinator api
6. in `zkvm-prover`, launch `make test_e2e_run`, which would run the prover locally, connect to the local coordinator api service according to the `config.json`, and prove all tasks injected into the db in step 1.
5. setup the `config.json` for the zkvm prover to connect with the locally launched coordinator api:

- set the `sdk_config.coordinator.base_url` field to "http://localhost:8390" (see the snippet after these steps),

6. in `zkvm-prover`, launch `make test_e2e_run`, which would run the prover locally, connect to the local coordinator api service according to the `config.json`, and prove all tasks injected into the db in step 1.
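
For step 5, the relevant fragment of the prover's `config.json` would look like the following (all other fields elided; the field nesting follows the full config template shown later in this diff):

```json
{
  "sdk_config": {
    "coordinator": {
      "base_url": "http://localhost:8390"
    }
  }
}
```
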
@@ -1,3 +1,3 @@
BEGIN_BLOCK?=15206785
END_BLOCK?=15206794
SCROLL_FORK_NAME=galileo
SCROLL_FORK_NAME=galileo

16
zkvm-prover/Dockerfile
Normal file
@@ -0,0 +1,16 @@
FROM scrolltech/cuda-go-rust-builder:cuda-12.9.1-go-1.22.12-rust-nightly-2025-08-18 AS builder
WORKDIR /app
COPY . .
RUN cd /app/zkvm-prover && make cpu_prover

FROM debian:trixie-slim AS runtime
WORKDIR app
RUN apt-get update \
    && apt-get install -y --no-install-recommends ca-certificates \
    && update-ca-certificates \
    && rm -rf /var/lib/apt/lists/*

ENV RUST_LOG='off,scroll_proving_sdk=info,prover=info'

COPY --from=builder /app/target/release/prover ./prover
ENTRYPOINT ["./prover"]
@@ -35,6 +35,9 @@ ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_VERSION}
E2E_HANDLE_SET ?= ../tests/prover-e2e/testset.json
DUMP_DIR ?= .work

cpu_prover:
	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --locked --release -p prover

prover:
	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --locked --release --features cuda -p prover

@@ -60,8 +63,11 @@ test_run:
	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json

test_e2e_run: ${E2E_HANDLE_SET}
	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json handle ${E2E_HANDLE_SET}
	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --prover.kind local --config ./config.json handle ${E2E_HANDLE_SET}

test_e2e_run_gpu: ${E2E_HANDLE_SET}
	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release --features cuda -p prover -- --config ./config.json handle ${E2E_HANDLE_SET}

test_axiom_e2e_run: ${E2E_HANDLE_SET}
	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --prover.kind axiom --config ./config.json handle ${E2E_HANDLE_SET}

@@ -6,7 +6,8 @@
        "base_url": "<the url of coordinator>",
        "retry_count": 10,
        "retry_wait_time_sec": 10,
        "connection_timeout_sec": 1800
        "connection_timeout_sec": 1800,
        "suppress_empty_task_error": false
    },
    "prover": {
        "supported_proof_types": [
@@ -14,7 +15,10 @@
            2,
            3
        ],
        "circuit_version": "v0.13.1"
        "circuit_version": "v0.13.1",
        "n_workers": 1,
        "poll_interval_sec": 20,
        "randomized_delay_sec": 0
    },
    "health_listener_addr": "127.0.0.1:10080",
    "db_path": ".work/db"
@@ -31,6 +35,13 @@
            "galileoV2": {
                "base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/galileov2/",
                "workspace_path": ".work/galileo"
            }
        }
    },
    "axiom_api_key": "<axiom api key>",
    "axiom_programs": {
        "<vk hex string>": {
            "program_id": "prg_<axiom program id>",
            "config_id": "cfg_<axiom config id>"
        }
    }
}