Compare commits

..

2 Commits

Author SHA1 Message Date
Ahmed Castro
c799808df4 docs: improve Galileo gas parameter derivation tooling (#1777) 2026-01-06 10:44:19 +08:00
Roy Lou
8fc6d65f3a feat: add Galileo gas parameter derivation tooling
Add scripts and environment setup for deriving Scroll gas fee parameters
using data collected from L1 batches. Includes data collection, analysis,
and batch visualization utilities.
2025-12-30 11:52:04 +08:00
46 changed files with 4006 additions and 2699 deletions

View File

@@ -360,49 +360,6 @@ jobs:
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
coordinator-proxy:
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if it does not exist
env:
REPOSITORY: coordinator-proxy
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: coordinator-proxy
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/coordinator-proxy.Dockerfile
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
coordinator-cron:
runs-on:
group: scroll-reth-runner-group

View File

@@ -1,26 +0,0 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
# Build coordinator proxy
FROM base as builder
COPY . .
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_proxy && mv ./build/bin/coordinator_proxy /bin/coordinator_proxy
# Copy the coordinator proxy binary into a second-stage Ubuntu deploy container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install vim netcat-openbsd net-tools curl jq -y
COPY --from=builder /bin/coordinator_proxy /bin/
RUN /bin/coordinator_proxy --version
WORKDIR /app
ENTRYPOINT ["/bin/coordinator_proxy"]

View File

@@ -1,8 +0,0 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/

View File

@@ -12,7 +12,6 @@ require (
github.com/gin-gonic/gin v1.9.1
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.20
github.com/mitchellh/mapstructure v1.5.0
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.19.0
@@ -148,6 +147,7 @@ require (
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect

View File

@@ -4,7 +4,6 @@ import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/mitchellh/mapstructure"
)
// Response the response schema
@@ -14,19 +13,6 @@ type Response struct {
Data interface{} `json:"data"`
}
func (resp *Response) DecodeData(out interface{}) error {
// Decode generically unmarshaled JSON (map[string]any, []any) into a typed struct
// honoring `json` tags and allowing weak type conversions.
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
TagName: "json",
Result: out,
})
if err != nil {
return err
}
return dec.Decode(resp.Data)
}
// RenderJSON renders response with json
func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
var errMsg string

View File

@@ -34,10 +34,6 @@ coordinator_cron:
coordinator_tool:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool
coordinator_proxy:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_proxy ./cmd/proxy
localsetup: coordinator_api ## Local setup: build coordinator_api, copy config, and setup releases
mkdir -p build/bin/conf
@echo "Copying configuration files..."

View File

@@ -1,122 +0,0 @@
package app
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"os/signal"
"time"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/proxy"
"scroll-tech/coordinator/internal/route"
)
var app *cli.App
func init() {
// Set up coordinator app info.
app = cli.NewApp()
app.Action = action
app.Name = "coordinator proxy"
app.Usage = "Proxy for multiple Scroll L2 Coordinators"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `coordinator-test` app for integration-test.
utils.RegisterSimulation(app, utils.CoordinatorAPIApp)
}
func action(ctx *cli.Context) error {
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewProxyConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
var db *gorm.DB
if dbCfg := cfg.ProxyManager.DB; dbCfg != nil {
log.Info("Apply persistent storage", "via", cfg.ProxyManager.DB.DSN)
db, err = database.InitDB(cfg.ProxyManager.DB)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
if err = database.CloseDB(db); err != nil {
log.Error("can not close db connection", "error", err)
}
}()
observability.Server(ctx, db)
}
registry := prometheus.DefaultRegisterer
apiSrv := server(ctx, cfg, db, registry)
log.Info(
"Start coordinator api successfully.",
"version", version.Version,
)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
log.Info("start shutdown coordinator proxy server ...")
closeCtx, cancelExit := context.WithTimeout(context.Background(), 5*time.Second)
defer cancelExit()
if err = apiSrv.Shutdown(closeCtx); err != nil {
log.Warn("shutdown coordinator proxy server failure", "error", err)
return nil
}
<-closeCtx.Done()
log.Info("coordinator proxy server exiting success")
return nil
}
func server(ctx *cli.Context, cfg *config.ProxyConfig, db *gorm.DB, reg prometheus.Registerer) *http.Server {
router := gin.New()
proxy.InitController(cfg, db, reg)
route.ProxyRoute(router, cfg, reg)
port := ctx.String(httpPortFlag.Name)
srv := &http.Server{
Addr: fmt.Sprintf(":%s", port),
Handler: router,
ReadHeaderTimeout: time.Minute,
}
go func() {
if runServerErr := srv.ListenAndServe(); runServerErr != nil && !errors.Is(runServerErr, http.ErrServerClosed) {
log.Crit("run coordinator proxy http server failure", "error", runServerErr)
}
}()
return srv
}
// Run runs the coordinator proxy.
func Run() {
// RunApp the coordinator.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -1,30 +0,0 @@
package app
import "github.com/urfave/cli/v2"
var (
apiFlags = []cli.Flag{
// http flags
&httpEnabledFlag,
&httpListenAddrFlag,
&httpPortFlag,
}
// httpEnabledFlag enables the HTTP-RPC server.
httpEnabledFlag = cli.BoolFlag{
Name: "http",
Usage: "Enable the HTTP-RPC server",
Value: false,
}
// httpListenAddrFlag sets the HTTP-RPC listen address.
httpListenAddrFlag = cli.StringFlag{
Name: "http.addr",
Usage: "HTTP-RPC server listening interface",
Value: "localhost",
}
// httpPortFlag sets the HTTP-RPC port.
httpPortFlag = cli.IntFlag{
Name: "http.port",
Usage: "HTTP-RPC server listening port",
Value: 8590,
}
)

View File

@@ -1,7 +0,0 @@
package main
import "scroll-tech/coordinator/cmd/proxy/app"
func main() {
app.Run()
}

View File

@@ -1,31 +0,0 @@
{
"proxy_manager": {
"proxy_cli": {
"proxy_name": "proxy_name",
"secret": "client private key"
},
"auth": {
"secret": "proxy secret key",
"challenge_expire_duration_sec": 3600,
"login_expire_duration_sec": 3600
},
"verifier": {
"min_prover_version": "v4.4.45",
"verifiers": []
},
"db": {
"driver_name": "postgres",
"dsn": "postgres://localhost/scroll?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
}
},
"coordinators": {
"sepolia": {
"base_url": "http://localhost:8555",
"retry_count": 10,
"retry_wait_time_sec": 10,
"connection_timeout_sec": 30
}
}
}

View File

@@ -1,74 +0,0 @@
package config
import (
"encoding/json"
"os"
"path/filepath"
"scroll-tech/common/database"
"scroll-tech/common/utils"
)
// ProxyManager holds the proxy manager configuration items.
type ProxyManager struct {
// Zk verifier config help to confine the connected prover.
Verifier *VerifierConfig `json:"verifier"`
Client *ProxyClient `json:"proxy_cli"`
Auth *Auth `json:"auth"`
DB *database.Config `json:"db,omitempty"`
}
func (m *ProxyManager) Normalize() {
if m.Client.Secret == "" {
m.Client.Secret = m.Auth.Secret
}
if m.Client.ProxyVersion == "" {
m.Client.ProxyVersion = m.Verifier.MinProverVersion
}
}
// ProxyClient is the client configuration used to connect to upstream coordinators as a client
type ProxyClient struct {
ProxyName string `json:"proxy_name"`
ProxyVersion string `json:"proxy_version,omitempty"`
Secret string `json:"secret,omitempty"`
}
// UpStream holds the configuration of an upstream coordinator
type UpStream struct {
BaseUrl string `json:"base_url"`
RetryCount uint `json:"retry_count"`
RetryWaitTime uint `json:"retry_wait_time_sec"`
ConnectionTimeoutSec uint `json:"connection_timeout_sec"`
CompatibileMode bool `json:"compatible_mode,omitempty"`
}
// ProxyConfig loads the configuration items.
type ProxyConfig struct {
ProxyManager *ProxyManager `json:"proxy_manager"`
ProxyName string `json:"proxy_name"`
Coordinators map[string]*UpStream `json:"coordinators"`
}
// NewProxyConfig returns a new instance of ProxyConfig.
func NewProxyConfig(file string) (*ProxyConfig, error) {
buf, err := os.ReadFile(filepath.Clean(file))
if err != nil {
return nil, err
}
cfg := &ProxyConfig{}
err = json.Unmarshal(buf, cfg)
if err != nil {
return nil, err
}
// Override config with environment variables
err = utils.OverrideConfigWithEnv(cfg, "SCROLL_COORDINATOR_PROXY")
if err != nil {
return nil, err
}
return cfg, nil
}
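As an illustration only (not part of this diff): a minimal sketch of how the sample configuration shown above maps onto these structs. The config path is hypothetical; the sketch only uses NewProxyConfig, Normalize, and the UpStream fields defined here.
package main
import (
	"fmt"
	"log"
	"scroll-tech/coordinator/internal/config"
)
func main() {
	// Hypothetical path; any file following the sample schema above works.
	cfg, err := config.NewProxyConfig("./conf/proxy-config.json")
	if err != nil {
		log.Fatalf("load proxy config: %v", err)
	}
	// Fill the client secret / proxy version from the auth and verifier sections when unset.
	cfg.ProxyManager.Normalize()
	for name, up := range cfg.Coordinators {
		fmt.Printf("upstream %q -> %s (retries=%d, wait=%ds, timeout=%ds)\n",
			name, up.BaseUrl, up.RetryCount, up.RetryWaitTime, up.ConnectionTimeoutSec)
	}
}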

View File

@@ -19,56 +19,28 @@ type AuthController struct {
loginLogic *auth.LoginLogic
}
func NewAuthControllerWithLogic(loginLogic *auth.LoginLogic) *AuthController {
return &AuthController{
loginLogic: loginLogic,
}
}
// NewAuthController returns an AuthController instance
func NewAuthController(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *AuthController {
return &AuthController{
loginLogic: auth.NewLoginLogic(db, cfg.ProverManager.Verifier, vf),
loginLogic: auth.NewLoginLogic(db, cfg, vf),
}
}
// Login is the api controller for login, used as the Authenticator in JWT.
// It can work in two modes: the full process for a normal login, or, if the login
// request is posted from a proxy, a simpler process to log in the client.
// Login the api controller for login
func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
// check if the login is posted by a proxy
var viaProxy bool
if proverType, proverTypeExist := c.Get(types.ProverProviderTypeKey); proverTypeExist {
proverType := uint8(proverType.(float64))
viaProxy = proverType == types.ProverProviderTypeProxy
}
var login types.LoginParameter
if err := c.ShouldBind(&login); err != nil {
return "", fmt.Errorf("missing the public_key, err:%w", err)
}
// if not, proceed with the normal login
if !viaProxy {
// check that the login parameter's challenge equals the bearer token; the Authorization
// header must exist, otherwise the jwt middleware will intercept the request
brearToken := c.GetHeader("Authorization")
if brearToken != "Bearer "+login.Message.Challenge {
return "", errors.New("check challenge failure for the not equal challenge string")
}
if err := auth.VerifyMsg(&login); err != nil {
return "", err
}
// check whether the challenge has already been used; if so, return failure
if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
return "", fmt.Errorf("login insert challenge string failure:%w", err)
}
// check that the login parameter's challenge equals the bearer token; the Authorization
// header must exist, otherwise the jwt middleware will intercept the request
brearToken := c.GetHeader("Authorization")
if brearToken != "Bearer "+login.Message.Challenge {
return "", errors.New("check challenge failure for the not equal challenge string")
}
if err := a.loginLogic.CompatiblityCheck(&login); err != nil {
if err := a.loginLogic.Check(&login); err != nil {
return "", fmt.Errorf("check the login parameter failure: %w", err)
}
@@ -77,6 +49,11 @@ func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
return "", fmt.Errorf("prover hard fork name failure:%w", err)
}
// check whether the challenge has already been used; if so, return failure
if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
return "", fmt.Errorf("login insert challenge string failure:%w", err)
}
returnData := types.LoginParameterWithHardForkName{
HardForkName: hardForkNames,
LoginParameter: login,
@@ -108,6 +85,10 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
c.Set(types.ProverName, proverName)
}
if publicKey, ok := claims[types.PublicKey]; ok {
c.Set(types.PublicKey, publicKey)
}
if proverVersion, ok := claims[types.ProverVersion]; ok {
c.Set(types.ProverVersion, proverVersion)
}
@@ -120,9 +101,5 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
c.Set(types.ProverProviderTypeKey, providerType)
}
if publicKey, ok := claims[types.PublicKey]; ok {
return publicKey
}
return nil
}

View File

@@ -1,150 +0,0 @@
package proxy
import (
"context"
"fmt"
"sync"
"time"
jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/logic/auth"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/types"
)
// AuthController is login API
type AuthController struct {
apiLogin *api.AuthController
clients Clients
proverMgr *ProverManager
}
const upstreamConnTimeout = time.Second * 5
const LoginParamCache = "login_param"
const ProverTypesKey = "prover_types"
const SignatureKey = "prover_signature"
// NewAuthController returns an LoginController instance
func NewAuthController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager) *AuthController {
// use a dummy Verifier to create login logic (we do not use any information in verifier)
dummyVf := verifier.Verifier{
OpenVMVkMap: make(map[string]struct{}),
}
loginLogic := auth.NewLoginLogicWithSimpleDeduplicator(cfg.ProxyManager.Verifier, &dummyVf)
authController := &AuthController{
apiLogin: api.NewAuthControllerWithLogic(loginLogic),
clients: clients,
proverMgr: proverMgr,
}
return authController
}
// Login extends the Login handler in the api controller
func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
loginRes, err := a.apiLogin.Login(c)
if err != nil {
return nil, err
}
loginParam := loginRes.(types.LoginParameterWithHardForkName)
if loginParam.LoginParameter.Message.ProverProviderType == types.ProverProviderTypeProxy {
return nil, fmt.Errorf("proxy do not support recursive login")
}
session := a.proverMgr.GetOrCreate(loginParam.PublicKey)
log.Debug("start handling login", "cli", loginParam.Message.ProverName)
loginCtx, cf := context.WithTimeout(context.Background(), upstreamConnTimeout)
var wg sync.WaitGroup
for _, cli := range a.clients {
wg.Add(1)
go func(cli Client) {
defer wg.Done()
if err := session.ProxyLogin(loginCtx, cli, &loginParam.LoginParameter); err != nil {
log.Error("proxy login failed during token cache update",
"userKey", loginParam.PublicKey,
"upstream", cli.Name(),
"error", err)
}
}(cli)
}
go func(cliName string) {
wg.Wait()
cf()
log.Debug("first login attempt has completed", "cli", cliName)
}(loginParam.Message.ProverName)
return loginParam.LoginParameter, nil
}
// PayloadFunc returns jwt.MapClaims with {public key, prover name}.
func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
v, ok := data.(types.LoginParameter)
if !ok {
log.Error("PayloadFunc received unexpected type", "type", fmt.Sprintf("%T", data))
return jwt.MapClaims{}
}
return jwt.MapClaims{
types.PublicKey: v.PublicKey,
types.ProverName: v.Message.ProverName,
types.ProverVersion: v.Message.ProverVersion,
types.ProverProviderTypeKey: v.Message.ProverProviderType,
SignatureKey: v.Signature,
ProverTypesKey: v.Message.ProverTypes,
}
}
// IdentityHandler replies to client for /login
func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
claims := jwt.ExtractClaims(c)
loginParam := &types.LoginParameter{}
if proverName, ok := claims[types.ProverName]; ok {
loginParam.Message.ProverName, _ = proverName.(string)
}
if proverVersion, ok := claims[types.ProverVersion]; ok {
loginParam.Message.ProverVersion, _ = proverVersion.(string)
}
if providerType, ok := claims[types.ProverProviderTypeKey]; ok {
num, _ := providerType.(float64)
loginParam.Message.ProverProviderType = types.ProverProviderType(num)
}
if signature, ok := claims[SignatureKey]; ok {
loginParam.Signature, _ = signature.(string)
}
if proverTypes, ok := claims[ProverTypesKey]; ok {
arr, _ := proverTypes.([]any)
for _, elm := range arr {
num, _ := elm.(float64)
loginParam.Message.ProverTypes = append(loginParam.Message.ProverTypes, types.ProverType(num))
}
}
if publicKey, ok := claims[types.PublicKey]; ok {
loginParam.PublicKey, _ = publicKey.(string)
}
if loginParam.PublicKey != "" {
c.Set(LoginParamCache, loginParam)
c.Set(types.ProverName, loginParam.Message.ProverName)
// public key will also be set since we have specified public_key as the identity key
return loginParam.PublicKey
}
return nil
}

View File

@@ -1,246 +0,0 @@
//nolint:errcheck,bodyclose // body is closed in the following handleHttpResp call
package proxy
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
ctypes "scroll-tech/common/types"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/types"
)
type ProxyCli interface {
Login(ctx context.Context, genLogin func(string) (*types.LoginParameter, error)) (*ctypes.Response, error)
ProxyLogin(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error)
Token() string
Reset()
}
type ProverCli interface {
GetTask(ctx context.Context, param *types.GetTaskParameter) (*ctypes.Response, error)
SubmitProof(ctx context.Context, param *types.SubmitProofParameter) (*ctypes.Response, error)
}
// upClient wraps an http client with a preset host for coordinator API calls
type upClient struct {
httpClient *http.Client
baseURL string
loginToken string
compatibileMode bool
resetFromMgr func()
}
// newUpClient creates a new upClient with the specified upstream config
func newUpClient(cfg *config.UpStream) *upClient {
return &upClient{
httpClient: &http.Client{
Timeout: time.Duration(cfg.ConnectionTimeoutSec) * time.Second,
},
baseURL: cfg.BaseUrl,
compatibileMode: cfg.CompatibileMode,
}
}
func (c *upClient) Reset() {
if c.resetFromMgr != nil {
c.resetFromMgr()
}
}
func (c *upClient) Token() string {
return c.loginToken
}
// need a parsable schema definition
type loginSchema struct {
Time string `json:"time"`
Token string `json:"token"`
}
// Login performs the complete login process: get challenge then login
func (c *upClient) Login(ctx context.Context, genLogin func(string) (*types.LoginParameter, error)) (*ctypes.Response, error) {
// Step 1: Get challenge
url := fmt.Sprintf("%s/coordinator/v1/challenge", c.baseURL)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, fmt.Errorf("failed to create challenge request: %w", err)
}
challengeResp, err := c.httpClient.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to get challenge: %w", err)
}
parsedResp, err := handleHttpResp(challengeResp)
if err != nil {
return nil, err
} else if parsedResp.ErrCode != 0 {
return nil, fmt.Errorf("challenge failed: %d (%s)", parsedResp.ErrCode, parsedResp.ErrMsg)
}
// Step 2: Parse challenge response
var challengeSchema loginSchema
if err := parsedResp.DecodeData(&challengeSchema); err != nil {
return nil, fmt.Errorf("failed to parse challenge response: %w", err)
}
// Step 3: Use the token from challenge as Bearer token for login
url = fmt.Sprintf("%s/coordinator/v1/login", c.baseURL)
param, err := genLogin(challengeSchema.Token)
if err != nil {
return nil, fmt.Errorf("failed to setup login parameter: %w", err)
}
jsonData, err := json.Marshal(param)
if err != nil {
return nil, fmt.Errorf("failed to marshal login parameter: %w", err)
}
req, err = http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create login request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+challengeSchema.Token)
loginResp, err := c.httpClient.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to perform login request: %w", err)
}
return handleHttpResp(loginResp)
}
func handleHttpResp(resp *http.Response) (*ctypes.Response, error) {
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusUnauthorized {
defer resp.Body.Close()
var respWithData ctypes.Response
// Note: Body is consumed after decoding, caller should not read it again
if err := json.NewDecoder(resp.Body).Decode(&respWithData); err == nil {
return &respWithData, nil
} else {
return nil, fmt.Errorf("login parsing expected response failed: %v", err)
}
}
return nil, fmt.Errorf("login request failed with status: %d", resp.StatusCode)
}
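// proxyLoginCompatibleMode serves upstreams that do not support the /proxy_login endpoint
// (compatible mode): it deterministically derives a stand-in key pair from the prover's
// public key via buildPrivateKey and performs the ordinary challenge/login flow with it,
// so the upstream sees what looks like a regular prover login made on the prover's behalf.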
func (c *upClient) proxyLoginCompatibleMode(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error) {
mimePrivK, err := buildPrivateKey([]byte(param.PublicKey))
if err != nil {
return nil, err
}
mimePkHex := common.Bytes2Hex(crypto.CompressPubkey(&mimePrivK.PublicKey))
genLoginParam := func(challenge string) (*types.LoginParameter, error) {
// Create login parameter with proxy settings
loginParam := &types.LoginParameter{
Message: param.Message,
PublicKey: mimePkHex,
}
loginParam.Message.Challenge = challenge
// Sign the message with the private key
if err := loginParam.SignWithKey(mimePrivK); err != nil {
return nil, fmt.Errorf("failed to sign login parameter: %w", err)
}
return loginParam, nil
}
return c.Login(ctx, genLoginParam)
}
// ProxyLogin makes a POST request to /v1/proxy_login with LoginParameter
func (c *upClient) ProxyLogin(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error) {
if c.compatibileMode {
return c.proxyLoginCompatibleMode(ctx, param)
}
url := fmt.Sprintf("%s/coordinator/v1/proxy_login", c.baseURL)
jsonData, err := json.Marshal(param)
if err != nil {
return nil, fmt.Errorf("failed to marshal proxy login parameter: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create proxy login request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+c.loginToken)
proxyLoginResp, err := c.httpClient.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to perform proxy login request: %w", err)
}
return handleHttpResp(proxyLoginResp)
}
// GetTask makes a POST request to /v1/get_task with GetTaskParameter
func (c *upClient) GetTask(ctx context.Context, param *types.GetTaskParameter) (*ctypes.Response, error) {
url := fmt.Sprintf("%s/coordinator/v1/get_task", c.baseURL)
jsonData, err := json.Marshal(param)
if err != nil {
return nil, fmt.Errorf("failed to marshal get task parameter: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create get task request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
if c.loginToken != "" {
req.Header.Set("Authorization", "Bearer "+c.loginToken)
}
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
return handleHttpResp(resp)
}
// SubmitProof makes a POST request to /v1/submit_proof with SubmitProofParameter
func (c *upClient) SubmitProof(ctx context.Context, param *types.SubmitProofParameter) (*ctypes.Response, error) {
url := fmt.Sprintf("%s/coordinator/v1/submit_proof", c.baseURL)
jsonData, err := json.Marshal(param)
if err != nil {
return nil, fmt.Errorf("failed to marshal submit proof parameter: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create submit proof request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
if c.loginToken != "" {
req.Header.Set("Authorization", "Bearer "+c.loginToken)
}
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
return handleHttpResp(resp)
}

View File

@@ -1,220 +0,0 @@
package proxy
import (
"context"
"crypto/ecdsa"
"fmt"
"sync"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/types"
)
type Client interface {
// Client returns a client that accesses the upstream coordinator with the specified
// identity (login token), so the prover can contact the coordinator as itself
Client(string) ProverCli
// ClientAsProxy returns the client that accesses the upstream as the proxy itself
ClientAsProxy(context.Context) ProxyCli
Name() string
}
type ClientManager struct {
name string
cliCfg *config.ProxyClient
cfg *config.UpStream
privKey *ecdsa.PrivateKey
cachedCli struct {
sync.RWMutex
cli *upClient
completionCtx context.Context
}
}
// buildPrivateKey deterministically derives a valid ECDSA private key from arbitrary input bytes
func buildPrivateKey(inputBytes []byte) (*ecdsa.PrivateKey, error) {
// Try appending bytes from 0x0 to 0x20 until we get a valid private key
for appendByte := byte(0x0); appendByte <= 0x20; appendByte++ {
// Append the byte to input
extendedBytes := append(inputBytes, appendByte)
// Calculate 256-bit hash
hash := crypto.Keccak256(extendedBytes)
// Try to create private key from hash
if k, err := crypto.ToECDSA(hash); err == nil {
return k, nil
}
}
return nil, fmt.Errorf("failed to generate valid private key from input bytes")
}
func NewClientManager(name string, cliCfg *config.ProxyClient, cfg *config.UpStream) (*ClientManager, error) {
log.Info("init client", "name", name, "upcfg", cfg.BaseUrl, "compatible mode", cfg.CompatibileMode)
privKey, err := buildPrivateKey([]byte(cliCfg.Secret))
if err != nil {
return nil, err
}
return &ClientManager{
name: name,
privKey: privKey,
cfg: cfg,
cliCfg: cliCfg,
}, nil
}
type ctxKeyType string
const loginCliKey ctxKeyType = "cli"
func (cliMgr *ClientManager) doLogin(ctx context.Context, loginCli *upClient) {
if cliMgr.cfg.CompatibileMode {
loginCli.loginToken = "dummy"
log.Info("Skip login process for compatible mode")
return
}
// Use the configured retry wait time, with a minimum of 2 seconds
minWait := 2 * time.Second
waitDuration := time.Duration(cliMgr.cfg.RetryWaitTime) * time.Second
if waitDuration < minWait {
waitDuration = minWait
}
for {
log.Info("proxy attempting login to upstream coordinator", "name", cliMgr.name)
loginResp, err := loginCli.Login(ctx, cliMgr.genLoginParam)
if err == nil && loginResp.ErrCode == 0 {
var loginResult loginSchema
err = loginResp.DecodeData(&loginResult)
if err != nil {
log.Error("login parsing data fail", "error", err)
} else {
loginCli.loginToken = loginResult.Token
log.Info("login to upstream coordinator successful", "name", cliMgr.name, "time", loginResult.Time)
// TODO: we need to parse time if we start making use of it
return
}
} else if err != nil {
log.Error("login process fail", "error", err)
} else {
log.Error("login get fail resp", "code", loginResp.ErrCode, "msg", loginResp.ErrMsg)
}
log.Info("login to upstream coordinator failed, retrying", "name", cliMgr.name, "error", err, "waitDuration", waitDuration)
timer := time.NewTimer(waitDuration)
select {
case <-ctx.Done():
timer.Stop()
return
case <-timer.C:
// Continue to next retry
}
}
}
func (cliMgr *ClientManager) Name() string {
return cliMgr.name
}
func (cliMgr *ClientManager) Client(token string) ProverCli {
loginCli := newUpClient(cliMgr.cfg)
loginCli.loginToken = token
return loginCli
}
func (cliMgr *ClientManager) ClientAsProxy(ctx context.Context) ProxyCli {
cliMgr.cachedCli.RLock()
if cliMgr.cachedCli.cli != nil {
defer cliMgr.cachedCli.RUnlock()
return cliMgr.cachedCli.cli
}
cliMgr.cachedCli.RUnlock()
cliMgr.cachedCli.Lock()
if cliMgr.cachedCli.cli != nil {
defer cliMgr.cachedCli.Unlock()
return cliMgr.cachedCli.cli
}
var completionCtx context.Context
// Check if completion context is set
if cliMgr.cachedCli.completionCtx != nil {
completionCtx = cliMgr.cachedCli.completionCtx
} else {
// Set new completion context and launch login goroutine
ctx, completionDone := context.WithCancel(context.TODO())
loginCli := newUpClient(cliMgr.cfg)
loginCli.resetFromMgr = func() {
cliMgr.cachedCli.Lock()
if cliMgr.cachedCli.cli == loginCli {
log.Info("cached client cleared", "name", cliMgr.name)
cliMgr.cachedCli.cli = nil
}
cliMgr.cachedCli.Unlock()
}
completionCtx = context.WithValue(ctx, loginCliKey, loginCli)
cliMgr.cachedCli.completionCtx = completionCtx
// Launch keep-login goroutine
go func() {
defer completionDone()
cliMgr.doLogin(context.Background(), loginCli)
cliMgr.cachedCli.Lock()
cliMgr.cachedCli.cli = loginCli
cliMgr.cachedCli.completionCtx = nil
cliMgr.cachedCli.Unlock()
}()
}
cliMgr.cachedCli.Unlock()
// Wait for completion or request cancellation
select {
case <-ctx.Done():
return nil
case <-completionCtx.Done():
cli := completionCtx.Value(loginCliKey).(*upClient)
return cli
}
}
func (cliMgr *ClientManager) genLoginParam(challenge string) (*types.LoginParameter, error) {
// Generate public key string
publicKeyHex := common.Bytes2Hex(crypto.CompressPubkey(&cliMgr.privKey.PublicKey))
// Create login parameter with proxy settings
loginParam := &types.LoginParameter{
Message: types.Message{
Challenge: challenge,
ProverName: cliMgr.cliCfg.ProxyName,
ProverVersion: version.Version,
ProverProviderType: types.ProverProviderTypeProxy,
ProverTypes: []types.ProverType{}, // Default empty
VKs: []string{}, // Default empty
},
PublicKey: publicKeyHex,
}
// Sign the message with the private key
if err := loginParam.SignWithKey(cliMgr.privKey); err != nil {
return nil, fmt.Errorf("failed to sign login parameter: %w", err)
}
return loginParam, nil
}
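For context on ClientAsProxy above: it combines double-checked locking with a completion context, so only the first caller launches the background login loop and every concurrent caller waits either for that login to finish or for its own context to be cancelled. Below is a distilled, standalone sketch of the same pattern; the names are illustrative and not from the repository.
package main
import (
	"context"
	"fmt"
	"sync"
	"time"
)
// lazyClient: the first caller starts initialization exactly once in the background;
// all callers block until initialization completes or their own context is done.
type lazyClient struct {
	mu    sync.RWMutex
	value *string         // the initialized resource (an upstream client in the proxy)
	done  context.Context // non-nil while an initialization is in flight
}
func (l *lazyClient) get(ctx context.Context) *string {
	l.mu.RLock()
	if l.value != nil {
		defer l.mu.RUnlock()
		return l.value
	}
	l.mu.RUnlock()
	l.mu.Lock()
	if l.value != nil {
		defer l.mu.Unlock()
		return l.value
	}
	if l.done == nil {
		// First caller: launch the (possibly slow) initialization once.
		initCtx, finished := context.WithCancel(context.Background())
		l.done = initCtx
		go func() {
			defer finished()
			time.Sleep(100 * time.Millisecond) // stands in for the retrying login loop
			v := "ready"
			l.mu.Lock()
			l.value, l.done = &v, nil
			l.mu.Unlock()
		}()
	}
	done := l.done
	l.mu.Unlock()
	select {
	case <-ctx.Done():
		return nil // caller gave up; the initialization keeps running in the background
	case <-done.Done():
		l.mu.RLock()
		defer l.mu.RUnlock()
		return l.value
	}
}
func main() {
	var lc lazyClient
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(*lc.get(ctx)) // prints "ready"
}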

View File

@@ -1,44 +0,0 @@
package proxy
import (
"github.com/prometheus/client_golang/prometheus"
"gorm.io/gorm"
"scroll-tech/coordinator/internal/config"
)
var (
// GetTask the prover task controller
GetTask *GetTaskController
// SubmitProof the submit proof controller
SubmitProof *SubmitProofController
// Auth the auth controller
Auth *AuthController
)
// Clients manages a set of thread-safe clients for requesting upstream
// coordinators
type Clients map[string]Client
// InitController initializes the proxy controllers with the database
func InitController(cfg *config.ProxyConfig, db *gorm.DB, reg prometheus.Registerer) {
// normalize cfg
cfg.ProxyManager.Normalize()
clients := make(map[string]Client)
for nm, upCfg := range cfg.Coordinators {
cli, err := NewClientManager(nm, cfg.ProxyManager.Client, upCfg)
if err != nil {
panic("create new client fail")
}
clients[cli.Name()] = cli
}
proverManager := NewProverManagerWithPersistent(100, db)
priorityManager := NewPriorityUpstreamManagerPersistent(db)
Auth = NewAuthController(cfg, clients, proverManager)
GetTask = NewGetTaskController(cfg, clients, proverManager, priorityManager, reg)
SubmitProof = NewSubmitProofController(cfg, clients, proverManager, priorityManager, reg)
}

View File

@@ -1,229 +0,0 @@
package proxy
import (
"fmt"
"math/rand"
"sync"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/coordinator/internal/config"
coordinatorType "scroll-tech/coordinator/internal/types"
)
func getSessionData(ctx *gin.Context) (string, string) {
publicKeyData, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
publicKey, castOk := publicKeyData.(string)
if !publicKeyExist || !castOk {
nerr := fmt.Errorf("no public key binding: %v", publicKeyData)
log.Warn("get_task parameter fail", "error", nerr)
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
return "", ""
}
publicNameData, publicNameExist := ctx.Get(coordinatorType.ProverName)
publicName, castOk := publicNameData.(string)
if !publicNameExist || !castOk {
log.Error("no public name binding for unknown reason, but we still forward with name = 'unknown'", "data", publicNameData)
publicName = "unknown"
}
return publicKey, publicName
}
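// A prover's "priority upstream" is the coordinator that most recently handed it a task:
// GetTasks tries that upstream first on the next poll, and the entry is cleared when the
// upstream reports no pending task or when SubmitProof delivers the proof successfully.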
// PriorityUpstreamManager manages priority upstream mappings with thread safety
type PriorityUpstreamManager struct {
sync.RWMutex
*proverPriorityPersist
data map[string]string
}
// NewPriorityUpstreamManager creates a new PriorityUpstreamManager
func NewPriorityUpstreamManager() *PriorityUpstreamManager {
return &PriorityUpstreamManager{
data: make(map[string]string),
}
}
// NewPriorityUpstreamManagerPersistent creates a PriorityUpstreamManager backed by a persistent store
func NewPriorityUpstreamManagerPersistent(db *gorm.DB) *PriorityUpstreamManager {
return &PriorityUpstreamManager{
data: make(map[string]string),
proverPriorityPersist: NewProverPriorityPersist(db),
}
}
// Get retrieves the priority upstream for a given key
func (p *PriorityUpstreamManager) Get(key string) (string, bool) {
p.RLock()
value, exists := p.data[key]
p.RUnlock()
if !exists {
if v, err := p.proverPriorityPersist.Get(key); err != nil {
log.Error("persistent priority record read failure", "error", err, "key", key)
} else if v != "" {
log.Debug("restore record from persistent layer", "key", key, "value", v)
return v, true
}
}
return value, exists
}
// Set sets the priority upstream for a given key
func (p *PriorityUpstreamManager) Set(key, value string) {
defer func() {
if err := p.proverPriorityPersist.Update(key, value); err != nil {
log.Error("update priority record failure", "error", err, "key", key, "value", value)
}
}()
p.Lock()
defer p.Unlock()
p.data[key] = value
}
// Delete removes the priority upstream for a given key
func (p *PriorityUpstreamManager) Delete(key string) {
defer func() {
if err := p.proverPriorityPersist.Del(key); err != nil {
log.Error("delete priority record failure", "error", err, "key", key)
}
}()
p.Lock()
defer p.Unlock()
delete(p.data, key)
}
// GetTaskController the get prover task api controller
type GetTaskController struct {
proverMgr *ProverManager
clients Clients
priorityUpstream *PriorityUpstreamManager
//workingRnd *rand.Rand
//getTaskAccessCounter *prometheus.CounterVec
}
// NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager, priorityMgr *PriorityUpstreamManager, reg prometheus.Registerer) *GetTaskController {
// TODO: implement proxy get task controller initialization
return &GetTaskController{
priorityUpstream: priorityMgr,
proverMgr: proverMgr,
clients: clients,
}
}
// func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
// // TODO: implement proxy get task access counter
// return nil
// }
// GetTasks gets an assigned chunk/batch task
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
var getTaskParameter coordinatorType.GetTaskParameter
if err := ctx.ShouldBind(&getTaskParameter); err != nil {
nerr := fmt.Errorf("prover task parameter invalid, err:%w", err)
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
return
}
publicKey, proverName := getSessionData(ctx)
if publicKey == "" {
return
}
session := ptc.proverMgr.Get(publicKey)
if session == nil {
nerr := fmt.Errorf("can not get session for prover %s", proverName)
types.RenderFailure(ctx, types.InternalServerError, nerr)
return
}
getTask := func(cli Client) (error, int) {
log.Debug("Start get task", "up", cli.Name(), "cli", proverName)
upStream := cli.Name()
resp, err := session.GetTask(ctx, &getTaskParameter, cli)
if err != nil {
log.Error("Upstream error for get task", "error", err, "up", upStream, "cli", proverName)
return err, types.ErrCoordinatorGetTaskFailure
} else if resp.ErrCode != types.ErrCoordinatorEmptyProofData {
if resp.ErrCode != 0 {
// simply dispatch the error from upstream to prover
log.Error("Upstream has error resp for get task", "code", resp.ErrCode, "msg", resp.ErrMsg, "up", upStream, "cli", proverName)
return fmt.Errorf("upstream failure %s:", resp.ErrMsg), resp.ErrCode
}
var task coordinatorType.GetTaskSchema
if err = resp.DecodeData(&task); err == nil {
task.TaskID = formUpstreamWithTaskName(upStream, task.TaskID)
ptc.priorityUpstream.Set(publicKey, upStream)
log.Debug("Upstream get task", "up", upStream, "cli", proverName, "taskID", task.TaskID, "taskType", task.TaskType)
types.RenderSuccess(ctx, &task)
return nil, 0
} else {
log.Error("Upstream has wrong data for get task", "error", err, "up", upStream, "cli", proverName)
return fmt.Errorf("decode task fail: %v", err), types.InternalServerError
}
}
return nil, resp.ErrCode
}
// if a priority upstream is set, try that upstream first until we get a task response or an empty-task response
priorityUpstream, exist := ptc.priorityUpstream.Get(publicKey)
if exist {
cli := ptc.clients[priorityUpstream]
log.Debug("Try get task from priority stream", "up", priorityUpstream, "cli", proverName)
if cli != nil {
err, code := getTask(cli)
if err != nil {
types.RenderFailure(ctx, code, err)
return
} else if code == 0 {
// get task done and rendered, return
return
}
// only continue if we got an empty task (the task has been removed upstream)
log.Debug("can not get priority task from upstream", "up", priorityUpstream, "cli", proverName)
} else {
log.Warn("A upstream is removed or lost for some reason while running", "up", priorityUpstream, "cli", proverName)
}
}
ptc.priorityUpstream.Delete(publicKey)
// Create a slice to hold the keys
keys := make([]string, 0, len(ptc.clients))
for k := range ptc.clients {
keys = append(keys, k)
}
// Shuffle the keys using a local RNG (avoid deprecated rand.Seed)
rand.Shuffle(len(keys), func(i, j int) {
keys[i], keys[j] = keys[j], keys[i]
})
// Iterate over the shuffled keys
for _, n := range keys {
if err, code := getTask(ptc.clients[n]); err == nil && code == 0 {
// get task done
return
}
}
log.Debug("get no task from upstream", "cli", proverName)
// if all get task failed, throw empty proof resp
types.RenderFailure(ctx, types.ErrCoordinatorEmptyProofData, fmt.Errorf("get empty prover task"))
}

View File

@@ -1,125 +0,0 @@
package proxy
import (
"time"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"scroll-tech/coordinator/internal/types"
)
type proverDataPersist struct {
db *gorm.DB
}
// NewProverDataPersist creates a persistence instance backed by a gorm DB.
func NewProverDataPersist(db *gorm.DB) *proverDataPersist {
return &proverDataPersist{db: db}
}
// gorm model mapping to table `prover_sessions`
type proverSessionRecord struct {
PublicKey string `gorm:"column:public_key;not null"`
Upstream string `gorm:"column:upstream;not null"`
UpToken string `gorm:"column:up_token;not null"`
Expired time.Time `gorm:"column:expired;not null"`
}
func (proverSessionRecord) TableName() string { return "prover_sessions" }
// priority_upstream model
type priorityUpstreamRecord struct {
PublicKey string `gorm:"column:public_key;not null"`
Upstream string `gorm:"column:upstream;not null"`
}
func (priorityUpstreamRecord) TableName() string { return "priority_upstream" }
// Get retrieves the prover session for a given user key; returns nil if it does not exist yet
func (p *proverDataPersist) Get(userKey string) (*proverSession, error) {
if p == nil || p.db == nil {
return nil, nil
}
var rows []proverSessionRecord
if err := p.db.Where("public_key = ?", userKey).Find(&rows).Error; err != nil || len(rows) == 0 {
return nil, err
}
ret := &proverSession{
proverToken: make(map[string]loginToken),
}
for _, r := range rows {
ls := &types.LoginSchema{
Token: r.UpToken,
Time: r.Expired,
}
ret.proverToken[r.Upstream] = loginToken{LoginSchema: ls}
}
return ret, nil
}
func (p *proverDataPersist) Update(userKey, up string, login *types.LoginSchema) error {
if p == nil || p.db == nil || login == nil {
return nil
}
rec := proverSessionRecord{
PublicKey: userKey,
Upstream: up,
UpToken: login.Token,
Expired: login.Time,
}
return p.db.Clauses(
clause.OnConflict{
Columns: []clause.Column{{Name: "public_key"}, {Name: "upstream"}},
DoUpdates: clause.AssignmentColumns([]string{"up_token", "expired"}),
},
).Create(&rec).Error
}
type proverPriorityPersist struct {
db *gorm.DB
}
func NewProverPriorityPersist(db *gorm.DB) *proverPriorityPersist {
return &proverPriorityPersist{db: db}
}
func (p *proverPriorityPersist) Get(userKey string) (string, error) {
if p == nil || p.db == nil {
return "", nil
}
var rec priorityUpstreamRecord
if err := p.db.Where("public_key = ?", userKey).First(&rec).Error; err != nil {
if err != gorm.ErrRecordNotFound {
return "", err
} else {
return "", nil
}
}
return rec.Upstream, nil
}
func (p *proverPriorityPersist) Update(userKey, up string) error {
if p == nil || p.db == nil {
return nil
}
rec := priorityUpstreamRecord{PublicKey: userKey, Upstream: up}
return p.db.Clauses(
clause.OnConflict{
Columns: []clause.Column{{Name: "public_key"}},
DoUpdates: clause.Assignments(map[string]interface{}{"upstream": up}),
},
).Create(&rec).Error
}
func (p *proverPriorityPersist) Del(userKey string) error {
if p == nil || p.db == nil {
return nil
}
return p.db.Where("public_key = ?", userKey).Delete(&priorityUpstreamRecord{}).Error
}

View File

@@ -1,285 +0,0 @@
package proxy
import (
"context"
"fmt"
"math"
"sync"
"gorm.io/gorm"
"github.com/scroll-tech/go-ethereum/log"
ctypes "scroll-tech/common/types"
"scroll-tech/coordinator/internal/types"
)
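// ProverManager caches per-prover sessions. Once the active map reaches sizeLimit it is
// demoted wholesale to willDeprecatedData (a simple two-generation eviction); Get promotes
// entries found in the old generation, or restored from the persistent layer, back into
// the active map.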
type ProverManager struct {
sync.RWMutex
data map[string]*proverSession
willDeprecatedData map[string]*proverSession
sizeLimit int
persistent *proverDataPersist
}
func NewProverManager(size int) *ProverManager {
return &ProverManager{
data: make(map[string]*proverSession),
willDeprecatedData: make(map[string]*proverSession),
sizeLimit: size,
}
}
func NewProverManagerWithPersistent(size int, db *gorm.DB) *ProverManager {
return &ProverManager{
data: make(map[string]*proverSession),
willDeprecatedData: make(map[string]*proverSession),
sizeLimit: size,
persistent: NewProverDataPersist(db),
}
}
// Get retrieves the prover session for a given user key, falling back to the persistent layer; returns nil if it does not exist
func (m *ProverManager) Get(userKey string) (ret *proverSession) {
defer func() {
if ret == nil {
var err error
ret, err = m.persistent.Get(userKey)
if err != nil {
log.Error("Get persistent layer for prover tokens fail", "error", err)
} else if ret != nil {
log.Debug("restore record from persistent", "key", userKey, "token", ret.proverToken)
ret.persistent = m.persistent
}
}
if ret != nil {
m.Lock()
m.data[userKey] = ret
m.Unlock()
}
}()
m.RLock()
defer m.RUnlock()
if r, existed := m.data[userKey]; existed {
return r
} else {
return m.willDeprecatedData[userKey]
}
}
func (m *ProverManager) GetOrCreate(userKey string) *proverSession {
if ret := m.Get(userKey); ret != nil {
return ret
}
m.Lock()
defer m.Unlock()
ret := &proverSession{
proverToken: make(map[string]loginToken),
persistent: m.persistent,
}
if len(m.data) >= m.sizeLimit {
m.willDeprecatedData = m.data
m.data = make(map[string]*proverSession)
}
m.data[userKey] = ret
return ret
}
type loginToken struct {
*types.LoginSchema
phase uint
}
// proverSession tracks the per-upstream login tokens of a single prover
type proverSession struct {
persistent *proverDataPersist
sync.RWMutex
proverToken map[string]loginToken
completionCtx context.Context
}
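// maintainLogin refreshes the cached login token for upstream `up`. Concurrent refreshes
// are serialized through completionCtx: a caller that finds it set waits for the in-flight
// attempt and then retries. The phase counter makes refreshes idempotent per expiry:
// callers pass the phase of the token they observed, and an attempt whose phase is already
// behind the current one simply returns the freshly cached token instead of logging in
// again (ProxyLogin passes math.MaxUint to always force a refresh).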
func (c *proverSession) maintainLogin(ctx context.Context, cliMgr Client, up string, param *types.LoginParameter, phase uint) (result loginToken, nerr error) {
c.Lock()
curPhase := c.proverToken[up].phase
if c.completionCtx != nil {
waitctx := c.completionCtx
c.Unlock()
select {
case <-waitctx.Done():
return c.maintainLogin(ctx, cliMgr, up, param, phase)
case <-ctx.Done():
nerr = fmt.Errorf("ctx fail")
return
}
}
if phase < curPhase {
// outdate login phase, give up
log.Debug("drop outdated proxy login attempt", "upstream", up, "cli", param.Message.ProverName, "phase", phase, "now", curPhase)
defer c.Unlock()
return c.proverToken[up], nil
}
// occupy the update slot
completeCtx, cf := context.WithCancel(ctx)
defer cf()
c.completionCtx = completeCtx
defer func() {
c.Lock()
c.completionCtx = nil
if result.LoginSchema != nil {
c.proverToken[up] = result
log.Info("maintain login status", "upstream", up, "cli", param.Message.ProverName, "phase", curPhase+1)
}
c.Unlock()
if nerr != nil {
log.Error("maintain login fail", "error", nerr, "upstream", up, "cli", param.Message.ProverName, "phase", curPhase)
}
}()
c.Unlock()
log.Debug("start proxy login process", "upstream", up, "cli", param.Message.ProverName)
cli := cliMgr.ClientAsProxy(ctx)
if cli == nil {
nerr = fmt.Errorf("get upstream cli fail")
return
}
resp, err := cli.ProxyLogin(ctx, param)
if err != nil {
nerr = fmt.Errorf("proxylogin fail: %v", err)
return
}
if resp.ErrCode == ctypes.ErrJWTTokenExpired {
log.Info("up stream has expired, renew upstream connection", "up", up)
cli.Reset()
cli = cliMgr.ClientAsProxy(ctx)
if cli == nil {
nerr = fmt.Errorf("get upstream cli fail (secondary try)")
return
}
// like SDK, we would try one more time if the upstream token is expired
resp, err = cli.ProxyLogin(ctx, param)
if err != nil {
nerr = fmt.Errorf("proxylogin fail: %v", err)
return
}
}
if resp.ErrCode != 0 {
nerr = fmt.Errorf("upstream fail: %d (%s)", resp.ErrCode, resp.ErrMsg)
return
}
var loginResult loginSchema
if err := resp.DecodeData(&loginResult); err != nil {
nerr = err
return
}
log.Debug("Proxy login done", "upstream", up, "cli", param.Message.ProverName)
result = loginToken{
LoginSchema: &types.LoginSchema{
Token: loginResult.Token,
},
phase: curPhase + 1,
}
return
}
// const expireTolerant = 10 * time.Minute
// ProxyLogin makes a POST request to /v1/proxy_login with LoginParameter
func (c *proverSession) ProxyLogin(ctx context.Context, cli Client, param *types.LoginParameter) error {
up := cli.Name()
c.RLock()
existedToken := c.proverToken[up]
c.RUnlock()
newtoken, err := c.maintainLogin(ctx, cli, up, param, math.MaxUint)
if newtoken.phase > existedToken.phase {
if err := c.persistent.Update(param.PublicKey, up, newtoken.LoginSchema); err != nil {
log.Error("Update persistent layer for prover tokens fail", "error", err)
}
}
return err
}
// GetTask makes a POST request to /v1/get_task with GetTaskParameter
func (c *proverSession) GetTask(ctx context.Context, param *types.GetTaskParameter, cliMgr Client) (*ctypes.Response, error) {
up := cliMgr.Name()
c.RLock()
log.Debug("call get task", "up", up, "tokens", c.proverToken)
token := c.proverToken[up]
c.RUnlock()
if token.LoginSchema != nil {
resp, err := cliMgr.Client(token.Token).GetTask(ctx, param)
if err != nil {
return nil, err
}
if resp.ErrCode != ctypes.ErrJWTTokenExpired {
return resp, nil
}
}
// like SDK, we would try one more time if the upstream token is expired
// get param from ctx
loginParam, ok := ctx.Value(LoginParamCache).(*types.LoginParameter)
if !ok {
return nil, fmt.Errorf("Unexpected error, no loginparam ctx value")
}
newToken, err := c.maintainLogin(ctx, cliMgr, up, loginParam, token.phase)
if err != nil {
return nil, fmt.Errorf("update prover token fail: %v", err)
}
return cliMgr.Client(newToken.Token).GetTask(ctx, param)
}
// SubmitProof makes a POST request to /v1/submit_proof with SubmitProofParameter
func (c *proverSession) SubmitProof(ctx context.Context, param *types.SubmitProofParameter, cliMgr Client) (*ctypes.Response, error) {
up := cliMgr.Name()
c.RLock()
token := c.proverToken[up]
c.RUnlock()
if token.LoginSchema != nil {
resp, err := cliMgr.Client(token.Token).SubmitProof(ctx, param)
if err != nil {
return nil, err
}
if resp.ErrCode != ctypes.ErrJWTTokenExpired {
return resp, nil
}
}
// like SDK, we would try one more time if the upstream token is expired
// get param from ctx
loginParam, ok := ctx.Value(LoginParamCache).(*types.LoginParameter)
if !ok {
return nil, fmt.Errorf("Unexpected error, no loginparam ctx value")
}
newToken, err := c.maintainLogin(ctx, cliMgr, up, loginParam, token.phase)
if err != nil {
return nil, fmt.Errorf("update prover token fail: %v", err)
}
return cliMgr.Client(newToken.Token).SubmitProof(ctx, param)
}

View File

@@ -1,107 +0,0 @@
package proxy
import (
"testing"
)
// TestProverManagerGetAndCreate validates basic creation and retrieval semantics.
func TestProverManagerGetAndCreate(t *testing.T) {
pm := NewProverManager(2)
if got := pm.Get("user1"); got != nil {
t.Fatalf("expected nil for non-existent key, got: %+v", got)
}
sess1 := pm.GetOrCreate("user1")
if sess1 == nil {
t.Fatalf("expected non-nil session from GetOrCreate")
}
// Should be stable on subsequent Get
if got := pm.Get("user1"); got != sess1 {
t.Fatalf("expected same session pointer on Get, got different instance: %p vs %p", got, sess1)
}
}
// TestProverManagerRolloverAndPromotion verifies rollover when sizeLimit is reached
// and that old entries are accessible and promoted back to active data map.
func TestProverManagerRolloverAndPromotion(t *testing.T) {
pm := NewProverManager(2)
s1 := pm.GetOrCreate("u1")
s2 := pm.GetOrCreate("u2")
if s1 == nil || s2 == nil {
t.Fatalf("expected sessions to be created for u1/u2")
}
// Precondition: data should contain 2 entries, no deprecated yet.
pm.RLock()
if len(pm.data) != 2 {
pm.RUnlock()
t.Fatalf("expected data len=2 before rollover, got %d", len(pm.data))
}
if len(pm.willDeprecatedData) != 0 {
pm.RUnlock()
t.Fatalf("expected willDeprecatedData len=0 before rollover, got %d", len(pm.willDeprecatedData))
}
pm.RUnlock()
// Trigger rollover by creating a third key.
s3 := pm.GetOrCreate("u3")
if s3 == nil {
t.Fatalf("expected session for u3 after rollover")
}
// After rollover: current data should only have u3, deprecated should hold u1 and u2.
pm.RLock()
if len(pm.data) != 1 {
pm.RUnlock()
t.Fatalf("expected data len=1 after rollover (only u3), got %d", len(pm.data))
}
if _, ok := pm.data["u3"]; !ok {
pm.RUnlock()
t.Fatalf("expected 'u3' to be in active data after rollover")
}
if len(pm.willDeprecatedData) != 2 {
pm.RUnlock()
t.Fatalf("expected willDeprecatedData len=2 after rollover, got %d", len(pm.willDeprecatedData))
}
pm.RUnlock()
// Accessing an old key should return the same pointer and promote it to active data map.
got1 := pm.Get("u1")
if got1 != s1 {
t.Fatalf("expected same pointer for u1 after promotion, got %p want %p", got1, s1)
}
// The promotion should add it to active data (without enforcing size limit on promotion).
pm.RLock()
if _, ok := pm.data["u1"]; !ok {
pm.RUnlock()
t.Fatalf("expected 'u1' to be present in active data after promotion")
}
if len(pm.data) != 2 {
// Now should contain u3 and u1
pm.RUnlock()
t.Fatalf("expected data len=2 after promotion of u1, got %d", len(pm.data))
}
pm.RUnlock()
// Access the other deprecated key and ensure behavior is consistent.
got2 := pm.Get("u2")
if got2 != s2 {
t.Fatalf("expected same pointer for u2 after promotion, got %p want %p", got2, s2)
}
pm.RLock()
if _, ok := pm.data["u2"]; !ok {
pm.RUnlock()
t.Fatalf("expected 'u2' to be present in active data after promotion")
}
// Note: promotion does not enforce sizeLimit, so data can grow beyond sizeLimit after promotions.
if len(pm.data) != 3 {
pm.RUnlock()
t.Fatalf("expected data len=3 after promoting both u1 and u2, got %d", len(pm.data))
}
pm.RUnlock()
}

View File

@@ -1,94 +0,0 @@
package proxy
import (
"fmt"
"strings"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types"
"scroll-tech/coordinator/internal/config"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// SubmitProofController the submit proof api controller
type SubmitProofController struct {
proverMgr *ProverManager
clients Clients
priorityUpstream *PriorityUpstreamManager
}
// NewSubmitProofController create the submit proof api controller instance
func NewSubmitProofController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager, priorityMgr *PriorityUpstreamManager, reg prometheus.Registerer) *SubmitProofController {
return &SubmitProofController{
proverMgr: proverMgr,
clients: clients,
priorityUpstream: priorityMgr,
}
}
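// Task IDs handed out to provers are namespaced with the upstream name, e.g.
// formUpstreamWithTaskName("sepolia", "chunk-42") == "sepolia:chunk-42" (the task ID here
// is illustrative), so SubmitProof can recover which upstream issued the task and route
// the proof back to it.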
func upstreamFromTaskName(taskID string) (string, string) {
parts, rest, found := strings.Cut(taskID, ":")
if found {
return parts, rest
}
return "", parts
}
func formUpstreamWithTaskName(upstream string, taskID string) string {
return fmt.Sprintf("%s:%s", upstream, taskID)
}
// SubmitProof prover submit the proof to coordinator
func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
var submitParameter coordinatorType.SubmitProofParameter
if err := ctx.ShouldBind(&submitParameter); err != nil {
nerr := fmt.Errorf("prover submitProof parameter invalid, err:%w", err)
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
return
}
publicKey, proverName := getSessionData(ctx)
if publicKey == "" {
return
}
session := spc.proverMgr.Get(publicKey)
if session == nil {
nerr := fmt.Errorf("can not get session for prover %s", proverName)
types.RenderFailure(ctx, types.InternalServerError, nerr)
return
}
upstream, realTaskID := upstreamFromTaskName(submitParameter.TaskID)
cli, existed := spc.clients[upstream]
if !existed {
log.Warn("A upstream for submitting is removed or lost for some reason while running", "up", upstream)
nerr := fmt.Errorf("Invalid upstream name (%s) from taskID %s", upstream, submitParameter.TaskID)
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
return
}
log.Debug("Start submitting", "up", upstream, "cli", proverName, "id", realTaskID, "status", submitParameter.Status)
submitParameter.TaskID = realTaskID
resp, err := session.SubmitProof(ctx, &submitParameter, cli)
if err != nil {
log.Error("Upstream has error resp for submit", "error", err, "up", upstream, "cli", proverName, "taskID", realTaskID)
types.RenderFailure(ctx, types.ErrCoordinatorGetTaskFailure, err)
return
} else if resp.ErrCode != 0 {
log.Error("Upstream has error resp for get task", "code", resp.ErrCode, "msg", resp.ErrMsg, "up", upstream, "cli", proverName, "taskID", realTaskID)
// simply dispatch the error from upstream to prover
types.RenderFailure(ctx, resp.ErrCode, fmt.Errorf("%s", resp.ErrMsg))
return
} else {
log.Debug("Submit proof to upstream", "up", upstream, "cli", proverName, "taskID", realTaskID)
spc.priorityUpstream.Delete(publicKey)
types.RenderSuccess(ctx, resp.Data)
return
}
}

View File

@@ -1,7 +1,6 @@
package auth
import (
"context"
"errors"
"fmt"
"strings"
@@ -20,72 +19,45 @@ import (
// LoginLogic the auth logic
type LoginLogic struct {
cfg *config.VerifierConfig
deduplicator ChallengeDeduplicator
cfg *config.Config
challengeOrm *orm.Challenge
openVmVks map[string]struct{}
proverVersionHardForkMap map[string]string
}
type ChallengeDeduplicator interface {
InsertChallenge(ctx context.Context, challengeString string) error
}
type SimpleDeduplicator struct {
}
func (s *SimpleDeduplicator) InsertChallenge(ctx context.Context, challengeString string) error {
return nil
}
// NewLoginLogicWithSimpleDeduplicator creates a LoginLogic that does not use the db to deduplicate challenges
func NewLoginLogicWithSimpleDeduplicator(vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
return newLoginLogic(&SimpleDeduplicator{}, vcfg, vf)
}
// NewLoginLogic creates a LoginLogic
func NewLoginLogic(db *gorm.DB, vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
return newLoginLogic(orm.NewChallenge(db), vcfg, vf)
}
func newLoginLogic(deduplicator ChallengeDeduplicator, vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
proverVersionHardForkMap := make(map[string]string)
for _, cfg := range vcfg.Verifiers {
for _, cfg := range cfg.ProverManager.Verifier.Verifiers {
proverVersionHardForkMap[cfg.ForkName] = cfg.MinProverVersion
}
return &LoginLogic{
cfg: vcfg,
cfg: cfg,
openVmVks: vf.OpenVMVkMap,
deduplicator: deduplicator,
challengeOrm: orm.NewChallenge(db),
proverVersionHardForkMap: proverVersionHardForkMap,
}
}
// Verify the completeness of login message
func VerifyMsg(login *types.LoginParameter) error {
// InsertChallengeString inserts the challenge string and checks whether it already exists
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge)
}
func (l *LoginLogic) Check(login *types.LoginParameter) error {
verify, err := login.Verify()
if err != nil || !verify {
log.Error("auth message verify failure", "prover_name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
return errors.New("auth message verify failure")
}
return nil
}
// InsertChallengeString inserts the challenge string and checks whether it already exists
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
return l.deduplicator.InsertChallenge(ctx.Copy(), challenge)
}
// Check if the login client is compatible with the setting in coordinator
func (l *LoginLogic) CompatiblityCheck(login *types.LoginParameter) error {
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.MinProverVersion, login.Message.ProverVersion)
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.MinProverVersion, login.Message.ProverVersion)
}
vks := make(map[string]struct{})
@@ -93,32 +65,27 @@ func (l *LoginLogic) CompatiblityCheck(login *types.LoginParameter) error {
vks[vk] = struct{}{}
}
// the new coordinator / proxy does not check vks during login; this code is kept only for backward compatibility
if len(vks) != 0 {
for _, vk := range login.Message.VKs {
if _, ok := vks[vk]; !ok {
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
version.Version, login.Message.ProverVersion)
}
// if the prover reports the same prover version
return errors.New("incompatible vk. please check your params files or config files")
for _, vk := range login.Message.VKs {
if _, ok := vks[vk]; !ok {
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
version.Version, login.Message.ProverVersion)
}
// if the prover reports the same prover version
return errors.New("incompatible vk. please check your params files or config files")
}
}
switch login.Message.ProverProviderType {
case types.ProverProviderTypeInternal:
case types.ProverProviderTypeExternal:
case types.ProverProviderTypeProxy:
case types.ProverProviderTypeUndefined:
if login.Message.ProverProviderType != types.ProverProviderTypeInternal && login.Message.ProverProviderType != types.ProverProviderTypeExternal {
// for backward compatibility, set ProverProviderType as internal
login.Message.ProverProviderType = types.ProverProviderTypeInternal
default:
log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
return errors.New("invalid prover provider type.")
if login.Message.ProverProviderType == types.ProverProviderTypeUndefined {
login.Message.ProverProviderType = types.ProverProviderTypeInternal
} else {
log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
return errors.New("invalid prover provider type.")
}
}
return nil

View File

@@ -1,5 +1,3 @@
//go:build !mock_verifier
package libzkp
/*
@@ -15,6 +13,8 @@ import (
"os"
"strings"
"unsafe"
"scroll-tech/common/types/message"
)
func init() {
@@ -72,6 +72,31 @@ func VerifyBundleProof(proofData, forkName string) bool {
return result != 0
}
// TaskType enum values matching the Rust enum
const (
TaskTypeChunk = 0
TaskTypeBatch = 1
TaskTypeBundle = 2
)
func fromMessageTaskType(taskType int) int {
switch message.ProofType(taskType) {
case message.ProofTypeChunk:
return TaskTypeChunk
case message.ProofTypeBatch:
return TaskTypeBatch
case message.ProofTypeBundle:
return TaskTypeBundle
default:
panic(fmt.Sprintf("unsupported proof type: %d", taskType))
}
}
// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}
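// Illustration (hypothetical call): taskType = int(message.ProofTypeChunk) with forkName "EuclidV2"
// is forwarded as TaskTypeChunk together with the lower-cased fork name "euclidv2".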
// Generate wrapped proof
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
cProofJSON := goToCString(proofJSON)

View File

@@ -1,57 +0,0 @@
//go:build mock_verifier
package libzkp
import (
"encoding/json"
)
// // InitVerifier is a no-op in the mock.
// func InitVerifier(configJSON string) {}
// // VerifyChunkProof returns a fixed success in the mock.
// func VerifyChunkProof(proofData, forkName string) bool {
// return true
// }
// // VerifyBatchProof returns a fixed success in the mock.
// func VerifyBatchProof(proofData, forkName string) bool {
// return true
// }
// // VerifyBundleProof returns a fixed success in the mock.
// func VerifyBundleProof(proofData, forkName string) bool {
// return true
// }
func UniversalTaskCompatibilityFix(taskJSON string) (string, error) {
panic("should not run here")
}
// GenerateWrappedProof returns a fixed dummy proof string in the mock.
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
payload := struct {
Metadata json.RawMessage `json:"metadata"`
Proof json.RawMessage `json:"proof"`
GitVersion string `json:"git_version"`
}{
Metadata: json.RawMessage(metadata),
Proof: json.RawMessage(proofJSON),
GitVersion: "mock-git-version",
}
out, err := json.Marshal(payload)
if err != nil {
panic(err)
}
return string(out)
}
// DumpVk is a no-op and returns nil in the mock.
func DumpVk(forkName, filePath string) error {
return nil
}
// SetDynamicFeature is a no-op in the mock.
func SetDynamicFeature(feats string) {}

View File

@@ -1,27 +0,0 @@
package libzkp
import (
"fmt"
"scroll-tech/common/types/message"
)
// TaskType enum values matching the Rust enum
const (
TaskTypeChunk = 0
TaskTypeBatch = 1
TaskTypeBundle = 2
)
func fromMessageTaskType(taskType int) int {
switch message.ProofType(taskType) {
case message.ProofTypeChunk:
return TaskTypeChunk
case message.ProofTypeBatch:
return TaskTypeBatch
case message.ProofTypeBundle:
return TaskTypeBundle
default:
panic(fmt.Sprintf("unsupported proof type: %d", taskType))
}
}

View File

@@ -5,7 +5,6 @@ package libzkp
import (
"encoding/json"
"fmt"
"strings"
"scroll-tech/common/types/message"
@@ -15,10 +14,6 @@ import (
func InitL2geth(configJSON string) {
}
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
fmt.Printf("call mocked generate universal task %d, taskJson %s\n", taskType, taskJSON)

View File

@@ -7,10 +7,7 @@ package libzkp
#include "libzkp.h"
*/
import "C" //nolint:typecheck
import (
"strings"
"unsafe"
)
import "unsafe"
// Initialize the handler for universal task
func InitL2geth(configJSON string) {
@@ -20,11 +17,6 @@ func InitL2geth(configJSON string) {
C.init_l2geth(cConfig)
}
// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
cTask := goToCString(taskJSON)
cForkName := goToCString(forkName)

View File

@@ -14,7 +14,7 @@ import (
)
// ChallengeMiddleware jwt challenge middleware
func ChallengeMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
func ChallengeMiddleware(conf *config.Config) *jwt.GinJWTMiddleware {
jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
Authenticator: func(c *gin.Context) (interface{}, error) {
return nil, nil
@@ -30,8 +30,8 @@ func ChallengeMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
}
},
Unauthorized: unauthorized,
Key: []byte(auth.Secret),
Timeout: time.Second * time.Duration(auth.ChallengeExpireDurationSec),
Key: []byte(conf.Auth.Secret),
Timeout: time.Second * time.Duration(conf.Auth.ChallengeExpireDurationSec),
TokenLookup: "header: Authorization, query: token, cookie: jwt",
TokenHeadName: "Bearer",
TimeFunc: time.Now,

View File

@@ -4,57 +4,22 @@ import (
"time"
jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/proxy"
"scroll-tech/coordinator/internal/types"
)
func nonIdendityAuthorizator(data interface{}, _ *gin.Context) bool {
return data != nil
}
// LoginMiddleware jwt auth middleware
func LoginMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
func LoginMiddleware(conf *config.Config) *jwt.GinJWTMiddleware {
jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
PayloadFunc: api.Auth.PayloadFunc,
IdentityHandler: api.Auth.IdentityHandler,
IdentityKey: types.PublicKey,
Key: []byte(auth.Secret),
Timeout: time.Second * time.Duration(auth.LoginExpireDurationSec),
Key: []byte(conf.Auth.Secret),
Timeout: time.Second * time.Duration(conf.Auth.LoginExpireDurationSec),
Authenticator: api.Auth.Login,
Authorizator: nonIdendityAuthorizator,
Unauthorized: unauthorized,
TokenLookup: "header: Authorization, query: token, cookie: jwt",
TokenHeadName: "Bearer",
TimeFunc: time.Now,
LoginResponse: loginResponse,
})
if err != nil {
log.Crit("new jwt middleware panic", "error", err)
}
if errInit := jwtMiddleware.MiddlewareInit(); errInit != nil {
log.Crit("init jwt middleware panic", "error", errInit)
}
return jwtMiddleware
}
// ProxyLoginMiddleware jwt auth middleware for proxy login
func ProxyLoginMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
PayloadFunc: proxy.Auth.PayloadFunc,
IdentityHandler: proxy.Auth.IdentityHandler,
IdentityKey: types.PublicKey,
Key: []byte(auth.Secret),
Timeout: time.Second * time.Duration(auth.LoginExpireDurationSec),
Authenticator: proxy.Auth.Login,
Authorizator: nonIdendityAuthorizator,
Unauthorized: unauthorized,
TokenLookup: "header: Authorization, query: token, cookie: jwt",
TokenHeadName: "Bearer",

View File

@@ -28,8 +28,8 @@ func TestMain(m *testing.M) {
defer func() {
if testApps != nil {
testApps.Free()
tearDownEnv(t)
}
tearDownEnv(t)
}()
m.Run()
}

View File

@@ -8,7 +8,6 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/proxy"
"scroll-tech/coordinator/internal/middleware"
)
@@ -26,45 +25,16 @@ func Route(router *gin.Engine, cfg *config.Config, reg prometheus.Registerer) {
func v1(router *gin.RouterGroup, conf *config.Config) {
r := router.Group("/v1")
challengeMiddleware := middleware.ChallengeMiddleware(conf.Auth)
challengeMiddleware := middleware.ChallengeMiddleware(conf)
r.GET("/challenge", challengeMiddleware.LoginHandler)
loginMiddleware := middleware.LoginMiddleware(conf.Auth)
loginMiddleware := middleware.LoginMiddleware(conf)
r.POST("/login", challengeMiddleware.MiddlewareFunc(), loginMiddleware.LoginHandler)
// need jwt token api
r.Use(loginMiddleware.MiddlewareFunc())
{
r.POST("/proxy_login", loginMiddleware.LoginHandler)
r.POST("/get_task", api.GetTask.GetTasks)
r.POST("/submit_proof", api.SubmitProof.SubmitProof)
}
}
// ProxyRoute registers routes for the coordinator proxy
func ProxyRoute(router *gin.Engine, cfg *config.ProxyConfig, reg prometheus.Registerer) {
router.Use(gin.Recovery())
observability.Use(router, "coordinator", reg)
r := router.Group("coordinator")
v1_proxy(r, cfg)
}
func v1_proxy(router *gin.RouterGroup, conf *config.ProxyConfig) {
r := router.Group("/v1")
challengeMiddleware := middleware.ChallengeMiddleware(conf.ProxyManager.Auth)
r.GET("/challenge", challengeMiddleware.LoginHandler)
loginMiddleware := middleware.ProxyLoginMiddleware(conf.ProxyManager.Auth)
r.POST("/login", challengeMiddleware.MiddlewareFunc(), loginMiddleware.LoginHandler)
// need jwt token api
r.Use(loginMiddleware.MiddlewareFunc())
{
r.POST("/get_task", proxy.GetTask.GetTasks)
r.POST("/submit_proof", proxy.SubmitProof.SubmitProof)
}
}

View File

@@ -64,8 +64,6 @@ func (r ProverProviderType) String() string {
return "prover provider type internal"
case ProverProviderTypeExternal:
return "prover provider type external"
case ProverProviderTypeProxy:
return "prover provider type proxy"
default:
return fmt.Sprintf("prover provider type: %d", r)
}
@@ -78,6 +76,4 @@ const (
ProverProviderTypeInternal
// ProverProviderTypeExternal is an external prover provider type
ProverProviderTypeExternal
// ProverProviderTypeProxy is an proxy prover provider type
ProverProviderTypeProxy = 3
)

View File

@@ -1,48 +0,0 @@
package types
import (
"encoding/json"
"reflect"
"testing"
"scroll-tech/common/types"
)
func TestResponseDecodeData_GetTaskSchema(t *testing.T) {
// Arrange: build a dummy payload and wrap it in Response
in := GetTaskSchema{
UUID: "uuid-123",
TaskID: "task-abc",
TaskType: 1,
UseSnark: true,
TaskData: "dummy-data",
HardForkName: "cancun",
}
resp := types.Response{
ErrCode: 0,
ErrMsg: "",
Data: in,
}
// Act: JSON round-trip the Response to simulate real HTTP encoding/decoding
b, err := json.Marshal(resp)
if err != nil {
t.Fatalf("marshal response: %v", err)
}
var decoded types.Response
if err := json.Unmarshal(b, &decoded); err != nil {
t.Fatalf("unmarshal response: %v", err)
}
var out GetTaskSchema
if err := decoded.DecodeData(&out); err != nil {
t.Fatalf("DecodeData error: %v", err)
}
// Assert: structs match after decode
if !reflect.DeepEqual(in, out) {
t.Fatalf("decoded struct mismatch:\nwant: %+v\n got: %+v", in, out)
}
}

View File

@@ -30,14 +30,12 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/cron"
"scroll-tech/coordinator/internal/controller/proxy"
"scroll-tech/coordinator/internal/orm"
"scroll-tech/coordinator/internal/route"
)
var (
conf *config.Config
proxyConf *config.ProxyConfig
conf *config.Config
testApps *testcontainers.TestcontainerApps
@@ -53,9 +51,6 @@ var (
chunk *encoding.Chunk
batch *encoding.Batch
tokenTimeout int
envSet bool
portUsed map[int64]struct{}
)
func TestMain(m *testing.M) {
@@ -68,44 +63,18 @@ func TestMain(m *testing.M) {
}
func randomURL() string {
return randmURLBatch(1)[0]
id, _ := rand.Int(rand.Reader, big.NewInt(2000-1))
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}
// Generate a batch of random localhost URLs with different ports, similar to randomURL.
func randmURLBatch(n int) []string {
if n <= 0 {
return nil
}
urls := make([]string, 0, n)
if portUsed == nil {
portUsed = make(map[int64]struct{})
}
for len(urls) < n {
id, _ := rand.Int(rand.Reader, big.NewInt(2000-1))
port := 20000 + 2000 + id.Int64()
if _, exist := portUsed[port]; exist {
continue
}
portUsed[port] = struct{}{}
urls = append(urls, fmt.Sprintf("localhost:%d", port))
}
return urls
}
func setupCoordinatorDb(t *testing.T) {
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
var err error
assert.NotNil(t, db, "setEnv must be called before")
// db, err = testApps.GetGormDBClient()
db, err = testApps.GetGormDBClient()
// assert.NoError(t, err)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
}
func launchCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
assert.NotNil(t, db, "db must be set")
tokenTimeout = 60
conf = &config.Config{
@@ -145,7 +114,6 @@ func launchCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL str
EuclidV2Time: new(uint64),
}, db, nil)
route.Route(router, conf, nil)
t.Log("coordinator server url", coordinatorURL)
srv := &http.Server{
Addr: coordinatorURL,
Handler: router,
@@ -161,77 +129,7 @@ func launchCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL str
return proofCollector, srv
}
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
setupCoordinatorDb(t)
return launchCoordinator(t, proversPerSession, coordinatorURL)
}
func setupProxyDb(t *testing.T) {
assert.NotNil(t, db, "setEnv must be called before")
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetModuleDB(sqlDB, "proxy"))
}
func launchProxy(t *testing.T, proxyURL string, coordinatorURL []string, usePersistent bool) *http.Server {
var err error
assert.NoError(t, err)
coordinators := make(map[string]*config.UpStream)
for i, n := range coordinatorURL {
coordinators[fmt.Sprintf("coordinator_%d", i)] = testProxyUpStreamCfg(n)
}
tokenTimeout = 60
proxyConf = &config.ProxyConfig{
ProxyName: "test_proxy",
ProxyManager: &config.ProxyManager{
Verifier: &config.VerifierConfig{
MinProverVersion: "v4.4.89",
Verifiers: []config.AssetConfig{{
AssetsPath: "",
ForkName: "euclidV2",
}},
},
Client: testProxyClientCfg(),
Auth: &config.Auth{
Secret: "proxy",
ChallengeExpireDurationSec: tokenTimeout,
LoginExpireDurationSec: tokenTimeout,
},
},
Coordinators: coordinators,
}
router := gin.New()
if usePersistent {
proxy.InitController(proxyConf, db, nil)
} else {
proxy.InitController(proxyConf, nil, nil)
}
route.ProxyRoute(router, proxyConf, nil)
t.Log("proxy server url", proxyURL)
srv := &http.Server{
Addr: proxyURL,
Handler: router,
}
go func() {
runErr := srv.ListenAndServe()
if runErr != nil && !errors.Is(runErr, http.ErrServerClosed) {
assert.NoError(t, runErr)
}
}()
time.Sleep(time.Second * 2)
return srv
}
func setEnv(t *testing.T) {
if envSet {
t.Log("setEnv is re-entered")
return
}
var err error
version.Version = "v4.5.45"
@@ -248,7 +146,6 @@ func setEnv(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
assert.NoError(t, migrate.MigrateModule(sqlDB, "proxy"))
batchOrm = orm.NewBatch(db)
chunkOrm = orm.NewChunk(db)
@@ -272,7 +169,6 @@ func setEnv(t *testing.T) {
assert.NoError(t, err)
batch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk}}
envSet = true
}
func TestApis(t *testing.T) {

View File

@@ -34,8 +34,6 @@ type mockProver struct {
privKey *ecdsa.PrivateKey
proofType message.ProofType
coordinatorURL string
token string
useCacheToken bool
}
func newMockProver(t *testing.T, proverName string, coordinatorURL string, proofType message.ProofType, version string) *mockProver {
@@ -52,14 +50,6 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof
return prover
}
func (r *mockProver) resetConnection(coordinatorURL string) {
r.coordinatorURL = coordinatorURL
}
func (r *mockProver) setUseCacheToken(enable bool) {
r.useCacheToken = enable
}
// connectToCoordinator sets up a websocket client to connect to the prover manager.
func (r *mockProver) connectToCoordinator(t *testing.T, proverTypes []types.ProverType) (string, int, string) {
challengeString := r.challenge(t)
@@ -125,7 +115,6 @@ func (r *mockProver) login(t *testing.T, challengeString string, proverTypes []t
assert.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode())
assert.Empty(t, result.ErrMsg)
r.token = loginData.Token
return loginData.Token, 0, ""
}
@@ -155,14 +144,11 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {
func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*types.GetTaskSchema, int, string) {
// get task from coordinator
if !r.useCacheToken || r.token == "" {
token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
if errCode != 0 {
return nil, errCode, errMsg
}
assert.NotEmpty(t, token)
assert.Equal(t, token, r.token)
token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
if errCode != 0 {
return nil, errCode, errMsg
}
assert.NotEmpty(t, token)
type response struct {
ErrCode int `json:"errcode"`
@@ -174,7 +160,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
client := resty.New()
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.token)).
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"universal": true, "prover_height": 100, "task_types": []int{int(proofType)}}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
@@ -188,14 +174,11 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
//nolint:unparam
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) {
// get task from coordinator
if !r.useCacheToken || r.token == "" {
token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
if errCode != 0 {
return errCode, errMsg
}
assert.NotEmpty(t, token)
assert.Equal(t, token, r.token)
token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
if errCode != 0 {
return errCode, errMsg
}
assert.NotEmpty(t, token)
type response struct {
ErrCode int `json:"errcode"`
@@ -207,8 +190,8 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
client := resty.New()
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_types": []int{int(proofType)}, "universal": true}).
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "universal": true}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
@@ -266,13 +249,10 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
Universal: true,
}
if !r.useCacheToken || r.token == "" {
token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))})
assert.Equal(t, authErrCode, 0)
assert.Equal(t, errMsg, "")
assert.NotEmpty(t, token)
assert.Equal(t, token, r.token)
}
token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))})
assert.Equal(t, authErrCode, 0)
assert.Equal(t, errMsg, "")
assert.NotEmpty(t, token)
submitProofData, err := json.Marshal(submitProof)
assert.NoError(t, err)
@@ -282,7 +262,7 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
client := resty.New()
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.token)).
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(string(submitProofData)).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/submit_proof")

View File

@@ -1,297 +0,0 @@
package test
import (
"context"
"fmt"
"net/http"
"strings"
"testing"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/proxy"
)
func testProxyClientCfg() *config.ProxyClient {
return &config.ProxyClient{
Secret: "test-secret-key",
ProxyName: "test-proxy",
ProxyVersion: version.Version,
}
}
var testCompatibileMode bool
func testProxyUpStreamCfg(coordinatorURL string) *config.UpStream {
return &config.UpStream{
BaseUrl: fmt.Sprintf("http://%s", coordinatorURL),
RetryWaitTime: 3,
ConnectionTimeoutSec: 30,
CompatibileMode: testCompatibileMode,
}
}
func testProxyClient(t *testing.T) {
// Setup coordinator and http server.
coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
proofCollector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()
cliCfg := testProxyClientCfg()
upCfg := testProxyUpStreamCfg(coordinatorURL)
clientManager, err := proxy.NewClientManager("test_coordinator", cliCfg, upCfg)
assert.NoError(t, err)
assert.NotNil(t, clientManager)
// Create context with timeout
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Test Client method
client := clientManager.ClientAsProxy(ctx)
// Client should not be nil if login succeeds
// Note: This might be nil if the coordinator is not properly set up for proxy authentication
// but the test validates that the Client method completes without panic
assert.NotNil(t, client)
token1 := client.Token()
assert.NotEmpty(t, token1)
t.Logf("Client token: %s (%v)", token1, client)
if !upCfg.CompatibileMode {
time.Sleep(time.Second * 2)
client.Reset()
client = clientManager.ClientAsProxy(ctx)
assert.NotNil(t, client)
token2 := client.Token()
assert.NotEmpty(t, token2)
t.Logf("Client token (sec): %s (%v)", token2, client)
assert.NotEqual(t, token1, token2, "token should not be identical")
}
}
func testProxyHandshake(t *testing.T) {
// Setup proxy http server.
proxyURL := randomURL()
proxyHttpHandler := launchProxy(t, proxyURL, []string{}, false)
defer func() {
assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
}()
chunkProver := newMockProver(t, "prover_chunk_test", proxyURL, message.ProofTypeChunk, version.Version)
assert.True(t, chunkProver.healthCheckSuccess(t))
}
func testProxyGetTask(t *testing.T) {
// Setup coordinator and http server.
urls := randmURLBatch(2)
coordinatorURL := urls[0]
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()
proxyURL := urls[1]
proxyHttpHandler := launchProxy(t, proxyURL, []string{coordinatorURL}, false)
defer func() {
assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
}()
chunkProver := newMockProver(t, "prover_chunk_test", proxyURL, message.ProofTypeChunk, version.Version)
chunkProver.setUseCacheToken(true)
code, _ := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
assert.Equal(t, int(types.ErrCoordinatorEmptyProofData), code)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
task, code, msg := chunkProver.getProverTask(t, message.ProofTypeChunk)
assert.Empty(t, code)
if code == 0 {
t.Log("get task id", task.TaskID)
} else {
t.Log("get task error msg", msg)
}
}
func testProxyProof(t *testing.T) {
urls := randmURLBatch(3)
coordinatorURL0 := urls[0]
setupCoordinatorDb(t)
collector0, httpHandler0 := launchCoordinator(t, 3, coordinatorURL0)
defer func() {
collector0.Stop()
httpHandler0.Shutdown(context.Background())
}()
coordinatorURL1 := urls[1]
collector1, httpHandler1 := launchCoordinator(t, 3, coordinatorURL1)
defer func() {
collector1.Stop()
httpHandler1.Shutdown(context.Background())
}()
coordinators := map[string]*http.Server{
"coordinator_0": httpHandler0,
"coordinator_1": httpHandler1,
}
proxyURL := urls[2]
proxyHttpHandler := launchProxy(t, proxyURL, []string{coordinatorURL0, coordinatorURL1}, false)
defer func() {
assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
}()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
chunkProver := newMockProver(t, "prover_chunk_test", proxyURL, message.ProofTypeChunk, version.Version)
chunkProver.setUseCacheToken(true)
task, code, msg := chunkProver.getProverTask(t, message.ProofTypeChunk)
assert.Empty(t, code)
if code == 0 {
t.Log("get task", task)
parts, _, _ := strings.Cut(task.TaskID, ":")
// close the coordinator that did not dispatch the task first, so that if we submit to the wrong target
// there is a chance the submit fails (against the already-closed coordinator)
for n, srv := range coordinators {
if n != parts {
t.Log("close coordinator", n)
assert.NoError(t, srv.Shutdown(context.Background()))
}
}
exceptProofStatus := verifiedSuccess
chunkProver.submitProof(t, task, exceptProofStatus, types.Success)
} else {
t.Log("get task error msg", msg)
}
// verify proof status
var (
tick = time.Tick(1500 * time.Millisecond)
tickStop = time.Tick(time.Minute)
)
var (
chunkProofStatus types.ProvingStatus
chunkActiveAttempts int16
chunkMaxAttempts int16
)
for {
select {
case <-tick:
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskVerified {
return
}
chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(chunkMaxAttempts))
assert.Equal(t, 0, int(chunkActiveAttempts))
case <-tickStop:
t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String())
return
}
}
}
func testProxyPersistent(t *testing.T) {
urls := randmURLBatch(4)
coordinatorURL0 := urls[0]
setupCoordinatorDb(t)
collector0, httpHandler0 := launchCoordinator(t, 3, coordinatorURL0)
defer func() {
collector0.Stop()
httpHandler0.Shutdown(context.Background())
}()
coordinatorURL1 := urls[1]
collector1, httpHandler1 := launchCoordinator(t, 3, coordinatorURL1)
defer func() {
collector1.Stop()
httpHandler1.Shutdown(context.Background())
}()
setupProxyDb(t)
proxyURL1 := urls[2]
proxyHttpHandler := launchProxy(t, proxyURL1, []string{coordinatorURL0, coordinatorURL1}, true)
defer func() {
assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
}()
proxyURL2 := urls[3]
proxyHttpHandler2 := launchProxy(t, proxyURL2, []string{coordinatorURL0, coordinatorURL1}, true)
defer func() {
assert.NoError(t, proxyHttpHandler2.Shutdown(context.Background()))
}()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
chunkProver := newMockProver(t, "prover_chunk_test", proxyURL1, message.ProofTypeChunk, version.Version)
chunkProver.setUseCacheToken(true)
task, _, _ := chunkProver.getProverTask(t, message.ProofTypeChunk)
assert.NotNil(t, task)
taskFrom, _, _ := strings.Cut(task.TaskID, ":")
t.Log("get task from coordinator:", taskFrom)
chunkProver.resetConnection(proxyURL2)
task, _, _ = chunkProver.getProverTask(t, message.ProofTypeChunk)
assert.NotNil(t, task)
taskFrom2, _, _ := strings.Cut(task.TaskID, ":")
assert.Equal(t, taskFrom, taskFrom2)
}
func TestProxyClient(t *testing.T) {
testCompatibileMode = false
// Set up the test environment.
setEnv(t)
t.Run("TestProxyClient", testProxyClient)
t.Run("TestProxyHandshake", testProxyHandshake)
t.Run("TestProxyGetTask", testProxyGetTask)
t.Run("TestProxyValidProof", testProxyProof)
t.Run("testProxyPersistent", testProxyPersistent)
}
func TestProxyClientCompatibleMode(t *testing.T) {
testCompatibileMode = true
// Set up the test environment.
setEnv(t)
t.Run("TestProxyClient", testProxyClient)
t.Run("TestProxyHandshake", testProxyHandshake)
t.Run("TestProxyGetTask", testProxyGetTask)
t.Run("TestProxyValidProof", testProxyProof)
t.Run("testProxyPersistent", testProxyPersistent)
}

View File

@@ -9,14 +9,13 @@ import (
"github.com/pressly/goose/v3"
)
//go:embed migrations
//go:embed migrations/*.sql
var embedMigrations embed.FS
// MigrationsDir migration dir
const MigrationsDir string = "migrations"
func init() {
// note: goose ignores non-sql files by default, so we do not need to specify *.sql
goose.SetBaseFS(embedMigrations)
goose.SetSequential(true)
goose.SetTableName("scroll_migrations")
@@ -25,41 +24,6 @@ func init() {
goose.SetVerbose(verbose)
}
// MigrateModule migrates the db used by another module with its own goose table name
// sql files for that module must be put in a sub-directory under `MigrationsDir`
func MigrateModule(db *sql.DB, moduleName string) error {
goose.SetTableName(moduleName + "_migrations")
defer func() {
goose.SetTableName("scroll_migrations")
}()
return goose.Up(db, MigrationsDir+"/"+moduleName, goose.WithAllowMissing())
}
// RollbackModule rollback the specified module to the given version
func RollbackModule(db *sql.DB, moduleName string, version *int64) error {
goose.SetTableName(moduleName + "_migrations")
defer func() {
goose.SetTableName("scroll_migrations")
}()
moduleDir := MigrationsDir + "/" + moduleName
if version != nil {
return goose.DownTo(db, moduleDir, *version)
}
return goose.Down(db, moduleDir)
}
// ResetModuleDB clean and migrate db for a module.
func ResetModuleDB(db *sql.DB, moduleName string) error {
if err := RollbackModule(db, moduleName, new(int64)); err != nil {
return err
}
return MigrateModule(db, moduleName)
}
// Migrate migrate db
func Migrate(db *sql.DB) error {
//return goose.Up(db, MIGRATIONS_DIR, goose.WithAllowMissing())

View File

@@ -1,30 +0,0 @@
-- +goose Up
-- +goose StatementBegin
create table prover_sessions
(
public_key TEXT NOT NULL,
upstream TEXT NOT NULL,
up_token TEXT NOT NULL,
expired TIMESTAMP(0) NOT NULL,
constraint uk_prover_sessions_public_key_upstream unique (public_key, upstream)
);
create index idx_prover_sessions_expired on prover_sessions (expired);
create table priority_upstream
(
public_key TEXT NOT NULL,
upstream TEXT NOT NULL,
update_time TIMESTAMP(0) NOT NULL DEFAULT now()
);
create unique index idx_priority_upstream_public_key on priority_upstream (public_key);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop index if exists idx_prover_sessions_expired;
drop table if exists prover_sessions;
drop table if exists priority_upstream;
-- +goose StatementEnd

View File

@@ -0,0 +1,3 @@
SCROLL_URL=https://rpc.scroll.io
MAINNET_URL=https://eth-mainnet.g.alchemy.com/v2/YOUR_API_KEY
BEACON_URL=https://eth-mainnet-beacon.g.alchemy.com/v2/YOUR_API_KEY

View File

@@ -0,0 +1,41 @@
# Environment variables (contains API keys)
.env
# Cached data files
*.pkl
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# Virtual environments
.venv
venv/
ENV/
# IDE
.idea/
.vscode/
*.swp
*.swo
# Logs
*.log

View File

@@ -0,0 +1,18 @@
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[packages]
web3 = "<7,>=6"
pandas = "*"
numpy = "*"
rlp = "*"
zstandard = "*"
requests = "*"
async_timeout = "*"
[dev-packages]
[requires]
python_version = "3.10"

File diff suppressed because it is too large

View File

@@ -0,0 +1,54 @@
# Galileo Gas Parameter Derivation
Derives L1 fee parameters (`commit_scalar`, `blob_scalar`, `penalty_multiplier`) for Scroll's Galileo upgrade by analyzing historical on-chain data.
## Prerequisites
- Python 3.10+
- pipenv
- gcc and python3-devel
```bash
# Ubuntu/Debian
sudo apt install python3-dev gcc
# Fedora/RHEL
sudo dnf install python3-devel gcc
```
## Installation
```bash
cd scripts/derive_galileo_gas_parameter
pipenv install
```
## Running
### 1. Create `.env` file
```bash
SCROLL_URL=https://rpc.scroll.io
MAINNET_URL=https://eth-mainnet.g.alchemy.com/v2/YOUR_API_KEY
BEACON_URL=https://eth-mainnet-beacon.g.alchemy.com/v2/YOUR_API_KEY
```
### 2. Usage
```bash
# Collect data from 30 recent batches
pipenv run python -u derive_galileo_gas_parameter.py --mode collect --n-batches 30
# Load previously cached data
pipenv run python -u derive_galileo_gas_parameter.py --mode load --start-batch 494041 --end-batch 494070
```
**Options:**
- `--mode {collect,load}` - Collect new data or load from cache (required)
- `--n-batches N` - Number of batches to collect (default: 30)
- `--start-batch N` / `--end-batch N` - Batch range for load mode
- `--target-penalty FLOAT` - Target penalty at P95 (default: 0.1 = 10%)
- `--penalty-multiplier FLOAT` - Use fixed penalty multiplier instead of calculating
### 3. Cached Data
Collected data is saved to `galileo_data_batch_{start}_{end}.pkl` for re-analysis without re-fetching from RPC. Use `--mode load` to reload.
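For a quick look at a cached file outside the main script, the pickle can be loaded directly. The sketch below is a minimal example that assumes the field names used by `show_batches.py` (`batch_df`, `tx_df`, `start_batch`, `end_batch`); the file name is hypothetical.
```python
# Minimal sketch: inspect a cached data file produced by `--mode collect`.
import pickle

with open("galileo_data_batch_494041_494070.pkl", "rb") as f:  # hypothetical file name
    data = pickle.load(f)

print("batch range:", data["start_batch"], "-", data["end_batch"])
print("batches:", len(data["batch_df"]), "transactions:", len(data["tx_df"]))
print(data["batch_df"].head())  # pandas DataFrame of per-batch costs
```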

View File

@@ -0,0 +1,70 @@
# Project Description
This is a Scroll L2 blockchain gas fee parameter calculation project for deriving Scroll gas fee parameters.
# Environment Setup
Claude is executed within a pipenv shell environment, so all packages should already be installed.
# Guidelines
- All Python script comments must be in English
- When executing `derive_galileo_gas_parameter.py`, output should be redirected to a fixed log file
- Use the `-u` option to disable output buffering for real-time logging
- Always use `/tmp/derive_galileo_gas_parameter.log` as the log file
- Example: `python -u derive_galileo_gas_parameter.py --mode collect --n-batches 5 > /tmp/derive_galileo_gas_parameter.log 2>&1 &`
# Python Script Execution
## Command Format
```bash
python -u derive_galileo_gas_parameter.py [OPTIONS] > /tmp/derive_galileo_gas_parameter.log 2>&1 &
```
## Options
- `--mode {collect,load}` (required): Operation mode
- `collect`: Collect new data from blockchain
- `load`: Load data from cached pickle file
- `--n-batches N`: Number of batches to collect (default: 30)
- Only used in `collect` mode
- `--start-batch N`: Start batch index (required for `load` mode)
- `--end-batch N`: End batch index (required for `load` mode)
- `--target-penalty FLOAT`: Target penalty at P95 (default: 0.1 = 10%)
- `--penalty-multiplier FLOAT`: Fixed penalty multiplier (optional, will be calculated from P95 if not specified)
## Examples
Collect data for 3 batches:
```bash
python -u derive_galileo_gas_parameter.py --mode collect --n-batches 3 > /tmp/derive_galileo_gas_parameter.log 2>&1 &
```
Load cached data:
```bash
python -u derive_galileo_gas_parameter.py --mode load --start-batch 12345 --end-batch 12347 > /tmp/derive_galileo_gas_parameter.log 2>&1 &
```
Collect with custom target penalty:
```bash
python -u derive_galileo_gas_parameter.py --mode collect --n-batches 30 --target-penalty 0.15 > /tmp/derive_galileo_gas_parameter.log 2>&1 &
```
## Monitoring Execution
Check log output in real-time:
```bash
tail -f /tmp/derive_galileo_gas_parameter.log
```
View complete log:
```bash
cat /tmp/derive_galileo_gas_parameter.log
```

File diff suppressed because it is too large

View File

@@ -0,0 +1,81 @@
#!/usr/bin/env python3
"""
Show batch IDs in a saved data file
"""
import pickle
import sys
from pathlib import Path
def show_batches(filename):
"""Show batch IDs in the data file"""
if not Path(filename).exists():
print(f"Error: File not found: {filename}")
return
try:
with open(filename, 'rb') as f:
data = pickle.load(f)
batch_df = data['batch_df']
tx_df = data['tx_df']
print(f"\n{'='*60}")
print(f"File: {filename}")
print(f"{'='*60}")
print(f"\nBatch range: {data['start_batch']} - {data['end_batch']}")
print(f"Total batches: {len(batch_df)}")
print(f"Total transactions: {len(tx_df)}")
print(f"\nBatch IDs:")
batch_ids = sorted(batch_df.index.tolist())
# Print batch IDs in a compact format
for i in range(0, len(batch_ids), 10):
batch_subset = batch_ids[i:i+10]
print(f" {', '.join(map(str, batch_subset))}")
# Show detailed information for each batch
print(f"\n{'='*60}")
print("BATCH DETAILS")
print(f"{'='*60}")
for batch_id in sorted(batch_df.index.tolist()):
batch_row = batch_df.loc[batch_id]
print(f"\n{'-'*60}")
print(f"Batch {int(batch_id)}")
print(f"{'-'*60}")
print(f" versioned_hash: {batch_row['versioned_hash']}")
print(f" commit_cost: {batch_row['commit_cost'] / 1e9:.9f} gwei")
print(f" blob_cost: {batch_row['blob_cost'] / 1e9:.9f} gwei")
print(f" finalize_cost: {batch_row['finalize_cost'] / 1e9:.9f} gwei")
# Get transactions for this batch
batch_txs = tx_df[tx_df['batch_index'] == batch_id]
print(f"\n Transactions ({len(batch_txs)} total):")
if len(batch_txs) > 0:
for idx, tx in batch_txs.iterrows():
print(f"\n Transaction {idx}:")
print(f" hash: {tx['hash']}")
print(f" block_number: {tx['block_number']}")
print(f" size: {tx['size']}")
print(f" compressed_tx_size: {tx['compressed_tx_size']}")
print(f" l1_base_fee: {tx['l1_base_fee'] / 1e9:.9f} gwei")
print(f" l1_blob_base_fee: {tx['l1_blob_base_fee'] / 1e9:.9f} gwei")
else:
print(" (No transactions)")
print(f"\n{'='*60}\n")
except Exception as e:
print(f"Error reading file: {e}")
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: python show_batches.py <data_file.pkl>")
print("\nExample: python show_batches.py galileo_data_batch_491125_491136.pkl")
sys.exit(1)
show_batches(sys.argv[1])