Clarify custom pricing override for LLM migrations

Improved documentation and comments for the custom_credit_cost field in the backend, frontend, and schema files to clarify its use as a billing override during LLM model migrations. Also removed unused LLM registry types and API methods from the frontend code, and renamed useLlmRegistryPage.ts to getLlmRegistryPage.ts for consistency.
This commit is contained in:
Bentlybro
2026-01-12 11:40:49 +00:00
parent 6cf28e58d3
commit 6ed8bb4f14
10 changed files with 30 additions and 327 deletions

View File

@@ -145,7 +145,7 @@ async def toggle_llm_model(
Optional fields:
- `migration_reason`: Reason for the migration (e.g., "Provider outage")
- `custom_credit_cost`: Custom pricing during the migration period
- `custom_credit_cost`: Custom pricing override for billing during migration
"""
try:
result = await llm_db.toggle_model(

View File

@@ -302,7 +302,9 @@ async def toggle_model(
migrate_to_slug: If disabling and this is provided, migrate all workflows
using this model to the specified replacement model
migration_reason: Optional reason for the migration (e.g., "Provider outage")
custom_credit_cost: Optional custom pricing during the migration period
custom_credit_cost: Optional custom pricing override for migrated workflows.
When set, the billing system should use this cost instead
of the target model's cost for affected nodes.
Returns:
ToggleLlmModelResponse with the updated model and optional migration stats

View File

@@ -138,7 +138,10 @@ class ToggleLlmModelRequest(pydantic.BaseModel):
is_enabled: bool
migrate_to_slug: Optional[str] = None
migration_reason: Optional[str] = None # e.g., "Provider outage"
custom_credit_cost: Optional[int] = None # Custom pricing during migration
# Custom pricing override for migrated workflows. When set, billing should use
# this cost instead of the target model's cost for affected nodes.
# See LlmModelMigration in schema.prisma for full documentation.
custom_credit_cost: Optional[int] = None
class ToggleLlmModelResponse(pydantic.BaseModel):
@@ -168,6 +171,7 @@ class LlmModelMigration(pydantic.BaseModel):
target_model_slug: str
reason: Optional[str] = None
node_count: int
# Custom pricing override - billing should use this instead of target model's cost
custom_credit_cost: Optional[int] = None
is_reverted: bool = False
created_at: str # ISO datetime string

View File

@@ -991,6 +991,11 @@ enum APIKeyStatus {
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// LlmCostUnit: Defines how LLM MODEL costs are calculated (per run or per token).
// This is distinct from BlockCostType (in backend/data/block.py) which defines
// how BLOCK EXECUTION costs are calculated (per run, per byte, or per second).
// LlmCostUnit is for pricing individual LLM model API calls in the registry,
// while BlockCostType is for billing platform block executions.
enum LlmCostUnit {
RUN
TOKENS
@@ -1106,9 +1111,17 @@ model LlmModelMigration {
migratedNodeIds Json @default("[]")
nodeCount Int // Number of nodes migrated
// Optional custom pricing during the migration period
// If set, this cost should be applied instead of the target model's cost
// Note: Requires billing system integration to fully work
// Custom pricing override for migrated workflows during the migration period.
// Use case: When migrating users from an expensive model (e.g., GPT-4) to a cheaper
// one (e.g., GPT-3.5), you may want to temporarily maintain the original pricing
// to avoid billing surprises, or offer a discount during the transition.
//
// IMPORTANT: This field is intended for integration with the billing system.
// When billing calculates costs for nodes affected by this migration, it should
// check if customCreditCost is set and use it instead of the target model's cost.
// If null, the target model's normal cost applies.
//
// TODO: Integrate with billing system to apply this override during cost calculation.
customCreditCost Int?
// Revert tracking

View File

@@ -208,8 +208,8 @@ export function DisableModelModal({
className="w-full rounded border border-input bg-background p-2 text-sm"
/>
<p className="mt-1 text-xs text-muted-foreground">
Override pricing during this migration (for billing
adjustments)
Override pricing for migrated workflows. When set, billing
will use this cost instead of the target model&apos;s cost.
</p>
</label>
</div>

View File

@@ -90,7 +90,8 @@ function MigrationRow({ migration }: { migration: LlmModelMigration }) {
</TableCell>
<TableCell>
<div className="text-sm">
{migration.custom_credit_cost !== null
{migration.custom_credit_cost !== null &&
migration.custom_credit_cost !== undefined
? `${migration.custom_credit_cost} credits`
: "—"}
</div>

View File

@@ -1,5 +1,5 @@
import { withRoleAccess } from "@/lib/withRoleAccess";
import { getLlmRegistryPageData } from "./useLlmRegistryPage";
import { getLlmRegistryPageData } from "./getLlmRegistryPage";
import { LlmRegistryDashboard } from "./components/LlmRegistryDashboard";
async function LlmRegistryPage() {

View File

@@ -43,8 +43,6 @@ import type {
LibraryAgentPresetUpdatable,
LibraryAgentResponse,
LibraryAgentSortEnum,
LlmModel,
LlmModelCreator,
MyAgentsResponse,
NodeExecutionResult,
NotificationPreference,
@@ -54,19 +52,6 @@ import type {
ProfileDetails,
RefundRequest,
ReviewSubmissionRequest,
CreateLlmModelRequest,
UpdateLlmModelRequest,
ToggleLlmModelRequest,
ToggleLlmModelResponse,
LlmModelMigration,
LlmMigrationsResponse,
RevertMigrationResponse,
UpsertLlmProviderRequest,
UpsertLlmCreatorRequest,
LlmModelsResponse,
LlmCreatorsResponse,
LlmProvider,
LlmProvidersResponse,
Schedule,
ScheduleCreatable,
ScheduleID,
@@ -393,143 +378,6 @@ export default class BackendAPI {
);
}
////////////////////////////////////////
/////////////// LLM MODELS /////////////
////////////////////////////////////////
listLlmModels(): Promise<LlmModelsResponse> {
return this._get("/llm/models");
}
listLlmProviders(includeModels = true): Promise<LlmProvidersResponse> {
const query = includeModels ? { include_models: true } : undefined;
return this._get("/llm/providers", query);
}
listAdminLlmProviders(includeModels = true): Promise<LlmProvidersResponse> {
const query = includeModels ? { include_models: true } : undefined;
return this._get("/llm/admin/llm/providers", query);
}
createAdminLlmProvider(
payload: UpsertLlmProviderRequest,
): Promise<LlmProvider> {
return this._request("POST", "/llm/admin/llm/providers", payload);
}
updateAdminLlmProvider(
providerId: string,
payload: UpsertLlmProviderRequest,
): Promise<LlmProvider> {
return this._request(
"PATCH",
`/llm/admin/llm/providers/${providerId}`,
payload,
);
}
listAdminLlmModels(providerId?: string): Promise<LlmModelsResponse> {
const query = providerId ? { provider_id: providerId } : undefined;
return this._get("/llm/admin/llm/models", query);
}
createAdminLlmModel(payload: CreateLlmModelRequest): Promise<LlmModel> {
return this._request("POST", "/llm/admin/llm/models", payload);
}
updateAdminLlmModel(
modelId: string,
payload: UpdateLlmModelRequest,
): Promise<LlmModel> {
return this._request("PATCH", `/llm/admin/llm/models/${modelId}`, payload);
}
toggleAdminLlmModel(
modelId: string,
payload: ToggleLlmModelRequest,
): Promise<ToggleLlmModelResponse> {
return this._request(
"PATCH",
`/llm/admin/llm/models/${modelId}/toggle`,
payload,
);
}
getAdminLlmModelUsage(
modelId: string,
): Promise<{ model_slug: string; node_count: number }> {
return this._get(`/llm/admin/llm/models/${modelId}/usage`);
}
deleteAdminLlmModel(
modelId: string,
replacementModelSlug: string,
): Promise<{
deleted_model_slug: string;
deleted_model_display_name: string;
replacement_model_slug: string;
nodes_migrated: number;
message: string;
}> {
return this._request("DELETE", `/llm/admin/llm/models/${modelId}`, {
replacement_model_slug: replacementModelSlug,
});
}
// Migration management
listAdminLlmMigrations(
includeReverted: boolean = false,
): Promise<LlmMigrationsResponse> {
return this._get(
`/llm/admin/llm/migrations?include_reverted=${includeReverted}`,
);
}
getAdminLlmMigration(migrationId: string): Promise<LlmModelMigration> {
return this._get(`/llm/admin/llm/migrations/${migrationId}`);
}
revertAdminLlmMigration(
migrationId: string,
): Promise<RevertMigrationResponse> {
return this._request(
"POST",
`/llm/admin/llm/migrations/${migrationId}/revert`,
);
}
// Creator management
listAdminLlmCreators(): Promise<LlmCreatorsResponse> {
return this._get("/llm/admin/llm/creators");
}
getAdminLlmCreator(creatorId: string): Promise<LlmModelCreator> {
return this._get(`/llm/admin/llm/creators/${creatorId}`);
}
createAdminLlmCreator(
payload: UpsertLlmCreatorRequest,
): Promise<LlmModelCreator> {
return this._request("POST", "/llm/admin/llm/creators", payload);
}
updateAdminLlmCreator(
creatorId: string,
payload: UpsertLlmCreatorRequest,
): Promise<LlmModelCreator> {
return this._request(
"PATCH",
`/llm/admin/llm/creators/${creatorId}`,
payload,
);
}
deleteAdminLlmCreator(
creatorId: string,
): Promise<{ success: boolean; message: string }> {
return this._request("DELETE", `/llm/admin/llm/creators/${creatorId}`);
}
// API Key related requests
async createAPIKey(
name: string,

View File

@@ -268,171 +268,6 @@ type BlockIOCombinedTypeSubSchema = BlockIOSubSchemaMeta &
| BlockIODiscriminatedOneOfSubSchema
);
////////////////////////////////////////
///////////// LLM REGISTRY /////////////
////////////////////////////////////////
export type LlmCostUnit = "RUN" | "TOKENS";
export type LlmModelCostInput = {
unit?: LlmCostUnit;
credit_cost: number;
credential_provider: string;
credential_id?: string | null;
credential_type?: string | null;
currency?: string | null;
metadata?: Record<string, any>;
};
export type LlmModelCost = LlmModelCostInput & {
id: string;
};
// Creator represents the organization that created/trained the model (e.g., OpenAI, Meta)
// This is distinct from Provider who hosts/serves the model (e.g., OpenRouter)
export type LlmModelCreator = {
id: string;
name: string;
display_name: string;
description?: string | null;
website_url?: string | null;
logo_url?: string | null;
metadata: Record<string, any>;
};
export type LlmModel = {
id: string;
slug: string;
display_name: string;
description?: string | null;
provider_id: string;
creator_id?: string | null;
creator?: LlmModelCreator | null;
context_window: number;
max_output_tokens?: number | null;
is_enabled: boolean;
capabilities: Record<string, any>;
metadata: Record<string, any>;
costs: LlmModelCost[];
};
export type LlmProvider = {
id: string;
name: string;
display_name: string;
description?: string | null;
default_credential_provider?: string | null;
default_credential_id?: string | null;
default_credential_type?: string | null;
supports_tools: boolean;
supports_json_output: boolean;
supports_reasoning: boolean;
supports_parallel_tool: boolean;
metadata: Record<string, any>;
models?: LlmModel[];
};
export type LlmProvidersResponse = {
providers: LlmProvider[];
};
export type LlmModelsResponse = {
models: LlmModel[];
};
export type LlmCreatorsResponse = {
creators: LlmModelCreator[];
};
export type UpsertLlmCreatorRequest = {
name: string;
display_name: string;
description?: string | null;
website_url?: string | null;
logo_url?: string | null;
metadata?: Record<string, any>;
};
export type UpsertLlmProviderRequest = {
name: string;
display_name: string;
description?: string | null;
default_credential_provider?: string | null;
default_credential_id?: string | null;
default_credential_type?: string | null;
supports_tools?: boolean;
supports_json_output?: boolean;
supports_reasoning?: boolean;
supports_parallel_tool?: boolean;
metadata?: Record<string, any>;
};
export type CreateLlmModelRequest = {
slug: string;
display_name: string;
description?: string | null;
provider_id: string;
creator_id?: string | null;
context_window: number;
max_output_tokens?: number | null;
is_enabled?: boolean;
capabilities?: Record<string, any>;
metadata?: Record<string, any>;
costs: LlmModelCostInput[];
};
export type UpdateLlmModelRequest = {
display_name?: string;
description?: string | null;
provider_id?: string;
creator_id?: string | null;
context_window?: number;
max_output_tokens?: number | null;
is_enabled?: boolean;
capabilities?: Record<string, any>;
metadata?: Record<string, any>;
costs?: LlmModelCostInput[];
};
export type ToggleLlmModelRequest = {
is_enabled: boolean;
migrate_to_slug?: string;
migration_reason?: string;
custom_credit_cost?: number;
};
export type ToggleLlmModelResponse = {
model: LlmModel;
nodes_migrated: number;
migrated_to_slug?: string | null;
migration_id?: string | null;
};
// Migration tracking types
export type LlmModelMigration = {
id: string;
source_model_slug: string;
target_model_slug: string;
reason?: string | null;
node_count: number;
custom_credit_cost?: number | null;
is_reverted: boolean;
created_at: string;
reverted_at?: string | null;
};
export type LlmMigrationsResponse = {
migrations: LlmModelMigration[];
};
export type RevertMigrationResponse = {
migration_id: string;
source_model_slug: string;
target_model_slug: string;
nodes_reverted: number;
message: string;
};
export type BlockIOOneOfSubSchema = {
oneOf: BlockIOSimpleTypeSubSchema[];
default?: string | number | boolean | null;