mirror of
https://github.com/simstudioai/sim.git
synced 2026-04-06 03:00:16 -04:00
* improvement(processing): reduce redundant DB queries in execution preprocessing * improvement(processing): add defensive ID check for prefetched workflow record * improvement(processing): fix type safety in execution error logging Replace `as any` cast in non-SSE error path with proper `buildTraceSpans()` transformation, matching the SSE error path. Remove redundant `as any` cast in preprocessing.ts where the types already align. * improvement(processing): replace `as any` casts with proper types in logging - logger.ts: cast JSONB cost column to `WorkflowExecutionLog['cost']` instead of `any` in both `completeWorkflowExecution` and `getWorkflowExecution` - logger.ts: replace `(orgUsageBefore as any)?.toString?.()` with `String()` since COALESCE guarantees a non-null SQL aggregate value - logging-session.ts: cast JSONB cost to `AccumulatedCost` (the local interface) instead of `any` in `loadExistingCost` * improvement(processing): use exported HighestPrioritySubscription type in usage.ts Replace inline `Awaited<ReturnType<typeof getHighestPrioritySubscription>>` with the already-exported `HighestPrioritySubscription` type alias. * improvement(processing): replace remaining `as any` casts with proper types - preprocessing.ts: use exported `HighestPrioritySubscription` type instead of redeclaring via `Awaited<ReturnType<...>>` - deploy/route.ts, status/route.ts: cast `hasWorkflowChanged` args to `WorkflowState` instead of `any` (JSONB + object literal narrowing) - state/route.ts: type block sanitization and save with `BlockState` and `WorkflowState` instead of `any` - search-suggestions.ts: remove 8 unnecessary `as any` casts on `'date'` literal that already satisfies the `Suggestion['category']` union * fix(processing): prevent double-billing race in LoggingSession completion When executeWorkflowCore throws, its catch block fire-and-forgets safeCompleteWithError, then re-throws. 
The caller's catch block also fire-and-forgets safeCompleteWithError on the same LoggingSession. Both check this.completed (still false) before either's async DB write resolves, so both proceed to completeWorkflowExecution which uses additive SQL for billing — doubling the charged cost on every failed execution. Fix: add a synchronous `completing` flag set immediately before the async work begins. This blocks concurrent callers at the guard check. On failure, the flag is reset so the safe* fallback path (completeWithCostOnlyLog) can still attempt recovery. * fix(processing): unblock error responses and isolate run-count failures Remove unnecessary `await waitForCompletion()` from non-SSE and SSE error paths where no `markAsFailed()` follows — these were blocking error responses on log persistence for no reason. Wrap `updateWorkflowRunCounts` in its own try/catch so a run-count DB failure cannot prevent session completion, billing, and trace span persistence. * improvement(processing): remove dead setupExecutor method The method body was just a debug log with an `any` parameter — logging now works entirely through trace spans with no executor integration. * remove logger.debug * fix(processing): guard completionPromise as write-once (singleton promise) Prevent concurrent safeComplete* calls from overwriting completionPromise with a no-op. The guard now lives at the assignment site — if a completion is already in-flight, return its promise instead of starting a new one. This ensures waitForCompletion() always awaits the real work. * improvement(processing): remove empty else/catch blocks left by debug log cleanup * fix(processing): enforce waitForCompletion inside markAsFailed to prevent completion races Move waitForCompletion() into markAsFailed() so every call site is automatically safe against in-flight fire-and-forget completions. Remove the now-redundant external waitForCompletion() calls in route.ts. 
* fix(processing): reset completing flag on fallback failure, clean up empty catch - completeWithCostOnlyLog now resets this.completing = false when the fallback itself fails, preventing a permanently stuck session - Use _disconnectError in MCP test-connection to signal intentional ignore * fix(processing): restore disconnect error logging in MCP test-connection Revert unrelated debug log removal — this file isn't part of the processing improvements and the log aids connection leak detection. * fix(processing): address audit findings across branch - preprocessing.ts: use undefined (not null) for failed subscription fetch so getUserUsageLimit does a fresh lookup instead of silently falling back to free-tier limits - deployed/route.ts: log warning on loadDeployedWorkflowState failure instead of silently swallowing the error - schedule-execution.ts: remove dead successLog parameter and all call-site arguments left over from logger.debug cleanup - mcp/middleware.ts: drop unused error binding in empty catch - audit/log.ts, wand.ts: promote logger.debug to logger.warn in catch blocks where these are the only failure signal * revert: undo unnecessary subscription null→undefined change getHighestPrioritySubscription never throws (it catches internally and returns null), so the catch block in preprocessExecution is dead code. The null vs undefined distinction doesn't matter and the coercions added unnecessary complexity. * improvement(processing): remove dead try/catch around getHighestPrioritySubscription getHighestPrioritySubscription catches internally and returns null on error, so the wrapping try/catch was unreachable dead code. * improvement(processing): remove dead getSnapshotByHash method No longer called after createSnapshotWithDeduplication was refactored to use a single upsert instead of select-then-insert. ---------
173 lines
5.0 KiB
TypeScript
import { db } from '@sim/db'
|
|
import { userStats } from '@sim/db/schema'
|
|
import { createLogger } from '@sim/logger'
|
|
import { eq, sql } from 'drizzle-orm'
|
|
import { type NextRequest, NextResponse } from 'next/server'
|
|
import { z } from 'zod'
|
|
import { logModelUsage } from '@/lib/billing/core/usage-log'
|
|
import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing'
|
|
import { checkInternalApiKey } from '@/lib/copilot/utils'
|
|
import { isBillingEnabled } from '@/lib/core/config/feature-flags'
|
|
import { generateRequestId } from '@/lib/core/utils/request'
|
|
|
|
const logger = createLogger('BillingUpdateCostAPI')

// Request-body schema for POST /api/billing/update-cost.
const UpdateCostSchema = z.object({
  // Target user whose stats/billing counters are incremented.
  userId: z.string().min(1, 'User ID is required'),
  // Pre-calculated cost to add; negative values are rejected.
  cost: z.number().min(0, 'Cost must be a non-negative number'),
  // Model identifier recorded in the usage audit log.
  model: z.string().min(1, 'Model is required'),
  // Token counts for the audit trail; default to 0 when omitted.
  inputTokens: z.number().min(0).default(0),
  outputTokens: z.number().min(0).default(0),
  // 'mcp_copilot' additionally increments the MCP-specific cost counters.
  source: z.enum(['copilot', 'mcp_copilot']).default('copilot'),
})
|
|
|
|
/**
|
|
* POST /api/billing/update-cost
|
|
* Update user cost with a pre-calculated cost value (internal API key auth required)
|
|
*/
|
|
export async function POST(req: NextRequest) {
|
|
const requestId = generateRequestId()
|
|
const startTime = Date.now()
|
|
|
|
try {
|
|
logger.info(`[${requestId}] Update cost request started`)
|
|
|
|
if (!isBillingEnabled) {
|
|
return NextResponse.json({
|
|
success: true,
|
|
message: 'Billing disabled, cost update skipped',
|
|
data: {
|
|
billingEnabled: false,
|
|
processedAt: new Date().toISOString(),
|
|
requestId,
|
|
},
|
|
})
|
|
}
|
|
|
|
// Check authentication (internal API key)
|
|
const authResult = checkInternalApiKey(req)
|
|
if (!authResult.success) {
|
|
logger.warn(`[${requestId}] Authentication failed: ${authResult.error}`)
|
|
return NextResponse.json(
|
|
{
|
|
success: false,
|
|
error: authResult.error || 'Authentication failed',
|
|
},
|
|
{ status: 401 }
|
|
)
|
|
}
|
|
|
|
const body = await req.json()
|
|
const validation = UpdateCostSchema.safeParse(body)
|
|
|
|
if (!validation.success) {
|
|
logger.warn(`[${requestId}] Invalid request body`, {
|
|
errors: validation.error.issues,
|
|
body,
|
|
})
|
|
return NextResponse.json(
|
|
{
|
|
success: false,
|
|
error: 'Invalid request body',
|
|
details: validation.error.issues,
|
|
},
|
|
{ status: 400 }
|
|
)
|
|
}
|
|
|
|
const { userId, cost, model, inputTokens, outputTokens, source } = validation.data
|
|
const isMcp = source === 'mcp_copilot'
|
|
|
|
logger.info(`[${requestId}] Processing cost update`, {
|
|
userId,
|
|
cost,
|
|
model,
|
|
source,
|
|
})
|
|
|
|
// Check if user stats record exists (same as ExecutionLogger)
|
|
const userStatsRecords = await db.select().from(userStats).where(eq(userStats.userId, userId))
|
|
|
|
if (userStatsRecords.length === 0) {
|
|
logger.error(
|
|
`[${requestId}] User stats record not found - should be created during onboarding`,
|
|
{
|
|
userId,
|
|
}
|
|
)
|
|
return NextResponse.json({ error: 'User stats record not found' }, { status: 500 })
|
|
}
|
|
|
|
const updateFields: Record<string, unknown> = {
|
|
totalCost: sql`total_cost + ${cost}`,
|
|
currentPeriodCost: sql`current_period_cost + ${cost}`,
|
|
totalCopilotCost: sql`total_copilot_cost + ${cost}`,
|
|
currentPeriodCopilotCost: sql`current_period_copilot_cost + ${cost}`,
|
|
totalCopilotCalls: sql`total_copilot_calls + 1`,
|
|
lastActive: new Date(),
|
|
}
|
|
|
|
// Also increment MCP-specific counters when source is mcp_copilot
|
|
if (isMcp) {
|
|
updateFields.totalMcpCopilotCost = sql`total_mcp_copilot_cost + ${cost}`
|
|
updateFields.currentPeriodMcpCopilotCost = sql`current_period_mcp_copilot_cost + ${cost}`
|
|
}
|
|
|
|
await db.update(userStats).set(updateFields).where(eq(userStats.userId, userId))
|
|
|
|
logger.info(`[${requestId}] Updated user stats record`, {
|
|
userId,
|
|
addedCost: cost,
|
|
source,
|
|
})
|
|
|
|
// Log usage for complete audit trail
|
|
await logModelUsage({
|
|
userId,
|
|
source: isMcp ? 'mcp_copilot' : 'copilot',
|
|
model,
|
|
inputTokens,
|
|
outputTokens,
|
|
cost,
|
|
})
|
|
|
|
// Check if user has hit overage threshold and bill incrementally
|
|
await checkAndBillOverageThreshold(userId)
|
|
|
|
const duration = Date.now() - startTime
|
|
|
|
logger.info(`[${requestId}] Cost update completed successfully`, {
|
|
userId,
|
|
duration,
|
|
cost,
|
|
})
|
|
|
|
return NextResponse.json({
|
|
success: true,
|
|
data: {
|
|
userId,
|
|
cost,
|
|
processedAt: new Date().toISOString(),
|
|
requestId,
|
|
},
|
|
})
|
|
} catch (error) {
|
|
const duration = Date.now() - startTime
|
|
|
|
logger.error(`[${requestId}] Cost update failed`, {
|
|
error: error instanceof Error ? error.message : String(error),
|
|
stack: error instanceof Error ? error.stack : undefined,
|
|
duration,
|
|
})
|
|
|
|
return NextResponse.json(
|
|
{
|
|
success: false,
|
|
error: 'Internal server error',
|
|
requestId,
|
|
},
|
|
{ status: 500 }
|
|
)
|
|
}
|
|
}
|