mirror of https://github.com/simstudioai/sim.git
synced 2026-01-25 23:08:11 -05:00

Compare commits: fix/copilo...fix/claude

1 commit
| Author | SHA1 | Date |
|---|---|---|
| | dbe53f406b | |
```diff
@@ -513,6 +513,12 @@ Return ONLY the JSON array.`,
       })(),
     }),
   },
+  {
+    id: 'maxTokens',
+    title: 'Max Output Tokens',
+    type: 'short-input',
+    placeholder: 'Enter max tokens (e.g., 4096)...',
+  },
   {
     id: 'responseFormat',
     title: 'Response Format',
```
```diff
@@ -754,6 +760,7 @@ Example 3 (Array Input):
      },
    },
    temperature: { type: 'number', description: 'Response randomness level' },
+   maxTokens: { type: 'number', description: 'Maximum number of tokens in the response' },
    reasoningEffort: { type: 'string', description: 'Reasoning effort level for GPT-5 models' },
    verbosity: { type: 'string', description: 'Verbosity level for GPT-5 models' },
    thinkingLevel: { type: 'string', description: 'Thinking level for Gemini 3 models' },
```
```diff
@@ -2508,10 +2508,6 @@ async function validateWorkflowSelectorIds(
   for (const subBlockConfig of blockConfig.subBlocks) {
     if (!SELECTOR_TYPES.has(subBlockConfig.type)) continue
 
-    // Skip oauth-input - credentials are pre-validated before edit application
-    // This allows existing collaborator credentials to remain untouched
-    if (subBlockConfig.type === 'oauth-input') continue
-
     const subBlockValue = blockData.subBlocks?.[subBlockConfig.id]?.value
     if (!subBlockValue) continue
 
```
```diff
@@ -2577,150 +2573,6 @@ async function validateWorkflowSelectorIds(
   return errors
 }
 
-/**
- * Pre-validates credential and apiKey inputs in operations before they are applied.
- * - Validates oauth-input (credential) IDs belong to the user
- * - Filters out apiKey inputs for hosted models when isHosted is true
- * Returns validation errors for any removed inputs.
- */
-async function preValidateCredentialInputs(
-  operations: EditWorkflowOperation[],
-  context: { userId: string }
-): Promise<{ filteredOperations: EditWorkflowOperation[]; errors: ValidationError[] }> {
-  const { isHosted } = await import('@/lib/core/config/feature-flags')
-  const { getHostedModels } = await import('@/providers/utils')
-
-  const logger = createLogger('PreValidateCredentials')
-  const errors: ValidationError[] = []
-
-  // Collect credential and apiKey inputs that need validation/filtering
-  const credentialInputs: Array<{
-    operationIndex: number
-    blockId: string
-    blockType: string
-    fieldName: string
-    value: string
-  }> = []
-
-  const hostedApiKeyInputs: Array<{
-    operationIndex: number
-    blockId: string
-    blockType: string
-    model: string
-  }> = []
-
-  const hostedModelsLower = isHosted
-    ? new Set(getHostedModels().map((m) => m.toLowerCase()))
-    : null
-
-  operations.forEach((op, opIndex) => {
-    if (!op.params?.inputs || !op.params?.type) return
-
-    const blockConfig = getBlock(op.params.type)
-    if (!blockConfig) return
-
-    // Find oauth-input subblocks
-    for (const subBlockConfig of blockConfig.subBlocks) {
-      if (subBlockConfig.type !== 'oauth-input') continue
-
-      const inputValue = op.params.inputs[subBlockConfig.id]
-      if (!inputValue || typeof inputValue !== 'string' || inputValue.trim() === '') continue
-
-      credentialInputs.push({
-        operationIndex: opIndex,
-        blockId: op.block_id,
-        blockType: op.params.type,
-        fieldName: subBlockConfig.id,
-        value: inputValue,
-      })
-    }
-
-    // Check for apiKey inputs on hosted models
-    if (hostedModelsLower && op.params.inputs.apiKey) {
-      const modelValue = op.params.inputs.model
-      if (modelValue && typeof modelValue === 'string') {
-        if (hostedModelsLower.has(modelValue.toLowerCase())) {
-          hostedApiKeyInputs.push({
-            operationIndex: opIndex,
-            blockId: op.block_id,
-            blockType: op.params.type,
-            model: modelValue,
-          })
-        }
-      }
-    }
-  })
-
-  const hasCredentialsToValidate = credentialInputs.length > 0
-  const hasHostedApiKeysToFilter = hostedApiKeyInputs.length > 0
-
-  if (!hasCredentialsToValidate && !hasHostedApiKeysToFilter) {
-    return { filteredOperations: operations, errors }
-  }
-
-  // Deep clone operations so we can modify them
-  const filteredOperations = structuredClone(operations)
-
-  // Filter out apiKey inputs for hosted models
-  if (hasHostedApiKeysToFilter) {
-    logger.info('Filtering apiKey inputs for hosted models', { count: hostedApiKeyInputs.length })
-
-    for (const apiKeyInput of hostedApiKeyInputs) {
-      const op = filteredOperations[apiKeyInput.operationIndex]
-      if (op.params?.inputs?.apiKey) {
-        delete op.params.inputs.apiKey
-        logger.debug('Silently filtered apiKey for hosted model', {
-          blockId: apiKeyInput.blockId,
-          model: apiKeyInput.model,
-        })
-      }
-    }
-  }
-
-  // Validate credential inputs
-  if (hasCredentialsToValidate) {
-    logger.info('Pre-validating credential inputs', {
-      credentialCount: credentialInputs.length,
-      userId: context.userId,
-    })
-
-    const allCredentialIds = credentialInputs.map((c) => c.value)
-    const validationResult = await validateSelectorIds('oauth-input', allCredentialIds, context)
-    const invalidSet = new Set(validationResult.invalid)
-
-    if (invalidSet.size > 0) {
-      for (const credInput of credentialInputs) {
-        if (!invalidSet.has(credInput.value)) continue
-
-        const op = filteredOperations[credInput.operationIndex]
-        if (op.params?.inputs?.[credInput.fieldName]) {
-          delete op.params.inputs[credInput.fieldName]
-          logger.info('Removed invalid credential from operation', {
-            blockId: credInput.blockId,
-            field: credInput.fieldName,
-            invalidValue: credInput.value,
-          })
-        }
-
-        const warningInfo = validationResult.warning ? `. ${validationResult.warning}` : ''
-        errors.push({
-          blockId: credInput.blockId,
-          blockType: credInput.blockType,
-          field: credInput.fieldName,
-          value: credInput.value,
-          error: `Invalid credential ID "${credInput.value}" - credential does not exist or user doesn't have access${warningInfo}`,
-        })
-      }
-
-      logger.warn('Filtered out invalid credentials', {
-        invalidCount: invalidSet.size,
-      })
-    }
-  }
-
-  return { filteredOperations, errors }
-}
-
 async function getCurrentWorkflowStateFromDb(
   workflowId: string
 ): Promise<{ workflowState: any; subBlockValues: Record<string, Record<string, any>> }> {
```
```diff
@@ -2805,28 +2657,12 @@ export const editWorkflowServerTool: BaseServerTool<EditWorkflowParams, any> = {
     // Get permission config for the user
     const permissionConfig = context?.userId ? await getUserPermissionConfig(context.userId) : null
 
-    // Pre-validate credential and apiKey inputs before applying operations
-    // This filters out invalid credentials and apiKeys for hosted models
-    let operationsToApply = operations
-    const credentialErrors: ValidationError[] = []
-    if (context?.userId) {
-      const { filteredOperations, errors: credErrors } = await preValidateCredentialInputs(
-        operations,
-        { userId: context.userId }
-      )
-      operationsToApply = filteredOperations
-      credentialErrors.push(...credErrors)
-    }
-
     // Apply operations directly to the workflow state
     const {
       state: modifiedWorkflowState,
       validationErrors,
       skippedItems,
-    } = applyOperationsToWorkflowState(workflowState, operationsToApply, permissionConfig)
-
-    // Add credential validation errors
-    validationErrors.push(...credentialErrors)
+    } = applyOperationsToWorkflowState(workflowState, operations, permissionConfig)
 
     // Get workspaceId for selector validation
     let workspaceId: string | undefined
```
```diff
@@ -9,6 +9,7 @@ import {
   generateToolUseId,
 } from '@/providers/anthropic/utils'
 import {
+  getMaxOutputTokensForModel,
   getProviderDefaultModel,
   getProviderModels,
   supportsNativeStructuredOutputs,
@@ -178,7 +179,9 @@ export const anthropicProvider: ProviderConfig = {
       model: request.model,
       messages,
       system: systemPrompt,
-      max_tokens: Number.parseInt(String(request.maxTokens)) || 1024,
+      max_tokens:
+        Number.parseInt(String(request.maxTokens)) ||
+        getMaxOutputTokensForModel(request.model, request.stream ?? false),
       temperature: Number.parseFloat(String(request.temperature ?? 0.7)),
     }
```
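The Anthropic hunk above (and the Bedrock hunk below) use the same `||` fallback for the token limit. A minimal standalone sketch of how it resolves; the values and the `modelDefault` stand-in are illustrative, not taken from the repo:

```ts
// parseInt(String(x)) yields NaN for undefined/null/non-numeric input,
// so `||` falls through to the model default.
// modelDefault stands in for getMaxOutputTokensForModel(model, stream).
const resolveMaxTokens = (maxTokens: unknown, modelDefault: number) =>
  Number.parseInt(String(maxTokens)) || modelDefault

resolveMaxTokens(8000, 4096)      // 8000 - an explicit request value wins
resolveMaxTokens(undefined, 4096) // 4096 - "undefined" parses to NaN, so the default applies
resolveMaxTokens('64000', 4096)   // 64000 - numeric strings still parse
```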
```diff
@@ -20,7 +20,11 @@ import {
   generateToolUseId,
   getBedrockInferenceProfileId,
 } from '@/providers/bedrock/utils'
-import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
+import {
+  getMaxOutputTokensForModel,
+  getProviderDefaultModel,
+  getProviderModels,
+} from '@/providers/models'
 import type {
   ProviderConfig,
   ProviderRequest,
@@ -259,7 +263,9 @@ export const bedrockProvider: ProviderConfig = {
 
     const inferenceConfig = {
       temperature: Number.parseFloat(String(request.temperature ?? 0.7)),
-      maxTokens: Number.parseInt(String(request.maxTokens)) || 4096,
+      maxTokens:
+        Number.parseInt(String(request.maxTokens)) ||
+        getMaxOutputTokensForModel(request.model, request.stream ?? false),
     }
 
     const shouldStreamToolCalls = request.streamToolCalls ?? false
```
```diff
@@ -34,6 +34,12 @@ export interface ModelCapabilities {
   toolUsageControl?: boolean
   computerUse?: boolean
   nativeStructuredOutputs?: boolean
+  maxOutputTokens?: {
+    /** Maximum tokens for streaming requests */
+    max: number
+    /** Safe default for non-streaming requests (to avoid timeout issues) */
+    default: number
+  }
   reasoningEffort?: {
     values: string[]
   }
```
```diff
@@ -613,6 +619,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
       capabilities: {
         temperature: { min: 0, max: 1 },
         nativeStructuredOutputs: true,
+        maxOutputTokens: { max: 64000, default: 4096 },
       },
       contextWindow: 200000,
     },
@@ -627,6 +634,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        nativeStructuredOutputs: true,
+       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -640,6 +648,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      },
      capabilities: {
        temperature: { min: 0, max: 1 },
+       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -654,6 +663,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        nativeStructuredOutputs: true,
+       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -668,6 +678,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        nativeStructuredOutputs: true,
+       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -681,6 +692,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      },
      capabilities: {
        temperature: { min: 0, max: 1 },
+       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -695,6 +707,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        computerUse: true,
+       maxOutputTokens: { max: 8192, default: 8192 },
      },
      contextWindow: 200000,
    },
@@ -709,6 +722,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        computerUse: true,
+       maxOutputTokens: { max: 8192, default: 8192 },
      },
      contextWindow: 200000,
    },
@@ -1655,6 +1669,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        nativeStructuredOutputs: true,
+       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -1668,6 +1683,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        nativeStructuredOutputs: true,
+       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -1681,6 +1697,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        nativeStructuredOutputs: true,
+       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -1694,6 +1711,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        nativeStructuredOutputs: true,
+       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
```
```diff
@@ -2333,3 +2351,31 @@ export function getThinkingLevelsForModel(modelId: string): string[] | null {
   const capability = getThinkingCapability(modelId)
   return capability?.levels ?? null
 }
+
+/**
+ * Get the max output tokens for a specific model
+ * Returns the model's max capacity for streaming requests,
+ * or the model's safe default for non-streaming requests to avoid timeout issues.
+ *
+ * @param modelId - The model ID
+ * @param streaming - Whether the request is streaming (default: false)
+ */
+export function getMaxOutputTokensForModel(modelId: string, streaming = false): number {
+  const normalizedModelId = modelId.toLowerCase()
+  const STANDARD_MAX_OUTPUT_TOKENS = 4096
+
+  for (const provider of Object.values(PROVIDER_DEFINITIONS)) {
+    for (const model of provider.models) {
+      const baseModelId = model.id.toLowerCase()
+      if (normalizedModelId === baseModelId || normalizedModelId.startsWith(`${baseModelId}-`)) {
+        const outputTokens = model.capabilities.maxOutputTokens
+        if (outputTokens) {
+          return streaming ? outputTokens.max : outputTokens.default
+        }
+        return STANDARD_MAX_OUTPUT_TOKENS
+      }
+    }
+  }
+
+  return STANDARD_MAX_OUTPUT_TOKENS
+}
```
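Taken together with the `maxOutputTokens` capabilities added above, the lookup behaves roughly as in the following usage sketch; the model ids are placeholders, not actual entries in `PROVIDER_DEFINITIONS`:

```ts
// Assuming a model entry declares maxOutputTokens: { max: 64000, default: 4096 },
// as several of the hunks above do:
getMaxOutputTokensForModel('some-64k-model', true)   // 64000 - streaming gets the full ceiling
getMaxOutputTokensForModel('some-64k-model', false)  // 4096  - non-streaming gets the safe default
getMaxOutputTokensForModel('some-64k-model-latest')  // 4096  - "<id>-..." prefix match also resolves
getMaxOutputTokensForModel('unknown-model')          // 4096  - STANDARD_MAX_OUTPUT_TOKENS fallback
```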
```diff
@@ -8,6 +8,7 @@ import {
   getComputerUseModels,
   getEmbeddingModelPricing,
   getHostedModels as getHostedModelsFromDefinitions,
+  getMaxOutputTokensForModel as getMaxOutputTokensForModelFromDefinitions,
   getMaxTemperature as getMaxTempFromDefinitions,
   getModelPricing as getModelPricingFromDefinitions,
   getModelsWithReasoningEffort,
@@ -992,6 +993,18 @@ export function getThinkingLevelsForModel(model: string): string[] | null {
   return getThinkingLevelsForModelFromDefinitions(model)
 }
 
+/**
+ * Get max output tokens for a specific model
+ * Returns the model's maxOutputTokens capability for streaming requests,
+ * or a conservative default (8192) for non-streaming requests to avoid timeout issues.
+ *
+ * @param model - The model ID
+ * @param streaming - Whether the request is streaming (default: false)
+ */
+export function getMaxOutputTokensForModel(model: string, streaming = false): number {
+  return getMaxOutputTokensForModelFromDefinitions(model, streaming)
+}
+
 /**
  * Prepare tool execution parameters, separating tool parameters from system parameters
  */
```
```diff
@@ -5,7 +5,7 @@ import type { ToolConfig, ToolResponse } from '@/tools/types'
 const logger = createLogger('BrowserUseTool')
 
 const POLL_INTERVAL_MS = 5000
-const MAX_POLL_TIME_MS = 180000
+const MAX_POLL_TIME_MS = 600000 // 10 minutes
 const MAX_CONSECUTIVE_ERRORS = 3
 
 async function createSessionWithProfile(
```
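For context on the constant change, assuming the tool simply polls every `POLL_INTERVAL_MS` until `MAX_POLL_TIME_MS` elapses (the actual polling loop is not shown in this diff), the budget changes roughly like this:

```ts
// Rough polling budget implied by the constants.
const POLL_INTERVAL_MS = 5000
const OLD_MAX_POLL_TIME_MS = 180000 // 3 minutes
const NEW_MAX_POLL_TIME_MS = 600000 // 10 minutes

console.log(OLD_MAX_POLL_TIME_MS / POLL_INTERVAL_MS) // 36 polls before timing out
console.log(NEW_MAX_POLL_TIME_MS / POLL_INTERVAL_MS) // 120 polls before timing out
```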