Compare commits


4 Commits

Author              SHA1        Message         Date
Siddharth Ganesan   62b06d00de  Fix comments    2026-01-25 14:33:27 -08:00
Siddharth Ganesan   2a630859fb  Fix validation  2026-01-25 14:31:12 -08:00
Siddharth Ganesan   3533bd009d  Fix greptile    2026-01-25 13:27:51 -08:00
Siddharth Ganesan   43402fde1c  Fix             2026-01-25 13:15:07 -08:00
7 changed files with 169 additions and 80 deletions

View File

@@ -513,12 +513,6 @@ Return ONLY the JSON array.`,
         })(),
       }),
     },
-    {
-      id: 'maxTokens',
-      title: 'Max Output Tokens',
-      type: 'short-input',
-      placeholder: 'Enter max tokens (e.g., 4096)...',
-    },
     {
       id: 'responseFormat',
       title: 'Response Format',
@@ -760,7 +754,6 @@ Example 3 (Array Input):
       },
     },
     temperature: { type: 'number', description: 'Response randomness level' },
-    maxTokens: { type: 'number', description: 'Maximum number of tokens in the response' },
     reasoningEffort: { type: 'string', description: 'Reasoning effort level for GPT-5 models' },
     verbosity: { type: 'string', description: 'Verbosity level for GPT-5 models' },
     thinkingLevel: { type: 'string', description: 'Thinking level for Gemini 3 models' },

View File

@@ -2508,6 +2508,10 @@ async function validateWorkflowSelectorIds(
   for (const subBlockConfig of blockConfig.subBlocks) {
     if (!SELECTOR_TYPES.has(subBlockConfig.type)) continue
+
+    // Skip oauth-input - credentials are pre-validated before edit application
+    // This allows existing collaborator credentials to remain untouched
+    if (subBlockConfig.type === 'oauth-input') continue
     const subBlockValue = blockData.subBlocks?.[subBlockConfig.id]?.value
     if (!subBlockValue) continue
@@ -2573,6 +2577,150 @@ async function validateWorkflowSelectorIds(
   return errors
 }
 
+/**
+ * Pre-validates credential and apiKey inputs in operations before they are applied.
+ * - Validates oauth-input (credential) IDs belong to the user
+ * - Filters out apiKey inputs for hosted models when isHosted is true
+ * Returns validation errors for any removed inputs.
+ */
+async function preValidateCredentialInputs(
+  operations: EditWorkflowOperation[],
+  context: { userId: string }
+): Promise<{ filteredOperations: EditWorkflowOperation[]; errors: ValidationError[] }> {
+  const { isHosted } = await import('@/lib/core/config/feature-flags')
+  const { getHostedModels } = await import('@/providers/utils')
+  const logger = createLogger('PreValidateCredentials')
+  const errors: ValidationError[] = []
+
+  // Collect credential and apiKey inputs that need validation/filtering
+  const credentialInputs: Array<{
+    operationIndex: number
+    blockId: string
+    blockType: string
+    fieldName: string
+    value: string
+  }> = []
+  const hostedApiKeyInputs: Array<{
+    operationIndex: number
+    blockId: string
+    blockType: string
+    model: string
+  }> = []
+
+  const hostedModelsLower = isHosted
+    ? new Set(getHostedModels().map((m) => m.toLowerCase()))
+    : null
+
+  operations.forEach((op, opIndex) => {
+    if (!op.params?.inputs || !op.params?.type) return
+    const blockConfig = getBlock(op.params.type)
+    if (!blockConfig) return
+
+    // Find oauth-input subblocks
+    for (const subBlockConfig of blockConfig.subBlocks) {
+      if (subBlockConfig.type !== 'oauth-input') continue
+      const inputValue = op.params.inputs[subBlockConfig.id]
+      if (!inputValue || typeof inputValue !== 'string' || inputValue.trim() === '') continue
+      credentialInputs.push({
+        operationIndex: opIndex,
+        blockId: op.block_id,
+        blockType: op.params.type,
+        fieldName: subBlockConfig.id,
+        value: inputValue,
+      })
+    }
+
+    // Check for apiKey inputs on hosted models
+    if (hostedModelsLower && op.params.inputs.apiKey) {
+      const modelValue = op.params.inputs.model
+      if (modelValue && typeof modelValue === 'string') {
+        if (hostedModelsLower.has(modelValue.toLowerCase())) {
+          hostedApiKeyInputs.push({
+            operationIndex: opIndex,
+            blockId: op.block_id,
+            blockType: op.params.type,
+            model: modelValue,
+          })
+        }
+      }
+    }
+  })
+
+  const hasCredentialsToValidate = credentialInputs.length > 0
+  const hasHostedApiKeysToFilter = hostedApiKeyInputs.length > 0
+  if (!hasCredentialsToValidate && !hasHostedApiKeysToFilter) {
+    return { filteredOperations: operations, errors }
+  }
+
+  // Deep clone operations so we can modify them
+  const filteredOperations = structuredClone(operations)
+
+  // Filter out apiKey inputs for hosted models
+  if (hasHostedApiKeysToFilter) {
+    logger.info('Filtering apiKey inputs for hosted models', { count: hostedApiKeyInputs.length })
+    for (const apiKeyInput of hostedApiKeyInputs) {
+      const op = filteredOperations[apiKeyInput.operationIndex]
+      if (op.params?.inputs?.apiKey) {
+        delete op.params.inputs.apiKey
+        logger.debug('Silently filtered apiKey for hosted model', {
+          blockId: apiKeyInput.blockId,
+          model: apiKeyInput.model,
+        })
+      }
+    }
+  }
+
+  // Validate credential inputs
+  if (hasCredentialsToValidate) {
+    logger.info('Pre-validating credential inputs', {
+      credentialCount: credentialInputs.length,
+      userId: context.userId,
+    })
+    const allCredentialIds = credentialInputs.map((c) => c.value)
+    const validationResult = await validateSelectorIds('oauth-input', allCredentialIds, context)
+    const invalidSet = new Set(validationResult.invalid)
+
+    if (invalidSet.size > 0) {
+      for (const credInput of credentialInputs) {
+        if (!invalidSet.has(credInput.value)) continue
+        const op = filteredOperations[credInput.operationIndex]
+        if (op.params?.inputs?.[credInput.fieldName]) {
+          delete op.params.inputs[credInput.fieldName]
+          logger.info('Removed invalid credential from operation', {
+            blockId: credInput.blockId,
+            field: credInput.fieldName,
+            invalidValue: credInput.value,
+          })
+        }
+        const warningInfo = validationResult.warning ? `. ${validationResult.warning}` : ''
+        errors.push({
+          blockId: credInput.blockId,
+          blockType: credInput.blockType,
+          field: credInput.fieldName,
+          value: credInput.value,
+          error: `Invalid credential ID "${credInput.value}" - credential does not exist or user doesn't have access${warningInfo}`,
+        })
+      }
+      logger.warn('Filtered out invalid credentials', {
+        invalidCount: invalidSet.size,
+      })
+    }
+  }
+
+  return { filteredOperations, errors }
+}
+
 async function getCurrentWorkflowStateFromDb(
   workflowId: string
 ): Promise<{ workflowState: any; subBlockValues: Record<string, Record<string, any>> }> {
@@ -2657,12 +2805,28 @@ export const editWorkflowServerTool: BaseServerTool<EditWorkflowParams, any> = {
     // Get permission config for the user
     const permissionConfig = context?.userId ? await getUserPermissionConfig(context.userId) : null
 
+    // Pre-validate credential and apiKey inputs before applying operations
+    // This filters out invalid credentials and apiKeys for hosted models
+    let operationsToApply = operations
+    const credentialErrors: ValidationError[] = []
+    if (context?.userId) {
+      const { filteredOperations, errors: credErrors } = await preValidateCredentialInputs(
+        operations,
+        { userId: context.userId }
+      )
+      operationsToApply = filteredOperations
+      credentialErrors.push(...credErrors)
+    }
+
     // Apply operations directly to the workflow state
     const {
       state: modifiedWorkflowState,
       validationErrors,
       skippedItems,
-    } = applyOperationsToWorkflowState(workflowState, operations, permissionConfig)
+    } = applyOperationsToWorkflowState(workflowState, operationsToApply, permissionConfig)
+
+    // Add credential validation errors
+    validationErrors.push(...credentialErrors)
 
     // Get workspaceId for selector validation
     let workspaceId: string | undefined
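
A note on the contract of the new pre-validation step: preValidateCredentialInputs clones the operations (structuredClone) before deleting anything, so the caller's array is never mutated; hosted-model apiKeys are dropped silently (no entry in errors), while invalid credentials are both removed and reported. A minimal TypeScript usage sketch of that contract; the operation shape is partly hypothetical (only block_id, params.type, and params.inputs appear in the diff, and the model and key values are illustrative):

// Hypothetical operation: an agent block submitted with a model plus an apiKey.
const operations = [
  {
    block_id: 'agent-1', // field name taken from the diff
    params: { type: 'agent', inputs: { model: 'gpt-4o', apiKey: 'sk-live-1234' } },
  },
] as EditWorkflowOperation[]

const { filteredOperations, errors } = await preValidateCredentialInputs(operations, {
  userId: 'user-123',
})
// If this deployment is hosted and 'gpt-4o' is in getHostedModels(), the key is gone:
//   filteredOperations[0].params.inputs -> { model: 'gpt-4o' }
//   errors -> [] (apiKey filtering is silent; only invalid credentials produce errors)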

View File

@@ -9,7 +9,6 @@ import {
   generateToolUseId,
 } from '@/providers/anthropic/utils'
 import {
-  getMaxOutputTokensForModel,
   getProviderDefaultModel,
   getProviderModels,
   supportsNativeStructuredOutputs,
@@ -179,9 +178,7 @@
       model: request.model,
       messages,
       system: systemPrompt,
-      max_tokens:
-        Number.parseInt(String(request.maxTokens)) ||
-        getMaxOutputTokensForModel(request.model, request.stream ?? false),
+      max_tokens: Number.parseInt(String(request.maxTokens)) || 1024,
       temperature: Number.parseFloat(String(request.temperature ?? 0.7)),
     }
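
A note on the new max_tokens fallback (the Bedrock diff below uses the same pattern with 4096): Number.parseInt(String(request.maxTokens)) yields NaN when maxTokens is unset, and NaN is falsy, so || selects the hardcoded default. A standalone TypeScript sketch of the behavior, not tied to the provider code:

const pickMaxTokens = (maxTokens?: number | string): number =>
  Number.parseInt(String(maxTokens)) || 1024

pickMaxTokens(undefined) // 1024: String(undefined) is 'undefined', parseInt gives NaN
pickMaxTokens('2048')    // 2048
pickMaxTokens(0)         // 1024: an explicit 0 also falls back, since 0 is falsy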

View File

@@ -20,11 +20,7 @@ import {
   generateToolUseId,
   getBedrockInferenceProfileId,
 } from '@/providers/bedrock/utils'
-import {
-  getMaxOutputTokensForModel,
-  getProviderDefaultModel,
-  getProviderModels,
-} from '@/providers/models'
+import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
 import type {
   ProviderConfig,
   ProviderRequest,
@@ -263,9 +259,7 @@
 
     const inferenceConfig = {
       temperature: Number.parseFloat(String(request.temperature ?? 0.7)),
-      maxTokens:
-        Number.parseInt(String(request.maxTokens)) ||
-        getMaxOutputTokensForModel(request.model, request.stream ?? false),
+      maxTokens: Number.parseInt(String(request.maxTokens)) || 4096,
     }
 
     const shouldStreamToolCalls = request.streamToolCalls ?? false

View File

@@ -34,12 +34,6 @@ export interface ModelCapabilities {
   toolUsageControl?: boolean
   computerUse?: boolean
   nativeStructuredOutputs?: boolean
-  maxOutputTokens?: {
-    /** Maximum tokens for streaming requests */
-    max: number
-    /** Safe default for non-streaming requests (to avoid timeout issues) */
-    default: number
-  }
   reasoningEffort?: {
     values: string[]
   }
@@ -619,7 +613,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
       capabilities: {
        temperature: { min: 0, max: 1 },
        nativeStructuredOutputs: true,
-       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -634,7 +627,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        nativeStructuredOutputs: true,
-       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -648,7 +640,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      },
      capabilities: {
        temperature: { min: 0, max: 1 },
-       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -663,7 +654,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        nativeStructuredOutputs: true,
-       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -678,7 +668,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        nativeStructuredOutputs: true,
-       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -692,7 +681,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      },
      capabilities: {
        temperature: { min: 0, max: 1 },
-       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -707,7 +695,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        computerUse: true,
-       maxOutputTokens: { max: 8192, default: 8192 },
      },
      contextWindow: 200000,
    },
@@ -722,7 +709,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        computerUse: true,
-       maxOutputTokens: { max: 8192, default: 8192 },
      },
      contextWindow: 200000,
    },
@@ -1669,7 +1655,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        nativeStructuredOutputs: true,
-       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -1683,7 +1668,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        nativeStructuredOutputs: true,
-       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -1697,7 +1681,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        nativeStructuredOutputs: true,
-       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -1711,7 +1694,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
      capabilities: {
        temperature: { min: 0, max: 1 },
        nativeStructuredOutputs: true,
-       maxOutputTokens: { max: 64000, default: 4096 },
      },
      contextWindow: 200000,
    },
@@ -2351,31 +2333,3 @@ export function getThinkingLevelsForModel(modelId: string): string[] | null {
   const capability = getThinkingCapability(modelId)
   return capability?.levels ?? null
 }
-
-/**
- * Get the max output tokens for a specific model
- * Returns the model's max capacity for streaming requests,
- * or the model's safe default for non-streaming requests to avoid timeout issues.
- *
- * @param modelId - The model ID
- * @param streaming - Whether the request is streaming (default: false)
- */
-export function getMaxOutputTokensForModel(modelId: string, streaming = false): number {
-  const normalizedModelId = modelId.toLowerCase()
-  const STANDARD_MAX_OUTPUT_TOKENS = 4096
-
-  for (const provider of Object.values(PROVIDER_DEFINITIONS)) {
-    for (const model of provider.models) {
-      const baseModelId = model.id.toLowerCase()
-      if (normalizedModelId === baseModelId || normalizedModelId.startsWith(`${baseModelId}-`)) {
-        const outputTokens = model.capabilities.maxOutputTokens
-        if (outputTokens) {
-          return streaming ? outputTokens.max : outputTokens.default
-        }
-        return STANDARD_MAX_OUTPUT_TOKENS
-      }
-    }
-  }
-
-  return STANDARD_MAX_OUTPUT_TOKENS
-}
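
One behavior that disappears with getMaxOutputTokensForModel is its prefix matching: a dated model variant resolved to its base model's capability entry via startsWith. A small TypeScript sketch of that lookup rule, using hypothetical model IDs:

// A versioned ID such as 'claude-sonnet-4-20250514' (hypothetical) resolves to the
// base entry 'claude-sonnet-4' under the removed helper's matching rule.
const baseModelId = 'claude-sonnet-4'.toLowerCase()
const requested = 'claude-sonnet-4-20250514'.toLowerCase()
const matches = requested === baseModelId || requested.startsWith(`${baseModelId}-`) // true

After this change, providers no longer consult per-model caps at all: when a request carries no maxTokens, Anthropic falls back to a flat 1024 and Bedrock to 4096 (see the provider diffs above).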

View File

@@ -8,7 +8,6 @@ import {
   getComputerUseModels,
   getEmbeddingModelPricing,
   getHostedModels as getHostedModelsFromDefinitions,
-  getMaxOutputTokensForModel as getMaxOutputTokensForModelFromDefinitions,
   getMaxTemperature as getMaxTempFromDefinitions,
   getModelPricing as getModelPricingFromDefinitions,
   getModelsWithReasoningEffort,
@@ -993,18 +992,6 @@ export function getThinkingLevelsForModel(model: string): string[] | null {
   return getThinkingLevelsForModelFromDefinitions(model)
 }
 
-/**
- * Get max output tokens for a specific model
- * Returns the model's maxOutputTokens capability for streaming requests,
- * or a conservative default (8192) for non-streaming requests to avoid timeout issues.
- *
- * @param model - The model ID
- * @param streaming - Whether the request is streaming (default: false)
- */
-export function getMaxOutputTokensForModel(model: string, streaming = false): number {
-  return getMaxOutputTokensForModelFromDefinitions(model, streaming)
-}
-
 /**
  * Prepare tool execution parameters, separating tool parameters from system parameters
  */

View File

@@ -5,7 +5,7 @@ import type { ToolConfig, ToolResponse } from '@/tools/types'
 const logger = createLogger('BrowserUseTool')
 
 const POLL_INTERVAL_MS = 5000
-const MAX_POLL_TIME_MS = 600000 // 10 minutes
+const MAX_POLL_TIME_MS = 180000
 const MAX_CONSECUTIVE_ERRORS = 3
 
 async function createSessionWithProfile(