diff --git a/apps/sim/executor/execution/block-executor.ts b/apps/sim/executor/execution/block-executor.ts
index f159e4db0..e73f57323 100644
--- a/apps/sim/executor/execution/block-executor.ts
+++ b/apps/sim/executor/execution/block-executor.ts
@@ -28,6 +28,7 @@ import type {
 } from '@/executor/types'
 import { streamingResponseFormatProcessor } from '@/executor/utils'
 import { buildBlockExecutionError, normalizeError } from '@/executor/utils/errors'
+import { isJSONString } from '@/executor/utils/json'
 import { filterOutputForLog } from '@/executor/utils/output-filter'
 import { validateBlockType } from '@/executor/utils/permission-check'
 import type { VariableResolver } from '@/executor/variables/resolver'
@@ -86,7 +87,7 @@ export class BlockExecutor {
       resolvedInputs = this.resolver.resolveInputs(ctx, node.id, block.config.params, block)
 
       if (blockLog) {
-        blockLog.input = resolvedInputs
+        blockLog.input = this.parseJsonInputs(resolvedInputs)
       }
     } catch (error) {
       cleanupSelfReference?.()
@@ -157,7 +158,14 @@
       const displayOutput = filterOutputForLog(block.metadata?.id || '', normalizedOutput, {
         block,
       })
-      this.callOnBlockComplete(ctx, node, block, resolvedInputs, displayOutput, duration)
+      this.callOnBlockComplete(
+        ctx,
+        node,
+        block,
+        this.parseJsonInputs(resolvedInputs),
+        displayOutput,
+        duration
+      )
     }
 
     return normalizedOutput
@@ -233,7 +241,7 @@
       blockLog.durationMs = duration
       blockLog.success = false
       blockLog.error = errorMessage
-      blockLog.input = input
+      blockLog.input = this.parseJsonInputs(input)
       blockLog.output = filterOutputForLog(block.metadata?.id || '', errorOutput, { block })
     }
 
@@ -248,7 +256,14 @@
     if (!isSentinel) {
       const displayOutput = filterOutputForLog(block.metadata?.id || '', errorOutput, { block })
 
-      this.callOnBlockComplete(ctx, node, block, input, displayOutput, duration)
+      this.callOnBlockComplete(
+        ctx,
+        node,
+        block,
+        this.parseJsonInputs(input),
+        displayOutput,
+        duration
+      )
     }
 
     const hasErrorPort = this.hasErrorPortEdge(node)
@@ -336,6 +351,36 @@
     return { result: output }
   }
 
+  /**
+   * Parse JSON string inputs to objects for log display only.
+   * Attempts to parse any string that looks like JSON.
+   * Returns a new object - does not mutate the original inputs.
+   */
+  private parseJsonInputs(inputs: Record<string, any>): Record<string, any> {
+    let result = inputs
+    let hasChanges = false
+
+    for (const [key, value] of Object.entries(inputs)) {
+      // isJSONString is a quick heuristic (checks for { or [), not a validator.
+      // Invalid JSON is safely caught below - this just avoids JSON.parse on every string.
+      if (typeof value !== 'string' || !isJSONString(value)) {
+        continue
+      }
+
+      try {
+        if (!hasChanges) {
+          result = { ...inputs }
+          hasChanges = true
+        }
+        result[key] = JSON.parse(value.trim())
+      } catch {
+        // Not valid JSON, keep original string
+      }
+    }
+
+    return result
+  }
+
   private callOnBlockStart(ctx: ExecutionContext, node: DAGNode, block: SerializedBlock): void {
     const blockId = node.id
     const blockName = block.metadata?.name ?? blockId
diff --git a/apps/sim/executor/handlers/agent/agent-handler.ts b/apps/sim/executor/handlers/agent/agent-handler.ts
index 6c0d19fc3..a22f7a8c4 100644
--- a/apps/sim/executor/handlers/agent/agent-handler.ts
+++ b/apps/sim/executor/handlers/agent/agent-handler.ts
@@ -936,8 +936,8 @@ export class AgentBlockHandler implements BlockHandler {
       systemPrompt: validMessages ? undefined : inputs.systemPrompt,
       context: validMessages ? undefined : stringifyJSON(messages),
       tools: formattedTools,
-      temperature: inputs.temperature,
-      maxTokens: inputs.maxTokens,
+      temperature: inputs.temperature != null ? Number(inputs.temperature) : undefined,
+      maxTokens: inputs.maxTokens != null ? Number(inputs.maxTokens) : undefined,
       apiKey: inputs.apiKey,
       azureEndpoint: inputs.azureEndpoint,
       azureApiVersion: inputs.azureApiVersion,
diff --git a/apps/sim/providers/azure-openai/index.ts b/apps/sim/providers/azure-openai/index.ts
index f912e92be..195103ffe 100644
--- a/apps/sim/providers/azure-openai/index.ts
+++ b/apps/sim/providers/azure-openai/index.ts
@@ -102,7 +102,7 @@ export const azureOpenAIProvider: ProviderConfig = {
     }
 
     if (request.temperature !== undefined) payload.temperature = request.temperature
-    if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
     if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
     if (request.verbosity !== undefined) payload.verbosity = request.verbosity
 
diff --git a/apps/sim/providers/cerebras/index.ts b/apps/sim/providers/cerebras/index.ts
index 3953c6715..c18560048 100644
--- a/apps/sim/providers/cerebras/index.ts
+++ b/apps/sim/providers/cerebras/index.ts
@@ -77,7 +77,7 @@ export const cerebrasProvider: ProviderConfig = {
       messages: allMessages,
     }
     if (request.temperature !== undefined) payload.temperature = request.temperature
-    if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
     if (request.responseFormat) {
       payload.response_format = {
         type: 'json_schema',
diff --git a/apps/sim/providers/deepseek/index.ts b/apps/sim/providers/deepseek/index.ts
index 2aa92b04f..026342498 100644
--- a/apps/sim/providers/deepseek/index.ts
+++ b/apps/sim/providers/deepseek/index.ts
@@ -81,7 +81,7 @@ export const deepseekProvider: ProviderConfig = {
     }
 
     if (request.temperature !== undefined) payload.temperature = request.temperature
-    if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) payload.max_tokens = request.maxTokens
 
     let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null
 
diff --git a/apps/sim/providers/gemini/core.ts b/apps/sim/providers/gemini/core.ts
index a7aca1aaa..2dca22e5b 100644
--- a/apps/sim/providers/gemini/core.ts
+++ b/apps/sim/providers/gemini/core.ts
@@ -349,7 +349,7 @@ export async function executeGeminiRequest(
   if (request.temperature !== undefined) {
     geminiConfig.temperature = request.temperature
   }
-  if (request.maxTokens !== undefined) {
+  if (request.maxTokens != null) {
     geminiConfig.maxOutputTokens = request.maxTokens
   }
   if (systemInstruction) {
diff --git a/apps/sim/providers/groq/index.ts b/apps/sim/providers/groq/index.ts
index c5dad01ef..7be9b7386 100644
--- a/apps/sim/providers/groq/index.ts
+++ b/apps/sim/providers/groq/index.ts
@@ -74,7 +74,7 @@ export const groqProvider: ProviderConfig = {
     }
 
     if (request.temperature !== undefined) payload.temperature = request.temperature
-    if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
 
     if (request.responseFormat) {
       payload.response_format = {
diff --git a/apps/sim/providers/mistral/index.ts b/apps/sim/providers/mistral/index.ts
index 736b11c24..f99a3e210 100644
--- a/apps/sim/providers/mistral/index.ts
+++ b/apps/sim/providers/mistral/index.ts
@@ -91,7 +91,7 @@ export const mistralProvider: ProviderConfig = {
     }
 
     if (request.temperature !== undefined) payload.temperature = request.temperature
-    if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) payload.max_tokens = request.maxTokens
 
     if (request.responseFormat) {
       payload.response_format = {
diff --git a/apps/sim/providers/ollama/index.ts b/apps/sim/providers/ollama/index.ts
index 7b73d1f18..921c1afd0 100644
--- a/apps/sim/providers/ollama/index.ts
+++ b/apps/sim/providers/ollama/index.ts
@@ -105,7 +105,7 @@ export const ollamaProvider: ProviderConfig = {
     }
 
     if (request.temperature !== undefined) payload.temperature = request.temperature
-    if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) payload.max_tokens = request.maxTokens
 
     if (request.responseFormat) {
       payload.response_format = {
diff --git a/apps/sim/providers/openai/index.ts b/apps/sim/providers/openai/index.ts
index 0d7342fc9..b2cecfceb 100644
--- a/apps/sim/providers/openai/index.ts
+++ b/apps/sim/providers/openai/index.ts
@@ -81,7 +81,7 @@ export const openaiProvider: ProviderConfig = {
     }
 
     if (request.temperature !== undefined) payload.temperature = request.temperature
-    if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
     if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
     if (request.verbosity !== undefined) payload.verbosity = request.verbosity
 
diff --git a/apps/sim/providers/openrouter/index.ts b/apps/sim/providers/openrouter/index.ts
index d937e3d0e..bf54a6457 100644
--- a/apps/sim/providers/openrouter/index.ts
+++ b/apps/sim/providers/openrouter/index.ts
@@ -121,7 +121,7 @@ export const openRouterProvider: ProviderConfig = {
     }
 
     if (request.temperature !== undefined) payload.temperature = request.temperature
-    if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) payload.max_tokens = request.maxTokens
 
     let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null
     let hasActiveTools = false
diff --git a/apps/sim/providers/vllm/index.ts b/apps/sim/providers/vllm/index.ts
index 4af4ae9d7..0df587264 100644
--- a/apps/sim/providers/vllm/index.ts
+++ b/apps/sim/providers/vllm/index.ts
@@ -135,7 +135,7 @@ export const vllmProvider: ProviderConfig = {
     }
 
     if (request.temperature !== undefined) payload.temperature = request.temperature
-    if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
 
     if (request.responseFormat) {
       payload.response_format = {
diff --git a/apps/sim/providers/xai/index.ts b/apps/sim/providers/xai/index.ts
index 72602ec50..8138265a3 100644
--- a/apps/sim/providers/xai/index.ts
+++ b/apps/sim/providers/xai/index.ts
@@ -92,7 +92,7 @@ export const xAIProvider: ProviderConfig = {
     }
 
     if (request.temperature !== undefined) basePayload.temperature = request.temperature
-    if (request.maxTokens !== undefined) basePayload.max_tokens = request.maxTokens
+    if (request.maxTokens != null) basePayload.max_completion_tokens = request.maxTokens
 
     let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null
     if (tools?.length) {
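
Reviewer note on `parseJsonInputs`: a minimal standalone sketch of the new helper's behavior. The `isJSONString` stand-in below is an assumption based on the inline comment in the diff (a leading-`{`/`[` shape check, not a validator); this re-implementation is for illustration only, not the shipped code path:

```ts
// Stand-in for the heuristic imported from '@/executor/utils/json' (assumed behavior).
const isJSONString = (s: string): boolean => {
  const t = s.trim()
  return t.startsWith('{') || t.startsWith('[')
}

// Mirrors the diffed method: copy-on-write, and parse failures keep the raw string.
function parseJsonInputs(inputs: Record<string, any>): Record<string, any> {
  let result = inputs
  let hasChanges = false

  for (const [key, value] of Object.entries(inputs)) {
    if (typeof value !== 'string' || !isJSONString(value)) continue
    try {
      if (!hasChanges) {
        result = { ...inputs } // shallow copy exactly once, on the first candidate
        hasChanges = true
      }
      result[key] = JSON.parse(value.trim())
    } catch {
      // Not valid JSON after all - the spread above already kept the original string.
    }
  }
  return result
}

const inputs = {
  body: '{"user":"ada"}', // parsed into an object for the log
  note: '{not json',      // heuristic matches, parse fails, string kept
  name: 'ada',            // plain string, skipped entirely
}
const logged = parseJsonInputs(inputs)
console.log(typeof logged.body) // "object"
console.log(logged.note)        // "{not json"
console.log(inputs === logged)  // false; inputs itself is never mutated
```

One edge worth noting: the shallow copy happens before `JSON.parse`, so an object whose only JSON-looking value fails to parse still yields a new object (`result !== inputs`) with identical contents. That is harmless here, since the result feeds log display only.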
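
Two behavioral notes on the provider-side changes. First, loosening the guard from `!== undefined` to `!= null` also filters out explicit `null` values, which the old guard would have copied into the request payload. A contrast sketch (`buildPayload` is a hypothetical name for illustration):

```ts
function buildPayload(request: { maxTokens?: number | null }) {
  const payload: Record<string, unknown> = {}
  if (request.maxTokens !== undefined) payload.old = request.maxTokens // null leaks through
  if (request.maxTokens != null) payload.new = request.maxTokens       // null is filtered out
  return payload
}

console.log(buildPayload({ maxTokens: null })) // { old: null } - only the old guard kept it
```

Second, the rename from `max_tokens` to `max_completion_tokens` applies only to the OpenAI-style providers (azure-openai, cerebras, groq, openai, vllm, xai); OpenAI's chat completions API has deprecated `max_tokens` in favor of `max_completion_tokens`, while the remaining providers (deepseek, mistral, ollama, openrouter) keep `max_tokens`. The `Number(...)` coercion added in `agent-handler.ts` presumably guards against `temperature`/`maxTokens` arriving as strings from resolved workflow variables; `Number('0.7')` is `0.7`, so numeric strings pass through cleanly.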