From 5158a00b54fd3a003a373e96136391d0640e29f2 Mon Sep 17 00:00:00 2001 From: Siddharth Ganesan <33737564+Sg312@users.noreply.github.com> Date: Tue, 22 Jul 2025 13:02:24 -0700 Subject: [PATCH] feat(copilot-v1): Copilot v1 (#662) * Fix docs agent * Doc agent fixes * Refactor copilot * Lint * Update yaml editor * Lint * Fix block tool * Lint * Get block metadata tool * Lint * Yaml changes * Lint * Fixes? * Lint * Better yaml language * Lint * UPdate * Lint * Fix condition blocks * lint * Fix start block * Fix starter block stuff * Lint * Fix yaml ui * Lint * get yaml tool * Lint * hi * Lint * Agnet * Copilot UI * Lint * Better workflow builder * Lint * REHYDRATION * Lint * Auto layout * Lint * Fixes * Lint * Chatbar sizing * Lint * Initial chat fixes * Lint * Dropdown overflow * Lint * UI text * Lint * Sample question pills * Lint * Ui button * Fix dropdown appearance * Lint * Modal fixes * UI Updates * Lint * Initial ask vs agent mode * Lint * Ask vs agent * Lint * Ask udpate * Ui fixes * Lint * User message width * Chat leak fix * Lint * Agent ui * Checkpointing * Lint * Checkpoints * Lint * Tweaks * Sample questions * Lint * Modal full screen mode * Lint * Prompt updates * Cleaning * Lint * Prompt update * Streaming v1 * Lint * Tool call inline * Lint * Checkpoint * Lint * Fix lint * Sizing * Lint * Copilot ui tool call fixes * Remove output from tool call ui * Updates * Lint * Checkpoitn * Loading icon * Lint * Pulse * Lint * Modal fixes * Sidebar padding * Checkpoint * checkpoint * feat(platform): new UI and templates (#639) (#693) * improvement: control bar * improvement: debug flow * improvement: control bar hovers and skeleton loading * improvement: completed control bar * improvement: panel tab selector complete * refactor: deleted notifications and history dropdown * improvement: chat UI complete * fix: tab change on control bar run * improvement: finshed console (audio display not working) * fix: text wrapping in console content * improvement: audio UI * 
improvement: image display * feat: add input to console * improvement: code input and showing input on errors * feat: download chat and console * improvement: expandable panel and console visibility * improvement: empty state UI * improvement: finished variables * fix: image in console entry * improvement: sidebar and templates ui * feat: uploading and fetching templates * improvement: sidebar and control bar * improvement: templates * feat: templates done * fix(sockets): remove package-lock * fix: sidebar scroll going over sidebar height (#709) * Checkpoint * Fix build error * Checkpoitn * Docs updates * Checkpoint * Streaming vs non streaming * Clean up yaml save * Fix revert checkpoitn yaml * Doc fixes * Small docs fix * Clean up old yaml docs * Doc updates * Hide copilot * Revert width * Db migration fixes * Add snapshot * Remove space from mdx * Add spaces * Lint * Address greptile comments * lint fix * Hide copilot --------- Co-authored-by: Vikhyath Mondreti Co-authored-by: Waleed Latif Co-authored-by: Emir Karabeg <78010029+emir-karabeg@users.noreply.github.com> Co-authored-by: Siddharth Sim --- apps/docs/content/docs/blocks/loop.mdx | 2 +- apps/docs/content/docs/blocks/parallel.mdx | 2 +- apps/docs/content/docs/blocks/response.mdx | 3 +- apps/docs/content/docs/blocks/workflow.mdx | 2 +- apps/docs/content/docs/meta.json | 1 + apps/docs/content/docs/tools/index.mdx | 11 + .../content/docs/yaml/block-reference.mdx | 238 + apps/docs/content/docs/yaml/blocks/agent.mdx | 218 + apps/docs/content/docs/yaml/blocks/api.mdx | 179 + .../content/docs/yaml/blocks/condition.mdx | 165 + .../content/docs/yaml/blocks/evaluator.mdx | 255 + .../content/docs/yaml/blocks/function.mdx | 162 + apps/docs/content/docs/yaml/blocks/index.mdx | 151 + apps/docs/content/docs/yaml/blocks/loop.mdx | 305 + apps/docs/content/docs/yaml/blocks/meta.json | 17 + .../content/docs/yaml/blocks/parallel.mdx | 322 + .../content/docs/yaml/blocks/response.mdx | 140 + 
apps/docs/content/docs/yaml/blocks/router.mdx | 200 + .../docs/content/docs/yaml/blocks/starter.mdx | 183 + .../docs/content/docs/yaml/blocks/webhook.mdx | 278 + .../content/docs/yaml/blocks/workflow.mdx | 299 + apps/docs/content/docs/yaml/examples.mdx | 273 + apps/docs/content/docs/yaml/index.mdx | 159 + apps/docs/content/docs/yaml/meta.json | 4 + .../copilot/checkpoints/[id]/revert/route.ts | 138 + apps/sim/app/api/copilot/checkpoints/route.ts | 64 + apps/sim/app/api/copilot/docs/route.ts | 281 - apps/sim/app/api/copilot/route.ts | 6 +- apps/sim/app/api/docs/search/route.ts | 2 +- apps/sim/app/api/tools/edit-workflow/route.ts | 412 ++ .../sim/app/api/tools/get-all-blocks/route.ts | 66 + .../api/tools/get-blocks-metadata/route.ts | 239 + .../app/api/tools/get-user-workflow/route.ts | 6 +- .../app/api/tools/get-yaml-structure/route.ts | 25 + .../api/workflows/[id]/autolayout/route.ts | 223 + .../sim/app/api/workflows/[id]/state/route.ts | 2 +- apps/sim/app/api/workflows/[id]/yaml/route.ts | 538 ++ .../components/message/message.tsx | 86 +- .../components/control-bar/control-bar.tsx | 32 + .../copilot/components/checkpoint-panel.tsx | 156 + .../copilot-modal/copilot-modal.tsx | 501 +- .../professional-input/professional-input.tsx | 98 + .../professional-message.tsx | 403 ++ .../copilot/components/welcome/welcome.tsx | 58 + .../components/copilot/copilot-modal.tsx | 0 .../panel/components/copilot/copilot.tsx | 493 +- .../w/[workflowId]/components/panel/panel.tsx | 48 +- .../components/sub-block/components/table.tsx | 59 +- .../workflow-text-editor/workflow-applier.ts | 276 +- .../workflow-text-editor-modal.tsx | 10 + .../[workspaceId]/w/[workflowId]/workflow.tsx | 4 + .../create-menu/import-controls.tsx | 57 +- apps/sim/components/ui/tool-call.tsx | 302 + apps/sim/contexts/socket-context.tsx | 156 +- apps/sim/db/migrations/0058_clean_shiva.sql | 20 + .../sim/db/migrations/meta/0058_snapshot.json | 5865 +++++++++++++++++ apps/sim/db/migrations/meta/_journal.json | 7 + 
apps/sim/db/schema.ts | 42 + .../lib/autolayout/algorithms/hierarchical.ts | 427 ++ apps/sim/lib/autolayout/algorithms/smart.ts | 587 ++ apps/sim/lib/autolayout/service.ts | 544 ++ apps/sim/lib/autolayout/types.ts | 101 + apps/sim/lib/copilot-api.ts | 447 -- apps/sim/lib/copilot/api.ts | 486 ++ apps/sim/lib/copilot/config.ts | 418 +- apps/sim/lib/copilot/prompts.ts | 662 ++ apps/sim/lib/copilot/service.ts | 648 +- apps/sim/lib/copilot/tools.ts | 112 +- apps/sim/lib/tool-call-parser.ts | 414 ++ apps/sim/lib/workflows/yaml-generator.ts | 64 +- apps/sim/package.json | 6 + apps/sim/providers/anthropic/index.ts | 870 ++- apps/sim/providers/azure-openai/index.ts | 9 +- apps/sim/providers/cerebras/index.ts | 9 +- apps/sim/providers/deepseek/index.ts | 9 +- apps/sim/providers/groq/index.ts | 9 +- apps/sim/providers/ollama/index.ts | 9 +- apps/sim/providers/types.ts | 2 + apps/sim/providers/utils.ts | 11 +- apps/sim/providers/xai/index.ts | 9 +- apps/sim/socket-server/rooms/manager.ts | 48 + apps/sim/socket-server/routes/http.ts | 42 + apps/sim/stores/copilot/store.ts | 438 +- apps/sim/stores/copilot/types.ts | 105 +- apps/sim/stores/workflows/subblock/store.ts | 38 +- apps/sim/stores/workflows/yaml/importer.ts | 479 +- .../stores/workflows/yaml/parsing-utils.ts | 653 ++ apps/sim/tools/blocks/edit-workflow.ts | 88 + apps/sim/tools/blocks/get-all.ts | 78 + apps/sim/tools/blocks/get-metadata.ts | 104 + apps/sim/tools/blocks/get-yaml-structure.ts | 56 + apps/sim/tools/docs/search.ts | 4 +- apps/sim/tools/utils.ts | 15 +- apps/sim/types/tool-call.ts | 41 + bun.lock | 105 +- package.json | 1 + 96 files changed, 19711 insertions(+), 2681 deletions(-) create mode 100644 apps/docs/content/docs/yaml/block-reference.mdx create mode 100644 apps/docs/content/docs/yaml/blocks/agent.mdx create mode 100644 apps/docs/content/docs/yaml/blocks/api.mdx create mode 100644 apps/docs/content/docs/yaml/blocks/condition.mdx create mode 100644 apps/docs/content/docs/yaml/blocks/evaluator.mdx create 
mode 100644 apps/docs/content/docs/yaml/blocks/function.mdx create mode 100644 apps/docs/content/docs/yaml/blocks/index.mdx create mode 100644 apps/docs/content/docs/yaml/blocks/loop.mdx create mode 100644 apps/docs/content/docs/yaml/blocks/meta.json create mode 100644 apps/docs/content/docs/yaml/blocks/parallel.mdx create mode 100644 apps/docs/content/docs/yaml/blocks/response.mdx create mode 100644 apps/docs/content/docs/yaml/blocks/router.mdx create mode 100644 apps/docs/content/docs/yaml/blocks/starter.mdx create mode 100644 apps/docs/content/docs/yaml/blocks/webhook.mdx create mode 100644 apps/docs/content/docs/yaml/blocks/workflow.mdx create mode 100644 apps/docs/content/docs/yaml/examples.mdx create mode 100644 apps/docs/content/docs/yaml/index.mdx create mode 100644 apps/docs/content/docs/yaml/meta.json create mode 100644 apps/sim/app/api/copilot/checkpoints/[id]/revert/route.ts create mode 100644 apps/sim/app/api/copilot/checkpoints/route.ts delete mode 100644 apps/sim/app/api/copilot/docs/route.ts create mode 100644 apps/sim/app/api/tools/edit-workflow/route.ts create mode 100644 apps/sim/app/api/tools/get-all-blocks/route.ts create mode 100644 apps/sim/app/api/tools/get-blocks-metadata/route.ts create mode 100644 apps/sim/app/api/tools/get-yaml-structure/route.ts create mode 100644 apps/sim/app/api/workflows/[id]/autolayout/route.ts create mode 100644 apps/sim/app/api/workflows/[id]/yaml/route.ts create mode 100644 apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/checkpoint-panel.tsx create mode 100644 apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/professional-input/professional-input.tsx create mode 100644 apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/professional-message/professional-message.tsx create mode 100644 
apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/welcome/welcome.tsx create mode 100644 apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/copilot-modal.tsx create mode 100644 apps/sim/components/ui/tool-call.tsx create mode 100644 apps/sim/db/migrations/0058_clean_shiva.sql create mode 100644 apps/sim/db/migrations/meta/0058_snapshot.json create mode 100644 apps/sim/lib/autolayout/algorithms/hierarchical.ts create mode 100644 apps/sim/lib/autolayout/algorithms/smart.ts create mode 100644 apps/sim/lib/autolayout/service.ts create mode 100644 apps/sim/lib/autolayout/types.ts delete mode 100644 apps/sim/lib/copilot-api.ts create mode 100644 apps/sim/lib/copilot/api.ts create mode 100644 apps/sim/lib/copilot/prompts.ts create mode 100644 apps/sim/lib/tool-call-parser.ts create mode 100644 apps/sim/stores/workflows/yaml/parsing-utils.ts create mode 100644 apps/sim/tools/blocks/edit-workflow.ts create mode 100644 apps/sim/tools/blocks/get-all.ts create mode 100644 apps/sim/tools/blocks/get-metadata.ts create mode 100644 apps/sim/tools/blocks/get-yaml-structure.ts create mode 100644 apps/sim/types/tool-call.ts diff --git a/apps/docs/content/docs/blocks/loop.mdx b/apps/docs/content/docs/blocks/loop.mdx index 777988015..5ee2ae296 100644 --- a/apps/docs/content/docs/blocks/loop.mdx +++ b/apps/docs/content/docs/blocks/loop.mdx @@ -172,4 +172,4 @@ After a loop completes, you can access aggregated results: - **Set reasonable limits**: Keep iteration counts reasonable to avoid long execution times - **Use ForEach for collections**: When processing arrays or objects, use ForEach instead of For loops -- **Handle errors gracefully**: Consider adding error handling inside loops for robust workflows \ No newline at end of file +- **Handle errors gracefully**: Consider adding error handling inside loops for robust workflows diff --git a/apps/docs/content/docs/blocks/parallel.mdx 
b/apps/docs/content/docs/blocks/parallel.mdx index 719b2ab89..c997d6689 100644 --- a/apps/docs/content/docs/blocks/parallel.mdx +++ b/apps/docs/content/docs/blocks/parallel.mdx @@ -207,4 +207,4 @@ Understanding when to use each: - **Independent operations only**: Ensure operations don't depend on each other - **Handle rate limits**: Add delays or throttling for API-heavy workflows -- **Error handling**: Each instance should handle its own errors gracefully \ No newline at end of file +- **Error handling**: Each instance should handle its own errors gracefully diff --git a/apps/docs/content/docs/blocks/response.mdx b/apps/docs/content/docs/blocks/response.mdx index ad6f5f3cd..5523cffdb 100644 --- a/apps/docs/content/docs/blocks/response.mdx +++ b/apps/docs/content/docs/blocks/response.mdx @@ -182,4 +182,5 @@ headers: - **Structure your responses consistently**: Maintain a consistent JSON structure across all your API endpoints for better developer experience - **Include relevant metadata**: Add timestamps and version information to help with debugging and monitoring - **Handle errors gracefully**: Use conditional logic in your workflow to set appropriate error responses with descriptive messages -- **Validate variable references**: Ensure all referenced variables exist and contain the expected data types before the Response block executes \ No newline at end of file +- **Validate variable references**: Ensure all referenced variables exist and contain the expected data types before the Response block executes + diff --git a/apps/docs/content/docs/blocks/workflow.mdx b/apps/docs/content/docs/blocks/workflow.mdx index fce0d9497..27040ecfc 100644 --- a/apps/docs/content/docs/blocks/workflow.mdx +++ b/apps/docs/content/docs/blocks/workflow.mdx @@ -256,4 +256,4 @@ return { - **Document dependencies**: Clearly document which workflows depend on others and maintain dependency maps - **Test independently**: Ensure child workflows can be tested and validated independently 
from parent workflows - **Monitor performance**: Be aware that nested workflows can impact overall execution time and resource usage -- **Use semantic naming**: Give workflows descriptive names that clearly indicate their purpose and functionality \ No newline at end of file +- **Use semantic naming**: Give workflows descriptive names that clearly indicate their purpose and functionality diff --git a/apps/docs/content/docs/meta.json b/apps/docs/content/docs/meta.json index ae9e41be4..3e4b1097c 100644 --- a/apps/docs/content/docs/meta.json +++ b/apps/docs/content/docs/meta.json @@ -14,6 +14,7 @@ "execution", "---Advanced---", "./variables/index", + "yaml", "---SDKs---", "./sdks/python", "./sdks/typescript" diff --git a/apps/docs/content/docs/tools/index.mdx b/apps/docs/content/docs/tools/index.mdx index 9dd3a80e1..95f69f3d7 100644 --- a/apps/docs/content/docs/tools/index.mdx +++ b/apps/docs/content/docs/tools/index.mdx @@ -64,3 +64,14 @@ Tools typically return structured data that can be processed by subsequent block - Status information Refer to each tool's specific documentation to understand its exact output format. + +## YAML Configuration + +For detailed YAML workflow configuration and syntax, see the [YAML Workflow Reference](/yaml) documentation. This includes comprehensive guides for: + +- **Block Reference Syntax**: How to connect and reference data between blocks +- **Tool Configuration**: Using tools in both standalone blocks and agent configurations +- **Environment Variables**: Secure handling of API keys and credentials +- **Complete Examples**: Real-world workflow patterns and configurations + +For specific tool parameters and configuration options, refer to each tool's individual documentation page. 
diff --git a/apps/docs/content/docs/yaml/block-reference.mdx b/apps/docs/content/docs/yaml/block-reference.mdx new file mode 100644 index 000000000..907e5c367 --- /dev/null +++ b/apps/docs/content/docs/yaml/block-reference.mdx @@ -0,0 +1,238 @@ +--- +title: Block Reference Syntax +description: How to reference data between blocks in YAML workflows +--- + +import { Callout } from 'fumadocs-ui/components/callout' +import { Tab, Tabs } from 'fumadocs-ui/components/tabs' + +Block references are the foundation of data flow in Sim Studio workflows. Understanding how to correctly reference outputs from one block as inputs to another is essential for building functional workflows. + +## Basic Reference Rules + +### 1. Use Block Names, Not Block IDs + + + + ```yaml + # Block definition + email-sender: + type: agent + name: "Email Generator" + # ... configuration + + # Reference the block + next-block: + inputs: + userPrompt: "Process this: " + ``` + + + ```yaml + # Block definition + email-sender: + type: agent + name: "Email Generator" + # ... configuration + + # ❌ Don't reference by block ID + next-block: + inputs: + userPrompt: "Process this: " + ``` + + + +### 2. Convert Names to Reference Format + +To create a block reference: + +1. **Take the block name**: "Email Generator" +2. **Convert to lowercase**: "email generator" +3. **Remove spaces and special characters**: "emailgenerator" +4. **Add property**: `` + +### 3. 
Use Correct Properties + +Different block types expose different properties: + +- **Agent blocks**: `.content` (the AI response) +- **Function blocks**: `.output` (the return value) +- **API blocks**: `.output` (the response data) +- **Tool blocks**: `.output` (the tool result) + +## Reference Examples + +### Common Block References + +```yaml +# Agent block outputs + # Primary AI response + # Token usage information + # Estimated cost + # Tool execution details + +# Function block outputs + # Function return value + # Error information (if any) + +# API block outputs + # Response data + # HTTP status code + # Response headers + +# Tool block outputs + # Tool execution result +``` + +### Multi-Word Block Names + +```yaml +# Block name: "Data Processor 2" + + +# Block name: "Email Validation Service" + + +# Block name: "Customer Info Agent" + +``` + +## Special Reference Cases + +### Starter Block + + + The starter block is always referenced as `` regardless of its actual name. + + +```yaml +# Starter block definition +my-custom-start: + type: starter + name: "Custom Workflow Start" + # ... 
configuration + +# Always reference as 'start' +agent-1: + inputs: + userPrompt: # ✅ Correct + # userPrompt: # ❌ Wrong +``` + +### Loop Variables + +Inside loop blocks, special variables are available: + +```yaml +# Available in loop child blocks + # Current iteration (0-based) + # Current item being processed (forEach loops) + # Full collection (forEach loops) +``` + +### Parallel Variables + +Inside parallel blocks, special variables are available: + +```yaml +# Available in parallel child blocks + # Instance number (0-based) + # Item for this instance + # Full collection +``` + +## Complex Reference Examples + +### Nested Data Access + +When referencing complex objects, use dot notation: + +```yaml +# If an agent returns structured data +data-analyzer: + type: agent + name: "Data Analyzer" + inputs: + responseFormat: | + { + "schema": { + "type": "object", + "properties": { + "analysis": {"type": "object"}, + "summary": {"type": "string"}, + "metrics": {"type": "object"} + } + } + } + +# Reference nested properties +next-step: + inputs: + userPrompt: | + Summary: + Score: + Full data: +``` + +### Multiple References in Text + +```yaml +email-composer: + type: agent + inputs: + userPrompt: | + Create an email with the following information: + + Customer: + Order Details: + Support Ticket: + + Original request: +``` + +### References in Code Blocks + +When using references in function blocks, they're replaced as JavaScript values: + +```yaml +data-processor: + type: function + inputs: + code: | + // References are replaced with actual values + const customerData = ; + const orderInfo = ; + const originalInput = ; + + // Process the data + return { + customer: customerData.name, + orderId: orderInfo.id, + processed: true + }; +``` + +## Reference Validation + +Sim Studio validates all references when importing YAML: + +### Valid References +- Block exists in the workflow +- Property is appropriate for block type +- No circular dependencies +- Proper syntax 
formatting + +### Common Errors +- **Block not found**: Referenced block doesn't exist +- **Wrong property**: Using `.content` on a function block +- **Typos**: Misspelled block names or properties +- **Circular references**: Block references itself directly or indirectly + +## Best Practices + +1. **Use descriptive block names**: Makes references more readable +2. **Be consistent**: Use the same naming convention throughout +3. **Check references**: Ensure all referenced blocks exist +4. **Avoid deep nesting**: Keep reference chains manageable +5. **Document complex flows**: Add comments to explain reference relationships \ No newline at end of file diff --git a/apps/docs/content/docs/yaml/blocks/agent.mdx b/apps/docs/content/docs/yaml/blocks/agent.mdx new file mode 100644 index 000000000..e93e16c16 --- /dev/null +++ b/apps/docs/content/docs/yaml/blocks/agent.mdx @@ -0,0 +1,218 @@ +--- +title: Agent Block YAML Schema +description: YAML configuration reference for Agent blocks +--- + +## Schema Definition + +```yaml +type: object +required: + - type + - name +properties: + type: + type: string + enum: [agent] + description: Block type identifier + name: + type: string + description: Display name for this agent block + inputs: + type: object + properties: + systemPrompt: + type: string + description: Instructions that define the agent's role and behavior + userPrompt: + type: string + description: Input content to process (can reference other blocks) + model: + type: string + description: AI model identifier (e.g., gpt-4o, gemini-2.5-pro, deepseek-chat) + temperature: + type: number + minimum: 0 + maximum: 2 + description: Response creativity level (varies by model) + apiKey: + type: string + description: API key for the model provider (use {{ENV_VAR}} format) + azureEndpoint: + type: string + description: Azure OpenAI endpoint URL (required for Azure models) + azureApiVersion: + type: string + description: Azure API version (required for Azure models) + memories: + 
type: string + description: Memory context from memory blocks + tools: + type: array + description: List of external tools the agent can use + items: + type: object + required: [type, title, toolId, operation, usageControl] + properties: + type: + type: string + description: Tool type identifier + title: + type: string + description: Human-readable display name + toolId: + type: string + description: Internal tool identifier + operation: + type: string + description: Tool operation/method name + usageControl: + type: string + enum: [auto, required, none] + description: When AI can use the tool + params: + type: object + description: Tool-specific configuration parameters + isExpanded: + type: boolean + description: UI state + default: false + responseFormat: + type: object + description: JSON Schema to enforce structured output + required: + - model + - apiKey + connections: + type: object + properties: + success: + type: string + description: Target block ID for successful execution + error: + type: string + description: Target block ID for error handling +``` + +## Tool Configuration + +Tools are defined as an array where each tool has this structure: + +```yaml +tools: + - type: # Tool type identifier (exa, gmail, slack, etc.) + title: # Human-readable display name + toolId: # Internal tool identifier + operation: # Tool operation/method name + usageControl: # When AI can use it (auto | required | none) + params: # Tool-specific configuration parameters + isExpanded: # UI state (optional, default: false) +``` + +## Connection Configuration + +Connections define where the workflow goes based on execution results: + +```yaml +connections: + success: # Target block ID for successful execution + error: # Target block ID for error handling (optional) +``` + +## Examples + +### Basic Agent + +```yaml +content-agent: + type: agent + name: "Content Analyzer 1" + inputs: + systemPrompt: "You are a helpful content analyzer. Be concise and clear." 
+ userPrompt: + model: gpt-4o + temperature: 0.3 + apiKey: '{{OPENAI_API_KEY}}' + connections: + success: summary-block + +summary-block: + type: agent + name: "Summary Generator" + inputs: + systemPrompt: "Create a brief summary of the analysis." + userPrompt: "Analyze this: " + model: gpt-4o + apiKey: '{{OPENAI_API_KEY}}' + connections: + success: final-step +``` + +### Agent with Tools + +```yaml +research-agent: + type: agent + name: "Research Assistant" + inputs: + systemPrompt: "Research the topic and provide detailed information." + userPrompt: + model: gpt-4o + apiKey: '{{OPENAI_API_KEY}}' + tools: + - type: exa + title: "Web Search" + toolId: exa_search + operation: exa_search + usageControl: auto + params: + apiKey: '{{EXA_API_KEY}}' + connections: + success: summary-block +``` + +### Structured Output + +```yaml +data-extractor: + type: agent + name: "Extract Contact Info" + inputs: + systemPrompt: "Extract contact information from the text." + userPrompt: + model: gpt-4o + apiKey: '{{OPENAI_API_KEY}}' + responseFormat: | + { + "name": "contact_extraction", + "schema": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "email": {"type": "string"}, + "phone": {"type": "string"} + }, + "required": ["name"] + }, + "strict": true + } + connections: + success: save-contact +``` + +### Azure OpenAI + +```yaml +azure-agent: + type: agent + name: "Azure AI Assistant" + inputs: + systemPrompt: "You are a helpful assistant." 
+ userPrompt: + model: gpt-4o + apiKey: '{{AZURE_OPENAI_API_KEY}}' + azureEndpoint: '{{AZURE_OPENAI_ENDPOINT}}' + azureApiVersion: "2024-07-01-preview" + connections: + success: response-block +``` \ No newline at end of file diff --git a/apps/docs/content/docs/yaml/blocks/api.mdx b/apps/docs/content/docs/yaml/blocks/api.mdx new file mode 100644 index 000000000..8f92e8bee --- /dev/null +++ b/apps/docs/content/docs/yaml/blocks/api.mdx @@ -0,0 +1,179 @@ +--- +title: API Block YAML Schema +description: YAML configuration reference for API blocks +--- + +## Schema Definition + +```yaml +type: object +required: + - type + - name + - inputs +properties: + type: + type: string + enum: [api] + description: Block type identifier + name: + type: string + description: Display name for this API block + inputs: + type: object + required: + - url + - method + properties: + url: + type: string + description: The endpoint URL to send the request to + method: + type: string + enum: [GET, POST, PUT, DELETE, PATCH] + description: HTTP method for the request + default: GET + queryParams: + type: array + description: Query parameters as key-value pairs + items: + type: object + properties: + key: + type: string + description: Parameter name + value: + type: string + description: Parameter value + headers: + type: array + description: HTTP headers as key-value pairs + items: + type: object + properties: + key: + type: string + description: Header name + value: + type: string + description: Header value + body: + type: string + description: Request body for POST/PUT/PATCH methods + timeout: + type: number + description: Request timeout in milliseconds + default: 30000 + minimum: 1000 + maximum: 300000 + connections: + type: object + properties: + success: + type: string + description: Target block ID for successful requests + error: + type: string + description: Target block ID for error handling +``` + +## Connection Configuration + +Connections define where the workflow goes based on 
request results: + +```yaml +connections: + success: # Target block ID for successful requests + error: # Target block ID for error handling (optional) +``` + +## Examples + +### Simple GET Request + +```yaml +user-api: + type: api + name: "Fetch User Data" + inputs: + url: "https://api.example.com/users/123" + method: GET + headers: + - key: "Authorization" + value: "Bearer {{API_TOKEN}}" + - key: "Content-Type" + value: "application/json" + connections: + success: process-user-data + error: handle-api-error +``` + +### POST Request with Body + +```yaml +create-ticket: + type: api + name: "Create Support Ticket" + inputs: + url: "https://api.support.com/tickets" + method: POST + headers: + - key: "Authorization" + value: "Bearer {{SUPPORT_API_KEY}}" + - key: "Content-Type" + value: "application/json" + body: | + { + "title": "", + "description": "", + "priority": "high" + } + connections: + success: ticket-created + error: ticket-error +``` + +### Dynamic URL with Query Parameters + +```yaml +search-api: + type: api + name: "Search Products" + inputs: + url: "https://api.store.com/products" + method: GET + queryParams: + - key: "q" + value: + - key: "limit" + value: "10" + - key: "category" + value: + headers: + - key: "Authorization" + value: "Bearer {{STORE_API_KEY}}" + connections: + success: display-results +``` + +## Output References + +After an API block executes, you can reference its outputs: + +```yaml +# In subsequent blocks +next-block: + inputs: + data: # Response data + status: # HTTP status code + headers: # Response headers + error: # Error details (if any) +``` + +## Best Practices + +- Use environment variables for API keys: `{{API_KEY_NAME}}` +- Include error handling with error connections +- Set appropriate timeouts for your use case +- Validate response status codes in subsequent blocks +- Use meaningful block names for easier reference \ No newline at end of file diff --git a/apps/docs/content/docs/yaml/blocks/condition.mdx 
b/apps/docs/content/docs/yaml/blocks/condition.mdx new file mode 100644 index 000000000..37836ecf6 --- /dev/null +++ b/apps/docs/content/docs/yaml/blocks/condition.mdx @@ -0,0 +1,165 @@ +--- +title: Condition Block YAML Schema +description: YAML configuration reference for Condition blocks +--- + +## Schema Definition + +```yaml +type: object +required: + - type + - name + - inputs + - connections +properties: + type: + type: string + enum: [condition] + description: Block type identifier + name: + type: string + description: Display name for this condition block + inputs: + type: object + required: + - conditions + properties: + conditions: + type: object + description: Conditional expressions and their logic + properties: + if: + type: string + description: Primary condition expression (boolean) + else-if: + type: string + description: Secondary condition expression (optional) + else-if-2: + type: string + description: Third condition expression (optional) + else-if-3: + type: string + description: Fourth condition expression (optional) + # Additional else-if-N conditions can be added as needed + else: + type: boolean + description: Default fallback condition (optional) + default: true + connections: + type: object + required: + - conditions + properties: + conditions: + type: object + description: Target blocks for each condition outcome + properties: + if: + type: string + description: Target block ID when 'if' condition is true + else-if: + type: string + description: Target block ID when 'else-if' condition is true + else-if-2: + type: string + description: Target block ID when 'else-if-2' condition is true + else-if-3: + type: string + description: Target block ID when 'else-if-3' condition is true + # Additional else-if-N connections can be added as needed + else: + type: string + description: Target block ID when no conditions match +``` + +## Connection Configuration + +Unlike other blocks, conditions use branching connections based on condition outcomes: 
+ +```yaml +connections: + conditions: + if: # Target block ID when primary condition is true + else-if: # Target block ID when secondary condition is true (optional) + else-if-2: # Target block ID when third condition is true (optional) + else-if-3: # Target block ID when fourth condition is true (optional) + # Additional else-if-N connections can be added as needed + else: # Target block ID when no conditions match (optional) +``` + +## Examples + +### Simple If-Else + +```yaml +status-check: + type: condition + name: "Status Check" + inputs: + conditions: + if: === "approved" + else: true + connections: + conditions: + if: send-approval-email + else: send-rejection-email +``` + +### Multiple Conditions + +```yaml +user-routing: + type: condition + name: "User Type Router" + inputs: + conditions: + if: === "admin" + else-if: === "premium" + else-if-2: === "basic" + else: true + connections: + conditions: + if: admin-dashboard + else-if: premium-features + else-if-2: basic-features + else: registration-flow +``` + +### Numeric Comparisons + +```yaml +score-evaluation: + type: condition + name: "Score Evaluation" + inputs: + conditions: + if: >= 90 + else-if: >= 70 + else-if-2: >= 50 + else: true + connections: + conditions: + if: excellent-response + else-if: good-response + else-if-2: average-response + else: poor-response +``` + +### Complex Logic + +```yaml +eligibility-check: + type: condition + name: "Eligibility Check" + inputs: + conditions: + if: >= 18 && === true + else-if: >= 16 && === true + else: true + connections: + conditions: + if: full-access + else-if: limited-access + else: access-denied +``` \ No newline at end of file diff --git a/apps/docs/content/docs/yaml/blocks/evaluator.mdx b/apps/docs/content/docs/yaml/blocks/evaluator.mdx new file mode 100644 index 000000000..9d51de389 --- /dev/null +++ b/apps/docs/content/docs/yaml/blocks/evaluator.mdx @@ -0,0 +1,255 @@ +--- +title: Evaluator Block YAML Schema +description: YAML configuration reference 
for Evaluator blocks +--- + +## Schema Definition + +```yaml +type: object +required: + - type + - name + - inputs +properties: + type: + type: string + enum: [evaluator] + description: Block type identifier + name: + type: string + description: Display name for this evaluator block + inputs: + type: object + required: + - content + - metrics + - model + - apiKey + properties: + content: + type: string + description: Content to evaluate (can reference other blocks) + metrics: + type: array + description: Evaluation criteria and scoring ranges + items: + type: object + properties: + name: + type: string + description: Metric identifier + description: + type: string + description: Detailed explanation of what the metric measures + range: + type: object + properties: + min: + type: number + description: Minimum score value + max: + type: number + description: Maximum score value + required: [min, max] + description: Scoring range with numeric bounds + model: + type: string + description: AI model identifier (e.g., gpt-4o, claude-3-5-sonnet-20241022) + apiKey: + type: string + description: API key for the model provider (use {{ENV_VAR}} format) + temperature: + type: number + minimum: 0 + maximum: 2 + description: Model temperature for evaluation + default: 0.3 + azureEndpoint: + type: string + description: Azure OpenAI endpoint URL (required for Azure models) + azureApiVersion: + type: string + description: Azure API version (required for Azure models) + connections: + type: object + properties: + success: + type: string + description: Target block ID for successful evaluation + error: + type: string + description: Target block ID for error handling +``` + +## Connection Configuration + +Connections define where the workflow goes based on evaluation results: + +```yaml +connections: + success: # Target block ID for successful evaluation + error: # Target block ID for error handling (optional) +``` + +## Examples + +### Content Quality Evaluation + +```yaml 
+content-evaluator: + type: evaluator + name: "Content Quality Evaluator" + inputs: + content: + metrics: + - name: "accuracy" + description: "How factually accurate is the content?" + range: + min: 1 + max: 5 + - name: "clarity" + description: "How clear and understandable is the content?" + range: + min: 1 + max: 5 + - name: "relevance" + description: "How relevant is the content to the original query?" + range: + min: 1 + max: 5 + - name: "completeness" + description: "How complete and comprehensive is the content?" + range: + min: 1 + max: 5 + model: gpt-4o + temperature: 0.2 + apiKey: '{{OPENAI_API_KEY}}' + connections: + success: quality-report + error: evaluation-error +``` + +### Customer Response Evaluation + +```yaml +response-evaluator: + type: evaluator + name: "Customer Response Evaluator" + inputs: + content: + metrics: + - name: "helpfulness" + description: "How helpful is the response in addressing the customer's needs?" + range: + min: 1 + max: 10 + - name: "tone" + description: "How appropriate and professional is the tone?" + range: + min: 1 + max: 10 + - name: "completeness" + description: "Does the response fully address all aspects of the inquiry?" + range: + min: 1 + max: 10 + model: claude-3-5-sonnet-20241022 + apiKey: '{{ANTHROPIC_API_KEY}}' + connections: + success: response-processor +``` + +### A/B Testing Evaluation + +```yaml +ab-test-evaluator: + type: evaluator + name: "A/B Test Evaluator" + inputs: + content: | + Version A: + Version B: + + Compare these two versions for the following criteria. + metrics: + - name: "engagement" + description: "Which version is more likely to engage users?" + range: "A, B, or Tie" + - name: "clarity" + description: "Which version communicates more clearly?" + range: "A, B, or Tie" + - name: "persuasiveness" + description: "Which version is more persuasive?" 
+ range: "A, B, or Tie" + model: gpt-4o + temperature: 0.1 + apiKey: '{{OPENAI_API_KEY}}' + connections: + success: test-results +``` + +### Multi-Dimensional Content Scoring + +```yaml +comprehensive-evaluator: + type: evaluator + name: "Comprehensive Content Evaluator" + inputs: + content: + metrics: + - name: "technical_accuracy" + description: "How technically accurate and correct is the information?" + range: + min: 0 + max: 100 + - name: "readability" + description: "How easy is the content to read and understand?" + range: + min: 0 + max: 100 + - name: "seo_optimization" + description: "How well optimized is the content for search engines?" + range: + min: 0 + max: 100 + - name: "user_engagement" + description: "How likely is this content to engage and retain readers?" + range: + min: 0 + max: 100 + - name: "brand_alignment" + description: "How well does the content align with brand voice and values?" + range: + min: 0 + max: 100 + model: gpt-4o + temperature: 0.3 + apiKey: '{{OPENAI_API_KEY}}' + connections: + success: content-optimization +``` + +## Output References + +After an evaluator block executes, you can reference its outputs: + +```yaml +# In subsequent blocks +next-block: + inputs: + evaluation: # Evaluation summary + scores: # Individual metric scores + overall: # Overall assessment +``` + +## Best Practices + +- Define clear, specific evaluation criteria +- Use appropriate scoring ranges for your use case +- Choose models with strong reasoning capabilities +- Use lower temperature for consistent scoring +- Include detailed metric descriptions +- Test with diverse content types +- Consider multiple evaluators for complex assessments \ No newline at end of file diff --git a/apps/docs/content/docs/yaml/blocks/function.mdx b/apps/docs/content/docs/yaml/blocks/function.mdx new file mode 100644 index 000000000..951f91b50 --- /dev/null +++ b/apps/docs/content/docs/yaml/blocks/function.mdx @@ -0,0 +1,162 @@ +--- +title: Function Block YAML Schema 
+description: YAML configuration reference for Function blocks +--- + +## Schema Definition + +```yaml +type: object +required: + - type + - name + - inputs +properties: + type: + type: string + enum: [function] + description: Block type identifier + name: + type: string + description: Display name for this function block + inputs: + type: object + required: + - code + properties: + code: + type: string + description: JavaScript/TypeScript code to execute (multiline string) + timeout: + type: number + description: Maximum execution time in milliseconds + default: 30000 + minimum: 1000 + maximum: 300000 + connections: + type: object + properties: + success: + type: string + description: Target block ID for successful execution + error: + type: string + description: Target block ID for error handling +``` + +## Connection Configuration + +Connections define where the workflow goes based on execution results: + +```yaml +connections: + success: # Target block ID for successful execution + error: # Target block ID for error handling (optional) +``` + +## Examples + +### Simple Validation + +```yaml +input-validator: + type: function + name: "Input Validator" + inputs: + code: |- + // Check if input number is greater than 5 + const inputValue = parseInt(, 10); + + if (inputValue > 5) { + return { + valid: true, + value: inputValue, + message: "Input is valid" + }; + } else { + return { + valid: false, + value: inputValue, + message: "Input must be greater than 5" + }; + } + connections: + success: next-step + error: handle-error +``` + +### Data Processing + +```yaml +data-processor: + type: function + name: "Data Transformer" + inputs: + code: | + // Transform the input data + const rawData = ; + + // Process and clean the data + const processed = rawData + .filter(item => item.status === 'active') + .map(item => ({ + id: item.id, + name: item.name.trim(), + date: new Date(item.created).toISOString() + })); + + return processed; + connections: + success: api-save + 
error: error-handler +``` + +### API Integration + +```yaml +api-formatter: + type: function + name: "Format API Request" + inputs: + code: | + // Prepare data for API submission + const userData = ; + + const apiPayload = { + timestamp: new Date().toISOString(), + data: userData, + source: "workflow-automation", + version: "1.0" + }; + + return apiPayload; + connections: + success: api-call +``` + +### Calculations + +```yaml +calculator: + type: function + name: "Calculate Results" + inputs: + code: | + // Perform calculations on input data + const numbers = ; + + const sum = numbers.reduce((a, b) => a + b, 0); + const average = sum / numbers.length; + const max = Math.max(...numbers); + const min = Math.min(...numbers); + + return { + sum, + average, + max, + min, + count: numbers.length + }; + connections: + success: results-display +``` \ No newline at end of file diff --git a/apps/docs/content/docs/yaml/blocks/index.mdx b/apps/docs/content/docs/yaml/blocks/index.mdx new file mode 100644 index 000000000..3a12cbab4 --- /dev/null +++ b/apps/docs/content/docs/yaml/blocks/index.mdx @@ -0,0 +1,151 @@ +--- +title: Block Schemas +description: Complete YAML schema reference for all Sim Studio blocks +--- + +import { Card, Cards } from "fumadocs-ui/components/card"; + +This section contains the complete YAML schema definitions for all available block types in Sim Studio. Each block type has specific configuration requirements and output formats. 
+ +## Core Blocks + +These are the essential building blocks for creating workflows: + + + + Workflow entry point supporting manual triggers, webhooks, and schedules + + + AI-powered processing with LLM integration and tool support + + + Custom JavaScript/TypeScript code execution environment + + + Format and return final workflow results + + + +## Logic & Control Flow + +Blocks for implementing conditional logic and control flow: + + + + Conditional branching based on boolean expressions + + + AI-powered intelligent routing to multiple paths + + + Iterative processing with for and forEach loops + + + Concurrent execution across multiple instances + + + +## Integration Blocks + +Blocks for connecting to external services and systems: + + + + HTTP requests to external REST APIs + + + Webhook triggers for external integrations + + + +## Advanced Blocks + +Specialized blocks for complex workflow patterns: + + + + Validate outputs against defined criteria and metrics + + + Execute other workflows as reusable components + + + +## Common Schema Elements + +All blocks share these common elements: + +### Basic Structure + +```yaml +block-id: + type: + name: + inputs: + # Block-specific configuration + connections: + # Connection definitions +``` + +### Connection Types + +- **success**: Target block for successful execution +- **error**: Target block for error handling (optional) +- **conditions**: Multiple paths for conditional blocks + +### Environment Variables + +Use double curly braces for environment variables: + +```yaml +inputs: + apiKey: '{{API_KEY_NAME}}' + endpoint: '{{SERVICE_ENDPOINT}}' +``` + +### Block References + +Reference other block outputs using the block name in lowercase: + +```yaml +inputs: + userPrompt: + data: + originalInput: +``` + +## Validation Rules + +All YAML blocks are validated against their schemas: + +1. **Required fields**: Must be present +2. **Type validation**: Values must match expected types +3. 
**Enum validation**: String values must be from allowed lists +4. **Range validation**: Numbers must be within specified ranges +5. **Pattern validation**: Strings must match regex patterns (where applicable) + +## Quick Reference + +### Block Types and Properties + +| Block Type | Primary Output | Common Use Cases | +|------------|----------------|------------------| +| starter | `.input` | Workflow entry point | +| agent | `.content` | AI processing, text generation | +| function | `.output` | Data transformation, calculations | +| api | `.output` | External service integration | +| condition | N/A (branching) | Conditional logic | +| router | N/A (branching) | Intelligent routing | +| response | N/A (terminal) | Final output formatting | +| loop | `.results` | Iterative processing | +| parallel | `.results` | Concurrent processing | +| webhook | `.payload` | External triggers | +| evaluator | `.score` | Output validation, quality assessment | +| workflow | `.output` | Sub-workflow execution, modularity | + +### Required vs Optional + +- **Always required**: `type`, `name` +- **Usually required**: `inputs`, `connections` +- **Context dependent**: Specific input fields vary by block type +- **Always optional**: `error` connections, UI-specific fields \ No newline at end of file diff --git a/apps/docs/content/docs/yaml/blocks/loop.mdx b/apps/docs/content/docs/yaml/blocks/loop.mdx new file mode 100644 index 000000000..cad80a7b8 --- /dev/null +++ b/apps/docs/content/docs/yaml/blocks/loop.mdx @@ -0,0 +1,305 @@ +--- +title: Loop Block YAML Schema +description: YAML configuration reference for Loop blocks +--- + +## Schema Definition + +```yaml +type: object +required: + - type + - name + - inputs + - connections +properties: + type: + type: string + enum: [loop] + description: Block type identifier + name: + type: string + description: Display name for this loop block + inputs: + type: object + required: + - loopType + properties: + loopType: + type: string + enum: 
[for, forEach] + description: Type of loop to execute + iterations: + type: number + description: Number of iterations (for 'for' loops) + minimum: 1 + maximum: 1000 + collection: + type: string + description: Collection to iterate over (for 'forEach' loops) + maxConcurrency: + type: number + description: Maximum concurrent executions + default: 1 + minimum: 1 + maximum: 10 + connections: + type: object + required: + - loop + properties: + loop: + type: object + required: + - start + properties: + start: + type: string + description: Target block ID to execute inside the loop + end: + type: string + description: Target block ID for loop completion (optional) + success: + type: string + description: Target block ID after loop completion (alternative format) + error: + type: string + description: Target block ID for error handling +``` + +## Connection Configuration + +Loop blocks use a special connection format with a `loop` section: + +```yaml +connections: + loop: + start: # Target block ID to execute inside the loop + end: # Target block ID after loop completion (optional) + error: # Target block ID for error handling (optional) +``` + +Alternative format (legacy): +```yaml +connections: + success: # Target block ID after loop completion + error: # Target block ID for error handling (optional) +``` + +## Child Block Configuration + +Blocks inside a loop must have their `parentId` set to the loop block ID: + +```yaml +loop-1: + type: loop + name: "Process Items" + inputs: + loopType: forEach + collection: + connections: + loop: + start: process-item + end: final-results + +# Child block inside the loop +process-item: + type: agent + name: "Process Item" + parentId: loop-1 # References the loop block + inputs: + systemPrompt: "Process this item" + userPrompt: + model: gpt-4o + apiKey: '{{OPENAI_API_KEY}}' +``` + +## Examples + +### For Loop (Fixed Iterations) + +```yaml +countdown-loop: + type: loop + name: "Countdown Loop" + inputs: + loopType: for + iterations: 5 
+ connections: + loop: + start: countdown-agent + end: countdown-complete + +countdown-agent: + type: agent + name: "Countdown Agent" + parentId: countdown-loop + inputs: + systemPrompt: "Generate a countdown message" + userPrompt: "Count down from 5. Current number: " + model: gpt-4o + apiKey: '{{OPENAI_API_KEY}}' +``` + +### ForEach Loop (Collection Processing) + +```yaml +email-processor-loop: + type: loop + name: "Email Processor Loop" + inputs: + loopType: forEach + collection: + connections: + loop: + start: process-single-email + end: all-emails-processed + +process-single-email: + type: agent + name: "Process Single Email" + parentId: email-processor-loop + inputs: + systemPrompt: "Classify and respond to this email" + userPrompt: "Email content: " + model: gpt-4o + apiKey: '{{OPENAI_API_KEY}}' +``` + +### Complex Loop with Multiple Child Blocks + +```yaml +data-analysis-loop: + type: loop + name: "Data Analysis Loop" + inputs: + loopType: forEach + collection: + maxConcurrency: 3 + connections: + loop: + start: validate-record + end: generate-report + error: handle-loop-error + +validate-record: + type: function + name: "Validate Record" + parentId: data-analysis-loop + inputs: + code: | + const record = ; + const index = ; + + // Validate the record + if (!record.id || !record.data) { + throw new Error(`Invalid record at index ${index}`); + } + + return { + valid: true, + recordId: record.id, + processedAt: new Date().toISOString() + }; + connections: + success: analyze-record + error: record-error + +analyze-record: + type: agent + name: "Analyze Record" + parentId: data-analysis-loop + inputs: + systemPrompt: "Analyze this data record and extract insights" + userPrompt: | + Record ID: + Data: + Position in collection: + model: gpt-4o + apiKey: '{{OPENAI_API_KEY}}' + connections: + success: store-analysis + +store-analysis: + type: function + name: "Store Analysis" + parentId: data-analysis-loop + inputs: + code: | + const analysis = ; + const recordId = 
; + + // Store analysis result + return { + recordId, + analysis, + completedAt: new Date().toISOString() + }; +``` + +### Concurrent Processing Loop + +```yaml +parallel-processing-loop: + type: loop + name: "Parallel Processing Loop" + inputs: + loopType: forEach + collection: + maxConcurrency: 5 + connections: + loop: + start: process-task + end: aggregate-results + +process-task: + type: api + name: "Process Task" + parentId: parallel-processing-loop + inputs: + url: "https://api.example.com/process" + method: POST + headers: + - key: "Authorization" + value: "Bearer {{API_TOKEN}}" + body: | + { + "taskId": "", + "data": "" + } + connections: + success: task-completed +``` + +## Loop Variables + +Inside loop child blocks, these special variables are available: + +```yaml +# Available in all child blocks of the loop + # Current iteration number (0-based) + # Current item being processed (forEach loops) + # Full collection (forEach loops) +``` + +## Output References + +After a loop completes, you can reference its aggregated results: + +```yaml +# In blocks after the loop +final-processor: + inputs: + all-results: # Array of all iteration results + total-count: # Number of iterations completed +``` + +## Best Practices + +- Set reasonable iteration limits to avoid long execution times +- Use forEach for collection processing, for loops for fixed iterations +- Consider using maxConcurrency for I/O bound operations +- Include error handling for robust loop execution +- Use descriptive names for loop child blocks +- Test with small collections first +- Monitor execution time for large collections \ No newline at end of file diff --git a/apps/docs/content/docs/yaml/blocks/meta.json b/apps/docs/content/docs/yaml/blocks/meta.json new file mode 100644 index 000000000..56beda389 --- /dev/null +++ b/apps/docs/content/docs/yaml/blocks/meta.json @@ -0,0 +1,17 @@ +{ + "title": "Block Schemas", + "pages": [ + "starter", + "agent", + "function", + "api", + "condition", + 
"router", + "evaluator", + "response", + "loop", + "parallel", + "webhook", + "workflow" + ] +} diff --git a/apps/docs/content/docs/yaml/blocks/parallel.mdx b/apps/docs/content/docs/yaml/blocks/parallel.mdx new file mode 100644 index 000000000..9700aa7c9 --- /dev/null +++ b/apps/docs/content/docs/yaml/blocks/parallel.mdx @@ -0,0 +1,322 @@ +--- +title: Parallel Block YAML Schema +description: YAML configuration reference for Parallel blocks +--- + +## Schema Definition + +```yaml +type: object +required: + - type + - name + - inputs + - connections +properties: + type: + type: string + enum: [parallel] + description: Block type identifier + name: + type: string + description: Display name for this parallel block + inputs: + type: object + required: + - parallelType + properties: + parallelType: + type: string + enum: [count, collection] + description: Type of parallel execution + count: + type: number + description: Number of parallel instances (for 'count' type) + minimum: 1 + maximum: 100 + collection: + type: string + description: Collection to distribute across instances (for 'collection' type) + maxConcurrency: + type: number + description: Maximum concurrent executions + default: 10 + minimum: 1 + maximum: 50 + connections: + type: object + required: + - parallel + properties: + parallel: + type: object + required: + - start + properties: + start: + type: string + description: Target block ID to execute inside each parallel instance + end: + type: string + description: Target block ID after all parallel instances complete (optional) + success: + type: string + description: Target block ID after all instances complete (alternative format) + error: + type: string + description: Target block ID for error handling +``` + +## Connection Configuration + +Parallel blocks use a special connection format with a `parallel` section: + +```yaml +connections: + parallel: + start: # Target block ID to execute inside each parallel instance + end: # Target block ID after all 
instances complete (optional) + error: # Target block ID for error handling (optional) +``` + +Alternative format (legacy): +```yaml +connections: + success: # Target block ID after all instances complete + error: # Target block ID for error handling (optional) +``` + +## Child Block Configuration + +Blocks inside a parallel block must have their `parentId` set to the parallel block ID: + +```yaml +parallel-1: + type: parallel + name: "Process Items" + inputs: + parallelType: collection + collection: + connections: + parallel: + start: process-item + end: aggregate-results + +# Child block inside the parallel +process-item: + type: agent + name: "Process Item" + parentId: parallel-1 # References the parallel block + inputs: + systemPrompt: "Process this item" + userPrompt: + model: gpt-4o + apiKey: '{{OPENAI_API_KEY}}' +``` + +## Examples + +### Count-Based Parallel Processing + +```yaml +worker-parallel: + type: parallel + name: "Worker Parallel" + inputs: + parallelType: count + count: 5 + maxConcurrency: 3 + connections: + parallel: + start: worker-task + end: collect-worker-results + +worker-task: + type: api + name: "Worker Task" + parentId: worker-parallel + inputs: + url: "https://api.worker.com/process" + method: POST + headers: + - key: "Authorization" + value: "Bearer {{WORKER_API_KEY}}" + body: | + { + "instanceId": , + "timestamp": "{{new Date().toISOString()}}" + } + connections: + success: worker-complete +``` + +### Collection-Based Parallel Processing + +```yaml +api-parallel: + type: parallel + name: "API Parallel" + inputs: + parallelType: collection + collection: + maxConcurrency: 10 + connections: + parallel: + start: call-api + end: merge-api-results + +call-api: + type: api + name: "Call API" + parentId: api-parallel + inputs: + url: + method: + headers: + - key: "Authorization" + value: "Bearer {{API_TOKEN}}" + connections: + success: api-complete +``` + +### Complex Parallel Processing Pipeline + +```yaml +data-processing-parallel: + type: 
parallel + name: "Data Processing Parallel" + inputs: + parallelType: collection + collection: + maxConcurrency: 8 + connections: + parallel: + start: validate-data + end: final-aggregation + error: parallel-error-handler + +validate-data: + type: function + name: "Validate Data" + parentId: data-processing-parallel + inputs: + code: | + const record = ; + const index = ; + + // Validate record structure + if (!record.id || !record.content) { + throw new Error(`Invalid record at index ${index}`); + } + + return { + valid: true, + recordId: record.id, + validatedAt: new Date().toISOString() + }; + connections: + success: process-data + error: validation-error + +process-data: + type: agent + name: "Process Data" + parentId: data-processing-parallel + inputs: + systemPrompt: "Process and analyze this data record" + userPrompt: | + Record ID: + Content: + Instance: + model: gpt-4o + temperature: 0.3 + apiKey: '{{OPENAI_API_KEY}}' + connections: + success: store-result + +store-result: + type: function + name: "Store Result" + parentId: data-processing-parallel + inputs: + code: | + const processed = ; + const recordId = ; + + return { + recordId, + processed, + completedAt: new Date().toISOString(), + instanceIndex: + }; +``` + +### Concurrent AI Analysis + +```yaml +multi-model-parallel: + type: parallel + name: "Multi-Model Analysis" + inputs: + parallelType: collection + collection: | + [ + {"model": "gpt-4o", "focus": "technical accuracy"}, + {"model": "claude-3-5-sonnet-20241022", "focus": "creative quality"}, + {"model": "gemini-2.0-flash-exp", "focus": "factual verification"} + ] + maxConcurrency: 3 + connections: + parallel: + start: analyze-content + end: combine-analyses + +analyze-content: + type: agent + name: "Analyze Content" + parentId: multi-model-parallel + inputs: + systemPrompt: | + You are analyzing content with a focus on . + Provide detailed analysis from this perspective. 
+ userPrompt: | + Content to analyze: + Analysis focus: + model: + apiKey: '{{OPENAI_API_KEY}}' + connections: + success: analysis-complete +``` + +## Parallel Variables + +Inside parallel child blocks, these special variables are available: + +```yaml +# Available in all child blocks of the parallel + # Instance number (0-based) + # Item for this instance (collection type) + # Full collection (collection type) +``` + +## Output References + +After a parallel block completes, you can reference its aggregated results: + +```yaml +# In blocks after the parallel +final-processor: + inputs: + all-results: # Array of all instance results + total-count: # Number of instances completed +``` + +## Best Practices + +- Use appropriate maxConcurrency to avoid overwhelming APIs +- Ensure operations are independent and don't rely on each other +- Include error handling for robust parallel execution +- Test with small collections first +- Monitor rate limits for external APIs +- Use collection type for distributing work, count type for fixed instances +- Consider memory usage with large collections \ No newline at end of file diff --git a/apps/docs/content/docs/yaml/blocks/response.mdx b/apps/docs/content/docs/yaml/blocks/response.mdx new file mode 100644 index 000000000..46419f4f5 --- /dev/null +++ b/apps/docs/content/docs/yaml/blocks/response.mdx @@ -0,0 +1,140 @@ +--- +title: Response Block YAML Schema +description: YAML configuration reference for Response blocks +--- + +## Schema Definition + +```yaml +type: object +required: + - type + - name +properties: + type: + type: string + enum: [response] + description: Block type identifier + name: + type: string + description: Display name for this response block + inputs: + type: object + properties: + dataMode: + type: string + enum: [structured, json] + description: Mode for defining response data structure + default: structured + builderData: + type: object + description: Structured response data (when dataMode is 
'structured') + data: + type: object + description: JSON response data (when dataMode is 'json') + status: + type: number + description: HTTP status code + default: 200 + minimum: 100 + maximum: 599 + headers: + type: array + description: Response headers as key-value pairs + items: + type: object + properties: + key: + type: string + description: Header name + value: + type: string + description: Header value +``` + +## Connection Configuration + +Response blocks are terminal blocks (no outgoing connections) and define the final output: + +```yaml +# No connections object needed - Response blocks are always terminal +``` + +## Examples + +### Simple Response + +```yaml +simple-response: + type: response + name: "Simple Response" + inputs: + data: + message: "Hello World" + timestamp: + status: 200 +``` + +### Success Response + +```yaml +success-response: + type: response + name: "Success Response" + inputs: + data: + success: true + user: + id: + name: + email: + created_at: + status: 201 + headers: + - key: "Location" + value: "/api/users/" + - key: "X-Created-By" + value: "workflow-engine" +``` + +### Error Response + +```yaml +error-response: + type: response + name: "Error Response" + inputs: + data: + error: true + message: + code: "VALIDATION_FAILED" + details: + status: 400 + headers: + - key: "X-Error-Code" + value: "VALIDATION_FAILED" +``` + +### Paginated Response + +```yaml +paginated-response: + type: response + name: "Paginated Response" + inputs: + data: + data: + pagination: + page: + per_page: + total: + total_pages: + status: 200 + headers: + - key: "X-Total-Count" + value: + - key: "Cache-Control" + value: "public, max-age=300" + - key: "Content-Type" + value: "application/json" +``` \ No newline at end of file diff --git a/apps/docs/content/docs/yaml/blocks/router.mdx b/apps/docs/content/docs/yaml/blocks/router.mdx new file mode 100644 index 000000000..2c225d429 --- /dev/null +++ b/apps/docs/content/docs/yaml/blocks/router.mdx @@ -0,0 +1,200 @@ 
+--- +title: Router Block YAML Schema +description: YAML configuration reference for Router blocks +--- + +## Schema Definition + +```yaml +type: object +required: + - type + - name + - inputs +properties: + type: + type: string + enum: [router] + description: Block type identifier + name: + type: string + description: Display name for this router block + inputs: + type: object + required: + - prompt + - model + - apiKey + properties: + prompt: + type: string + description: Instructions for routing decisions and criteria + model: + type: string + description: AI model identifier (e.g., gpt-4o, gemini-2.5-pro, deepseek-chat) + apiKey: + type: string + description: API key for the model provider (use {{ENV_VAR}} format) + temperature: + type: number + minimum: 0 + maximum: 2 + description: Model temperature for routing decisions + default: 0.3 + azureEndpoint: + type: string + description: Azure OpenAI endpoint URL (required for Azure models) + azureApiVersion: + type: string + description: Azure API version (required for Azure models) + connections: + type: object + description: Multiple connection paths for different routing outcomes + properties: + success: + type: array + items: + type: string + description: Array of target block IDs for routing destinations +``` + +## Connection Configuration + +Router blocks use a success array containing all possible routing destinations: + +```yaml +connections: + success: + - # Target block ID option 1 + - # Target block ID option 2 + - # Target block ID option 3 + # Additional target block IDs as needed +``` + +## Examples + +### Content Type Router + +```yaml +content-router: + type: router + name: "Content Type Router" + inputs: + prompt: | + Route this content based on its type: + - If it's a question, route to question-handler + - If it's a complaint, route to complaint-handler + - If it's feedback, route to feedback-handler + - If it's a request, route to request-handler + + Content: + model: gpt-4o + apiKey: 
'{{OPENAI_API_KEY}}' + connections: + success: + - question-handler + - complaint-handler + - feedback-handler + - request-handler +``` + +### Priority Router + +```yaml +priority-router: + type: router + name: "Priority Router" + inputs: + prompt: | + Analyze the urgency and route accordingly: + - urgent-queue: High priority, needs immediate attention + - standard-queue: Normal priority, standard processing + - low-queue: Low priority, can be delayed + + Email content: + + Route based on urgency indicators, deadlines, and tone. + model: gpt-4o + temperature: 0.2 + apiKey: '{{OPENAI_API_KEY}}' + connections: + success: + - urgent-queue + - standard-queue + - low-queue +``` + +### Department Router + +```yaml +department-router: + type: router + name: "Department Router" + inputs: + prompt: | + Route this customer inquiry to the appropriate department: + + - sales-team: Sales questions, pricing, demos + - support-team: Technical issues, bug reports, how-to questions + - billing-team: Payment issues, subscription changes, invoices + - general-team: General inquiries, feedback, other topics + + Customer message: + Customer type: + model: claude-3-5-sonnet-20241022 + apiKey: '{{ANTHROPIC_API_KEY}}' + connections: + success: + - sales-team + - support-team + - billing-team + - general-team +``` + +## Advanced Configuration + +### Multiple Models Router + +```yaml +model-selector-router: + type: router + name: "Model Selection Router" + inputs: + prompt: | + Based on the task complexity, route to the appropriate model: + - simple-gpt35: Simple questions, basic tasks + - advanced-gpt4: Complex analysis, detailed reasoning + - specialized-claude: Creative writing, nuanced analysis + + Task: + Complexity indicators: + model: gpt-4o-mini + temperature: 0.1 + apiKey: '{{OPENAI_API_KEY}}' + connections: + success: + - simple-gpt35 + - advanced-gpt4 + - specialized-claude +``` + +## Output References + +Router blocks don't produce direct outputs but control workflow path: + 
+```yaml +# Router decisions affect which subsequent blocks execute +# Access the routed block's outputs normally: +final-step: + inputs: + routed-result: +``` + +## Best Practices + +- Provide clear routing criteria in the prompt +- Use specific, descriptive target block names +- Include examples of content for each routing path +- Use lower temperature values for consistent routing +- Test with diverse input types to ensure accurate routing +- Consider fallback paths for edge cases \ No newline at end of file diff --git a/apps/docs/content/docs/yaml/blocks/starter.mdx b/apps/docs/content/docs/yaml/blocks/starter.mdx new file mode 100644 index 000000000..101cf9c3b --- /dev/null +++ b/apps/docs/content/docs/yaml/blocks/starter.mdx @@ -0,0 +1,183 @@ +--- +title: Starter Block YAML Schema +description: YAML configuration reference for Starter blocks +--- + +## Schema Definition + +```yaml +type: object +required: + - type + - name +properties: + type: + type: string + enum: [starter] + description: Block type identifier + name: + type: string + description: Display name for this starter block + inputs: + type: object + properties: + startWorkflow: + type: string + enum: [manual, webhook, schedule] + description: How the workflow should be triggered + default: manual + inputFormat: + type: array + description: Expected input structure for API calls (manual workflows) + items: + type: object + properties: + name: + type: string + description: Field name + type: + type: string + enum: [string, number, boolean, object, array] + description: Field type + scheduleType: + type: string + enum: [hourly, daily, weekly, monthly] + description: Schedule frequency (schedule workflows only) + hourlyMinute: + type: number + minimum: 0 + maximum: 59 + description: Minute of the hour to run (hourly schedules) + dailyTime: + type: string + pattern: "^([01]?[0-9]|2[0-3]):[0-5][0-9]$" + description: Time of day to run in HH:MM format (daily schedules) + weeklyDay: + type: string + enum: 
[MON, TUE, WED, THU, FRI, SAT, SUN] + description: Day of week to run (weekly schedules) + weeklyTime: + type: string + pattern: "^([01]?[0-9]|2[0-3]):[0-5][0-9]$" + description: Time of day to run in HH:MM format (weekly schedules) + monthlyDay: + type: number + minimum: 1 + maximum: 28 + description: Day of month to run (monthly schedules) + monthlyTime: + type: string + pattern: "^([01]?[0-9]|2[0-3]):[0-5][0-9]$" + description: Time of day to run in HH:MM format (monthly schedules) + timezone: + type: string + description: Timezone for scheduled workflows + default: UTC + webhookProvider: + type: string + enum: [slack, gmail, airtable, telegram, generic, whatsapp, github, discord, stripe] + description: Provider for webhook integration (webhook workflows only) + webhookConfig: + type: object + description: Provider-specific webhook configuration + connections: + type: object + properties: + success: + type: string + description: Target block ID to execute when workflow starts +``` + +## Connection Configuration + +The starter block only has a success connection since it's the entry point: + +```yaml +connections: + success: # Target block ID to execute when workflow starts +``` + +## Examples + +### Manual Start + +```yaml +start: + type: starter + name: Start + inputs: + startWorkflow: manual + connections: + success: next-block +``` + +### Manual Start with Input Format + +```yaml +start: + type: starter + name: Start + inputs: + startWorkflow: manual + inputFormat: + - name: query + type: string + - name: email + type: string + - name: age + type: number + - name: isActive + type: boolean + - name: preferences + type: object + - name: tags + type: array + connections: + success: agent-1 +``` + +### Daily Schedule + +```yaml +start: + type: starter + name: Start + inputs: + startWorkflow: schedule + scheduleType: daily + dailyTime: "09:00" + timezone: "America/New_York" + connections: + success: daily-task +``` + +### Weekly Schedule + +```yaml +start: + type: 
starter + name: Start + inputs: + startWorkflow: schedule + scheduleType: weekly + weeklyDay: MON + weeklyTime: "08:30" + timezone: UTC + connections: + success: weekly-report +``` + +### Webhook Trigger + +```yaml +start: + type: starter + name: Start + inputs: + startWorkflow: webhook + webhookProvider: slack + webhookConfig: + # Provider-specific configuration + connections: + success: process-webhook +``` \ No newline at end of file diff --git a/apps/docs/content/docs/yaml/blocks/webhook.mdx b/apps/docs/content/docs/yaml/blocks/webhook.mdx new file mode 100644 index 000000000..19d78e71f --- /dev/null +++ b/apps/docs/content/docs/yaml/blocks/webhook.mdx @@ -0,0 +1,278 @@ +--- +title: Webhook Block YAML Schema +description: YAML configuration reference for Webhook blocks +--- + +## Schema Definition + +```yaml +type: object +required: + - type + - name +properties: + type: + type: string + enum: [webhook] + description: Block type identifier + name: + type: string + description: Display name for this webhook block + inputs: + type: object + properties: + webhookConfig: + type: object + description: Webhook configuration settings + properties: + enabled: + type: boolean + description: Whether the webhook is active + default: true + secret: + type: string + description: Secret key for webhook verification + headers: + type: array + description: Expected headers for validation + items: + type: object + properties: + key: + type: string + description: Header name + value: + type: string + description: Expected header value + methods: + type: array + description: Allowed HTTP methods + items: + type: string + enum: [GET, POST, PUT, DELETE, PATCH] + default: [POST] + responseConfig: + type: object + description: Response configuration for the webhook + properties: + status: + type: number + description: HTTP status code to return + default: 200 + minimum: 100 + maximum: 599 + headers: + type: array + description: Response headers + items: + type: object + properties: + 
key: + type: string + description: Header name + value: + type: string + description: Header value + body: + type: string + description: Response body content + connections: + type: object + properties: + success: + type: string + description: Target block ID for successful webhook processing + error: + type: string + description: Target block ID for error handling +``` + +## Connection Configuration + +Connections define where the workflow goes based on webhook processing: + +```yaml +connections: + success: # Target block ID for successful processing + error: # Target block ID for error handling (optional) +``` + +## Examples + +### Basic Webhook Trigger + +```yaml +github-webhook: + type: webhook + name: "GitHub Webhook" + inputs: + webhookConfig: + enabled: true + secret: "{{GITHUB_WEBHOOK_SECRET}}" + methods: [POST] + headers: + - key: "X-GitHub-Event" + value: "push" + responseConfig: + status: 200 + body: | + { + "message": "Webhook received successfully", + "timestamp": "{{new Date().toISOString()}}" + } + connections: + success: process-github-event + error: webhook-error-handler +``` + +### Slack Event Webhook + +```yaml +slack-events: + type: webhook + name: "Slack Events" + inputs: + webhookConfig: + enabled: true + secret: "{{SLACK_SIGNING_SECRET}}" + methods: [POST] + headers: + - key: "Content-Type" + value: "application/json" + responseConfig: + status: 200 + headers: + - key: "Content-Type" + value: "application/json" + body: | + { + "challenge": "" + } + connections: + success: handle-slack-event +``` + +### Payment Webhook (Stripe) + +```yaml +stripe-webhook: + type: webhook + name: "Stripe Payment Webhook" + inputs: + webhookConfig: + enabled: true + secret: "{{STRIPE_WEBHOOK_SECRET}}" + methods: [POST] + headers: + - key: "Stripe-Signature" + value: "*" + responseConfig: + status: 200 + headers: + - key: "Content-Type" + value: "application/json" + body: | + { + "received": true + } + connections: + success: process-payment-event + error: 
payment-webhook-error +``` + +### Generic API Webhook + +```yaml +api-webhook: + type: webhook + name: "API Webhook" + inputs: + webhookConfig: + enabled: true + methods: [POST, PUT] + headers: + - key: "Authorization" + value: "Bearer {{WEBHOOK_API_KEY}}" + - key: "Content-Type" + value: "application/json" + responseConfig: + status: 202 + headers: + - key: "Content-Type" + value: "application/json" + - key: "X-Processed-By" + value: "Sim Studio" + body: | + { + "status": "accepted", + "id": "{{Math.random().toString(36).substr(2, 9)}}", + "received_at": "{{new Date().toISOString()}}" + } + connections: + success: process-webhook-data +``` + +### Multi-Method Webhook + +```yaml +crud-webhook: + type: webhook + name: "CRUD Webhook" + inputs: + webhookConfig: + enabled: true + methods: [GET, POST, PUT, DELETE] + headers: + - key: "X-API-Key" + value: "{{CRUD_API_KEY}}" + responseConfig: + status: 200 + headers: + - key: "Content-Type" + value: "application/json" + body: | + { + "method": "", + "processed": true, + "timestamp": "{{new Date().toISOString()}}" + } + connections: + success: route-by-method +``` + +## Webhook Variables + +Inside webhook-triggered workflows, these special variables are available: + +```yaml +# Available in blocks after the webhook + # Full request payload/body + # Request headers + # HTTP method used + # Query parameters + # Request path + # Challenge parameter (for verification) +``` + +## Output References + +After a webhook processes a request, you can reference its data: + +```yaml +# In subsequent blocks +process-webhook: + inputs: + payload: # Request payload + headers: # Request headers + method: # HTTP method +``` + +## Security Best Practices + +- Always use webhook secrets for verification +- Validate expected headers and methods +- Implement proper error handling +- Use HTTPS endpoints in production +- Monitor webhook activity and failures +- Set appropriate response timeouts +- Validate payload structure before processing \ No 
newline at end of file diff --git a/apps/docs/content/docs/yaml/blocks/workflow.mdx b/apps/docs/content/docs/yaml/blocks/workflow.mdx new file mode 100644 index 000000000..bfdfbbc1c --- /dev/null +++ b/apps/docs/content/docs/yaml/blocks/workflow.mdx @@ -0,0 +1,299 @@ +--- +title: Workflow Block YAML Schema +description: YAML configuration reference for Workflow blocks +--- + +## Schema Definition + +```yaml +type: object +required: + - type + - name + - inputs +properties: + type: + type: string + enum: [workflow] + description: Block type identifier + name: + type: string + description: Display name for this workflow block + inputs: + type: object + required: + - workflowId + properties: + workflowId: + type: string + description: ID of the workflow to execute + inputMapping: + type: object + description: Map current workflow data to sub-workflow inputs + additionalProperties: + type: string + description: Input value or reference to parent workflow data + environmentVariables: + type: object + description: Environment variables to pass to sub-workflow + additionalProperties: + type: string + description: Environment variable value + timeout: + type: number + description: Maximum execution time in milliseconds + default: 300000 + minimum: 1000 + maximum: 1800000 + connections: + type: object + properties: + success: + type: string + description: Target block ID for successful workflow completion + error: + type: string + description: Target block ID for error handling +``` + +## Connection Configuration + +Connections define where the workflow goes based on sub-workflow results: + +```yaml +connections: + success: # Target block ID for successful completion + error: # Target block ID for error handling (optional) +``` + +## Examples + +### Simple Workflow Execution + +```yaml +data-processor: + type: workflow + name: "Data Processing Workflow" + inputs: + workflowId: "data-processing-v2" + inputMapping: + rawData: + userId: + environmentVariables: + 
PROCESSING_MODE: "production" + LOG_LEVEL: "info" + connections: + success: process-results + error: workflow-error-handler +``` + +### Content Generation Pipeline + +```yaml +content-generator: + type: workflow + name: "Content Generation Pipeline" + inputs: + workflowId: "content-generation-v3" + inputMapping: + topic: + style: + targetAudience: + brandGuidelines: + environmentVariables: + CONTENT_API_KEY: "{{CONTENT_API_KEY}}" + QUALITY_THRESHOLD: "high" + timeout: 120000 + connections: + success: review-content + error: content-generation-failed +``` + +### Multi-Step Analysis Workflow + +```yaml +analysis-workflow: + type: workflow + name: "Analysis Workflow" + inputs: + workflowId: "comprehensive-analysis" + inputMapping: + document: + analysisType: "comprehensive" + includeMetrics: true + outputFormat: "structured" + environmentVariables: + ANALYSIS_MODEL: "gpt-4o" + OPENAI_API_KEY: "{{OPENAI_API_KEY}}" + CLAUDE_API_KEY: "{{CLAUDE_API_KEY}}" + connections: + success: compile-analysis-report + error: analysis-workflow-error +``` + +### Conditional Workflow Execution + +```yaml +customer-workflow-router: + type: condition + name: "Customer Workflow Router" + inputs: + conditions: + if: === "enterprise" + else-if: === "premium" + else: true + connections: + conditions: + if: enterprise-workflow + else-if: premium-workflow + else: standard-workflow + +enterprise-workflow: + type: workflow + name: "Enterprise Customer Workflow" + inputs: + workflowId: "enterprise-customer-processing" + inputMapping: + customerData: + accountManager: + tier: "enterprise" + environmentVariables: + PRIORITY_LEVEL: "high" + SLA_REQUIREMENTS: "strict" + connections: + success: enterprise-complete + +premium-workflow: + type: workflow + name: "Premium Customer Workflow" + inputs: + workflowId: "premium-customer-processing" + inputMapping: + customerData: + supportLevel: "premium" + environmentVariables: + PRIORITY_LEVEL: "medium" + connections: + success: premium-complete + 
+standard-workflow: + type: workflow + name: "Standard Customer Workflow" + inputs: + workflowId: "standard-customer-processing" + inputMapping: + customerData: + environmentVariables: + PRIORITY_LEVEL: "standard" + connections: + success: standard-complete +``` + +### Parallel Workflow Execution + +```yaml +parallel-workflows: + type: parallel + name: "Parallel Workflow Processing" + inputs: + parallelType: collection + collection: | + [ + {"workflowId": "sentiment-analysis", "focus": "sentiment"}, + {"workflowId": "topic-extraction", "focus": "topics"}, + {"workflowId": "entity-recognition", "focus": "entities"} + ] + connections: + success: merge-workflow-results + +execute-analysis-workflow: + type: workflow + name: "Execute Analysis Workflow" + parentId: parallel-workflows + inputs: + workflowId: + inputMapping: + content: + analysisType: + environmentVariables: + ANALYSIS_API_KEY: "{{ANALYSIS_API_KEY}}" + connections: + success: workflow-complete +``` + +### Error Handling Workflow + +```yaml +main-workflow: + type: workflow + name: "Main Processing Workflow" + inputs: + workflowId: "main-processing-v1" + inputMapping: + data: + timeout: 180000 + connections: + success: main-complete + error: error-recovery-workflow + +error-recovery-workflow: + type: workflow + name: "Error Recovery Workflow" + inputs: + workflowId: "error-recovery-v1" + inputMapping: + originalInput: + errorDetails: + failureTimestamp: "{{new Date().toISOString()}}" + environmentVariables: + RECOVERY_MODE: "automatic" + FALLBACK_ENABLED: "true" + connections: + success: recovery-complete + error: manual-intervention-required +``` + +## Input Mapping + +Map data from the parent workflow to the sub-workflow: + +```yaml +inputMapping: + # Static values + mode: "production" + version: "1.0" + + # References to parent workflow data + userData: + settings: + + # Complex object mapping + requestData: + id: + timestamp: "{{new Date().toISOString()}}" + source: "parent-workflow" +``` + +## Output 
References + +After a workflow block completes, you can reference its outputs: + +```yaml +# In subsequent blocks +next-block: + inputs: + workflowResult: # Sub-workflow output + executionTime: # Execution duration + status: # Execution status +``` + +## Best Practices + +- Use descriptive workflow IDs for clarity +- Map only necessary data to sub-workflows +- Set appropriate timeouts for workflow complexity +- Include error handling for robust execution +- Pass environment variables securely +- Test sub-workflows independently first +- Monitor nested workflow performance +- Use versioned workflow IDs for stability \ No newline at end of file diff --git a/apps/docs/content/docs/yaml/examples.mdx b/apps/docs/content/docs/yaml/examples.mdx new file mode 100644 index 000000000..b80718521 --- /dev/null +++ b/apps/docs/content/docs/yaml/examples.mdx @@ -0,0 +1,273 @@ +--- +title: YAML Workflow Examples +description: Examples of complete YAML workflows +--- + +import { Tab, Tabs } from 'fumadocs-ui/components/tabs' + +## Multi-Agent Chain Workflow + +A workflow where multiple AI agents process information sequentially: + +```yaml +version: '1.0' +blocks: + start: + type: starter + name: Start + inputs: + startWorkflow: manual + connections: + success: agent-1-initiator + + agent-1-initiator: + type: agent + name: Agent 1 Initiator + inputs: + systemPrompt: You are the first agent in a chain. Your role is to analyze the input and create an initial response that will be passed to the next agent. + userPrompt: |- + Welcome! I'm the first agent in our chain. + + Input to process: + + Please create an initial analysis or greeting that the next agent can build upon. Be creative and set a positive tone for the chain! + model: gpt-4o + temperature: 0.7 + apiKey: '{{OPENAI_API_KEY}}' + connections: + success: agent-2-enhancer + + agent-2-enhancer: + type: agent + name: Agent 2 Enhancer + inputs: + systemPrompt: You are the second agent in a chain. 
Take the output from Agent 1 and enhance it with additional insights or improvements. + userPrompt: |- + I'm the second agent! Here's what Agent 1 provided: + + <agent1initiator.content> + + Now I'll enhance this with additional details, insights, or improvements. Let me build upon their work! + model: gpt-4o + temperature: 0.7 + apiKey: '{{OPENAI_API_KEY}}' + connections: + success: agent-3-refiner + + agent-3-refiner: + type: agent + name: Agent 3 Refiner + inputs: + systemPrompt: You are the third agent in a chain. Take the enhanced output from Agent 2 and refine it further, adding structure or organization. + userPrompt: |- + I'm the third agent in our chain! Here's the enhanced work from Agent 2: + + <agent2enhancer.content> + + My job is to refine and organize this content. I'll add structure, clarity, and polish to make it even better! + model: gpt-4o + temperature: 0.6 + apiKey: '{{OPENAI_API_KEY}}' + connections: + success: agent-4-finalizer + + agent-4-finalizer: + type: agent + name: Agent 4 Finalizer + inputs: + systemPrompt: You are the final agent in a chain of 4. Create a comprehensive summary and conclusion based on all the previous agents' work. + userPrompt: |- + I'm the final agent! Here's the refined work from Agent 3: + + <agent3refiner.content> + + As the last agent in our chain, I'll create a final, polished summary that brings together all the work from our team of 4 agents. Let me conclude this beautifully! + model: gpt-4o + temperature: 0.5 + apiKey: '{{OPENAI_API_KEY}}' +``` + +## Router-Based Conditional Workflow + +A workflow that uses routing logic to send data to different agents based on conditions: + +```yaml +version: '1.0' +blocks: + start: + type: starter + name: Start + inputs: + startWorkflow: manual + connections: + success: router-1 + + router-1: + type: router + name: Router 1 + inputs: + prompt: go to agent 1 if <start.input> is greater than 10. else agent 2 if greater than 5. 
else agent 3 + model: gpt-4o + apiKey: '{{OPENAI_API_KEY}}' + connections: + success: + - agent-1 + - agent-2 + - agent-3 + + agent-1: + type: agent + name: Agent 1 + inputs: + systemPrompt: say 1 + model: gpt-4o + apiKey: '{{OPENAI_API_KEY}}' + + agent-2: + type: agent + name: Agent 2 + inputs: + systemPrompt: say 2 + model: gpt-4o + apiKey: '{{OPENAI_API_KEY}}' + + agent-3: + type: agent + name: Agent 3 + inputs: + systemPrompt: say 3 + model: gpt-4o + apiKey: '{{OPENAI_API_KEY}}' +``` + +## Web Search with Structured Output + +A workflow that searches the web using tools and returns structured data: + +```yaml +version: '1.0' +blocks: + 59eb07c1-1411-4b28-a274-fa78f55daf72: + type: starter + name: Start + inputs: + startWorkflow: manual + connections: + success: d77c2c98-56c4-432d-9338-9bac54a2d42f + d77c2c98-56c4-432d-9338-9bac54a2d42f: + type: agent + name: Agent 1 + inputs: + systemPrompt: look up the user input. use structured output + userPrompt: + model: claude-sonnet-4-0 + apiKey: '{{ANTHROPIC_API_KEY}}' + tools: + - type: exa + title: Exa + params: + type: auto + apiKey: '{{EXA_API_KEY}}' + numResults: '' + toolId: exa_search + operation: exa_search + isExpanded: true + usageControl: auto + responseFormat: |- + { + "name": "output_schema", + "description": "Defines the structure for an output object.", + "strict": true, + "schema": { + "type": "object", + "properties": { + "output": { + "type": "string", + "description": "The output value" + } + }, + "additionalProperties": false, + "required": ["output"] + } + } +``` + +## Loop Processing with Collection + +A workflow that processes each item in a collection using a loop: + +```yaml +version: '1.0' +blocks: + start: + type: starter + name: Start + inputs: + startWorkflow: manual + connections: + success: food-analysis-loop + food-analysis-loop: + type: loop + name: Food Analysis Loop + inputs: + count: 5 + loopType: forEach + collection: '["apple", "banana", "carrot"]' + connections: + loop: + start: 
calorie-agent + calorie-agent: + type: agent + name: Calorie Analyzer + inputs: + systemPrompt: Return the number of calories in the food + userPrompt: <loop.currentItem> + model: claude-sonnet-4-0 + apiKey: '{{ANTHROPIC_API_KEY}}' + parentId: food-analysis-loop +``` + +## Email Classification and Response + +A workflow that classifies emails and generates appropriate responses: + +```yaml +version: '1.0' +blocks: + start: + type: starter + name: Start + inputs: + startWorkflow: manual + connections: + success: email-classifier + + email-classifier: + type: agent + name: Email Classifier + inputs: + systemPrompt: Classify emails into categories and extract key information. + userPrompt: | + Classify this email: <start.input> + + Categories: support, billing, sales, feedback + Extract: urgency level, customer sentiment, main request + model: gpt-4o + apiKey: '{{OPENAI_API_KEY}}' + connections: + success: response-generator + + response-generator: + type: agent + name: Response Generator + inputs: + systemPrompt: Generate appropriate responses based on email classification. + userPrompt: | + Email classification: <emailclassifier.content> + Original email: <start.input> + + Generate a professional, helpful response addressing the customer's needs. + model: gpt-4o + temperature: 0.7 + apiKey: '{{OPENAI_API_KEY}}' +``` diff --git a/apps/docs/content/docs/yaml/index.mdx b/apps/docs/content/docs/yaml/index.mdx new file mode 100644 index 000000000..236723913 --- /dev/null +++ b/apps/docs/content/docs/yaml/index.mdx @@ -0,0 +1,159 @@ +--- +title: YAML Workflow Reference +description: Complete guide to writing YAML workflows in Sim Studio +--- + +import { Card, Cards } from "fumadocs-ui/components/card"; +import { Step, Steps } from "fumadocs-ui/components/steps"; +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +YAML workflows provide a powerful way to define, version, and share workflow configurations in Sim Studio. This reference guide covers the complete YAML syntax, block schemas, and best practices for creating robust workflows. 
+ +## Quick Start + +Every Sim Studio workflow follows this basic structure: + +```yaml +version: '1.0' +blocks: + start: + type: starter + name: Start + inputs: + startWorkflow: manual + connections: + success: agent-1 + + agent-1: + type: agent + name: "AI Assistant" + inputs: + systemPrompt: "You are a helpful assistant." + userPrompt: 'Hi' + model: gpt-4o + apiKey: '{{OPENAI_API_KEY}}' +``` + +## Core Concepts + + + + Version Declaration: Must be exactly `version: '1.0'` (with quotes) + + + Blocks Structure: All workflow blocks are defined under the `blocks` key + + + Block References: Use block names in lowercase with spaces removed (e.g., ``) + + + Environment Variables: Reference with double curly braces `{{VARIABLE_NAME}}` + + + +## Block Types + +Sim Studio supports several core block types, each with specific YAML schemas: + + + + Workflow entry point with support for manual, webhook, and scheduled triggers + + + AI-powered processing with support for tools and structured output + + + Custom JavaScript/TypeScript code execution + + + HTTP requests to external services + + + Conditional branching based on boolean expressions + + + AI-powered intelligent routing to multiple paths + + + Iterative processing with for and forEach loops + + + Concurrent execution across multiple instances + + + Webhook triggers for external integrations + + + Validate outputs against defined criteria and metrics + + + Execute other workflows as reusable components + + + Final workflow output formatting + + + +## Block Reference Syntax + +The most critical aspect of YAML workflows is understanding how to reference data between blocks: + +### Basic Rules + +1. **Use the block name** (not the block ID) converted to lowercase with spaces removed +2. **Add the appropriate property** (.content for agents, .output for tools) +3. 
**When using chat, reference the starter block** as `` + +### Examples + +```yaml +# Block definitions +email-processor: + type: agent + name: "Email Agent" + # ... configuration + +data-formatter: + type: function + name: "Data Agent" + # ... configuration + +# Referencing their outputs +next-block: + type: agent + name: "Next Step" + inputs: + userPrompt: | + Process this email: + Use this formatted data: + Original input: +``` + +### Special Cases + +- **Loop Variables**: ``, ``, `` +- **Parallel Variables**: ``, `` + +## Environment Variables + +Use environment variables for sensitive data like API keys: + +```yaml +inputs: + apiKey: '{{OPENAI_API_KEY}}' + database: '{{DATABASE_URL}}' + token: '{{SLACK_BOT_TOKEN}}' +``` + +## Best Practices + +- **Keep block names human-readable**: "Email Processor" for UI display +- **Reference environment variables**: Never hardcode API keys +- **Structure for readability**: Group related blocks logically +- **Test incrementally**: Build workflows step by step + +## Next Steps + +- [Block Reference Syntax](/yaml/block-reference) - Detailed reference rules +- [Complete Block Schemas](/yaml/blocks) - All available block types +- [Workflow Examples](/yaml/examples) - Real-world workflow patterns \ No newline at end of file diff --git a/apps/docs/content/docs/yaml/meta.json b/apps/docs/content/docs/yaml/meta.json new file mode 100644 index 000000000..79321b1ea --- /dev/null +++ b/apps/docs/content/docs/yaml/meta.json @@ -0,0 +1,4 @@ +{ + "title": "YAML Reference", + "pages": ["index", "block-reference", "blocks", "examples"] +} diff --git a/apps/sim/app/api/copilot/checkpoints/[id]/revert/route.ts b/apps/sim/app/api/copilot/checkpoints/[id]/revert/route.ts new file mode 100644 index 000000000..3c307ef5f --- /dev/null +++ b/apps/sim/app/api/copilot/checkpoints/[id]/revert/route.ts @@ -0,0 +1,138 @@ +import { and, eq } from 'drizzle-orm' +import { type NextRequest, NextResponse } from 'next/server' +import { getSession } from 
'@/lib/auth' +import { createLogger } from '@/lib/logs/console-logger' +import { db } from '@/db' +import { copilotCheckpoints, workflow as workflowTable } from '@/db/schema' + +const logger = createLogger('RevertCheckpointAPI') + +/** + * POST /api/copilot/checkpoints/[id]/revert + * Revert workflow to a specific checkpoint + */ +export async function POST(request: NextRequest, { params }: { params: Promise<{ id: string }> }) { + const requestId = crypto.randomUUID().slice(0, 8) + const checkpointId = (await params).id + + try { + const session = await getSession() + if (!session?.user?.id) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) + } + + logger.info(`[${requestId}] Reverting to checkpoint: ${checkpointId}`, { + userId: session.user.id, + }) + + // Get the checkpoint + const checkpoint = await db + .select() + .from(copilotCheckpoints) + .where( + and(eq(copilotCheckpoints.id, checkpointId), eq(copilotCheckpoints.userId, session.user.id)) + ) + .limit(1) + + if (!checkpoint.length) { + return NextResponse.json({ error: 'Checkpoint not found' }, { status: 404 }) + } + + const checkpointData = checkpoint[0] + const { workflowId, yaml: yamlContent } = checkpointData + + logger.info(`[${requestId}] Processing checkpoint revert`, { + workflowId, + yamlLength: yamlContent.length, + }) + + // Use the consolidated YAML endpoint instead of duplicating the processing logic + const yamlEndpointUrl = `${process.env.NEXT_PUBLIC_BASE_URL || 'http://localhost:3000'}/api/workflows/${workflowId}/yaml` + + const yamlResponse = await fetch(yamlEndpointUrl, { + method: 'PUT', + headers: { + 'Content-Type': 'application/json', + // Forward auth cookies from the original request + Cookie: request.headers.get('Cookie') || '', + }, + body: JSON.stringify({ + yamlContent, + description: `Reverted to checkpoint from ${new Date(checkpointData.createdAt).toLocaleString()}`, + source: 'checkpoint_revert', + applyAutoLayout: true, + createCheckpoint: false, // 
Don't create a checkpoint when reverting to one + }), + }) + + if (!yamlResponse.ok) { + const errorData = await yamlResponse.json() + logger.error(`[${requestId}] Consolidated YAML endpoint failed:`, errorData) + return NextResponse.json( + { + success: false, + error: 'Failed to revert checkpoint via YAML endpoint', + details: errorData.errors || [errorData.error || 'Unknown error'], + }, + { status: yamlResponse.status } + ) + } + + const yamlResult = await yamlResponse.json() + + if (!yamlResult.success) { + logger.error(`[${requestId}] YAML endpoint returned failure:`, yamlResult) + return NextResponse.json( + { + success: false, + error: 'Failed to process checkpoint YAML', + details: yamlResult.errors || ['Unknown error'], + }, + { status: 400 } + ) + } + + // Update workflow's lastSynced timestamp + await db + .update(workflowTable) + .set({ + lastSynced: new Date(), + updatedAt: new Date(), + }) + .where(eq(workflowTable.id, workflowId)) + + // Notify the socket server to tell clients to rehydrate stores from database + try { + const socketUrl = process.env.SOCKET_URL || 'http://localhost:3002' + await fetch(`${socketUrl}/api/copilot-workflow-edit`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + workflowId, + description: `Reverted to checkpoint from ${new Date(checkpointData.createdAt).toLocaleString()}`, + }), + }) + logger.info(`[${requestId}] Notified socket server of checkpoint revert`) + } catch (socketError) { + logger.warn(`[${requestId}] Failed to notify socket server:`, socketError) + } + + logger.info(`[${requestId}] Successfully reverted to checkpoint`) + + return NextResponse.json({ + success: true, + message: `Successfully reverted to checkpoint from ${new Date(checkpointData.createdAt).toLocaleString()}`, + summary: yamlResult.summary || `Restored workflow from checkpoint.`, + warnings: yamlResult.warnings || [], + data: yamlResult.data, + }) + } catch (error) { + 
logger.error(`[${requestId}] Error reverting checkpoint:`, error) + return NextResponse.json( + { + error: `Failed to revert checkpoint: ${error instanceof Error ? error.message : 'Unknown error'}`, + }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/copilot/checkpoints/route.ts b/apps/sim/app/api/copilot/checkpoints/route.ts new file mode 100644 index 000000000..2f7d97f96 --- /dev/null +++ b/apps/sim/app/api/copilot/checkpoints/route.ts @@ -0,0 +1,64 @@ +import { and, desc, eq } from 'drizzle-orm' +import { type NextRequest, NextResponse } from 'next/server' +import { getSession } from '@/lib/auth' +import { createLogger } from '@/lib/logs/console-logger' +import { db } from '@/db' +import { copilotCheckpoints } from '@/db/schema' + +const logger = createLogger('CopilotCheckpointsAPI') + +/** + * GET /api/copilot/checkpoints + * List checkpoints for a specific chat + */ +export async function GET(request: NextRequest) { + const requestId = crypto.randomUUID() + + try { + const session = await getSession() + if (!session?.user?.id) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) + } + + const { searchParams } = new URL(request.url) + const chatId = searchParams.get('chatId') + const limit = Number(searchParams.get('limit')) || 10 + const offset = Number(searchParams.get('offset')) || 0 + + if (!chatId) { + return NextResponse.json({ error: 'chatId is required' }, { status: 400 }) + } + + logger.info(`[${requestId}] Listing checkpoints for chat: ${chatId}`, { + userId: session.user.id, + limit, + offset, + }) + + const checkpoints = await db + .select() + .from(copilotCheckpoints) + .where( + and(eq(copilotCheckpoints.userId, session.user.id), eq(copilotCheckpoints.chatId, chatId)) + ) + .orderBy(desc(copilotCheckpoints.createdAt)) + .limit(limit) + .offset(offset) + + // Format timestamps to ISO strings for consistent timezone handling + const formattedCheckpoints = checkpoints.map((checkpoint) => ({ + id: checkpoint.id, + 
userId: checkpoint.userId, + workflowId: checkpoint.workflowId, + chatId: checkpoint.chatId, + yaml: checkpoint.yaml, + createdAt: checkpoint.createdAt.toISOString(), + updatedAt: checkpoint.updatedAt.toISOString(), + })) + + return NextResponse.json({ checkpoints: formattedCheckpoints }) + } catch (error) { + logger.error(`[${requestId}] Error listing checkpoints:`, error) + return NextResponse.json({ error: 'Failed to list checkpoints' }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/copilot/docs/route.ts b/apps/sim/app/api/copilot/docs/route.ts deleted file mode 100644 index a037d8029..000000000 --- a/apps/sim/app/api/copilot/docs/route.ts +++ /dev/null @@ -1,281 +0,0 @@ -import { type NextRequest, NextResponse } from 'next/server' -import { z } from 'zod' -import { getSession } from '@/lib/auth' -import { - type CopilotChat, - type CopilotMessage, - createChat, - generateChatTitle, - generateDocsResponse, - getChat, - updateChat, -} from '@/lib/copilot/service' -import { createLogger } from '@/lib/logs/console-logger' - -const logger = createLogger('CopilotDocsAPI') - -// Schema for docs queries -const DocsQuerySchema = z.object({ - query: z.string().min(1, 'Query is required'), - topK: z.number().min(1).max(20).default(5), - provider: z.string().optional(), - model: z.string().optional(), - stream: z.boolean().optional().default(false), - chatId: z.string().optional(), - workflowId: z.string().optional(), - createNewChat: z.boolean().optional().default(false), -}) - -/** - * POST /api/copilot/docs - * Ask questions about documentation using RAG - */ -export async function POST(req: NextRequest) { - const requestId = crypto.randomUUID() - - try { - const session = await getSession() - if (!session?.user?.id) { - return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) - } - - const body = await req.json() - const { query, topK, provider, model, stream, chatId, workflowId, createNewChat } = - DocsQuerySchema.parse(body) - - 
logger.info(`[${requestId}] Docs RAG query: "${query}"`, { - provider, - model, - topK, - chatId, - workflowId, - createNewChat, - userId: session.user.id, - }) - - // Handle chat context - let currentChat: CopilotChat | null = null - let conversationHistory: CopilotMessage[] = [] - - if (chatId) { - // Load existing chat - currentChat = await getChat(chatId, session.user.id) - if (currentChat) { - conversationHistory = currentChat.messages - } - } else if (createNewChat && workflowId) { - // Create new chat - currentChat = await createChat(session.user.id, workflowId) - } - - // Generate docs response - const result = await generateDocsResponse(query, conversationHistory, { - topK, - provider, - model, - stream, - workflowId, - requestId, - }) - - if (stream && result.response instanceof ReadableStream) { - // Handle streaming response with docs sources - logger.info(`[${requestId}] Returning streaming docs response`) - - const encoder = new TextEncoder() - - return new Response( - new ReadableStream({ - async start(controller) { - const reader = (result.response as ReadableStream).getReader() - let accumulatedResponse = '' - - try { - // Send initial metadata including sources - const metadata = { - type: 'metadata', - chatId: currentChat?.id, - sources: result.sources, - citations: result.sources.map((source, index) => ({ - id: index + 1, - title: source.title, - url: source.url, - })), - metadata: { - requestId, - chunksFound: result.sources.length, - query, - topSimilarity: result.sources[0]?.similarity, - provider, - model, - }, - } - controller.enqueue(encoder.encode(`data: ${JSON.stringify(metadata)}\n\n`)) - - while (true) { - const { done, value } = await reader.read() - if (done) break - - const chunk = new TextDecoder().decode(value) - // Clean up any object serialization artifacts in streaming content - const cleanedChunk = chunk.replace(/\[object Object\],?/g, '') - accumulatedResponse += cleanedChunk - - const contentChunk = { - type: 'content', - 
content: cleanedChunk, - } - controller.enqueue(encoder.encode(`data: ${JSON.stringify(contentChunk)}\n\n`)) - } - - // Send completion marker first to unblock the user - controller.enqueue(encoder.encode(`data: {"type":"done"}\n\n`)) - - // Save conversation to database asynchronously (non-blocking) - if (currentChat) { - // Fire-and-forget database save to avoid blocking stream completion - Promise.resolve() - .then(async () => { - try { - const userMessage: CopilotMessage = { - id: crypto.randomUUID(), - role: 'user', - content: query, - timestamp: new Date().toISOString(), - } - - const assistantMessage: CopilotMessage = { - id: crypto.randomUUID(), - role: 'assistant', - content: accumulatedResponse, - timestamp: new Date().toISOString(), - citations: result.sources.map((source, index) => ({ - id: index + 1, - title: source.title, - url: source.url, - })), - } - - const updatedMessages = [ - ...conversationHistory, - userMessage, - assistantMessage, - ] - - // Generate title if this is the first message - let updatedTitle = currentChat.title ?? 
undefined - if (!updatedTitle && conversationHistory.length === 0) { - updatedTitle = await generateChatTitle(query) - } - - // Update the chat in database - await updateChat(currentChat.id, session.user.id, { - title: updatedTitle, - messages: updatedMessages, - }) - - logger.info( - `[${requestId}] Updated chat ${currentChat.id} with new docs messages` - ) - } catch (dbError) { - logger.error(`[${requestId}] Failed to save chat to database:`, dbError) - // Database errors don't affect the user's streaming experience - } - }) - .catch((error) => { - logger.error(`[${requestId}] Unexpected error in async database save:`, error) - }) - } - } catch (error) { - logger.error(`[${requestId}] Docs streaming error:`, error) - try { - const errorChunk = { - type: 'error', - error: 'Streaming failed', - } - controller.enqueue(encoder.encode(`data: ${JSON.stringify(errorChunk)}\n\n`)) - } catch (enqueueError) { - logger.error(`[${requestId}] Failed to enqueue error response:`, enqueueError) - } - } finally { - controller.close() - } - }, - }), - { - headers: { - 'Content-Type': 'text/event-stream', - 'Cache-Control': 'no-cache', - Connection: 'keep-alive', - }, - } - ) - } - - // Handle non-streaming response - logger.info(`[${requestId}] Docs RAG response generated successfully`) - - // Save conversation to database if we have a chat - if (currentChat) { - const userMessage: CopilotMessage = { - id: crypto.randomUUID(), - role: 'user', - content: query, - timestamp: new Date().toISOString(), - } - - const assistantMessage: CopilotMessage = { - id: crypto.randomUUID(), - role: 'assistant', - content: typeof result.response === 'string' ? 
result.response : '[Streaming Response]', - timestamp: new Date().toISOString(), - citations: result.sources.map((source, index) => ({ - id: index + 1, - title: source.title, - url: source.url, - })), - } - - const updatedMessages = [...conversationHistory, userMessage, assistantMessage] - - // Generate title if this is the first message - let updatedTitle = currentChat.title ?? undefined - if (!updatedTitle && conversationHistory.length === 0) { - updatedTitle = await generateChatTitle(query) - } - - // Update the chat in database - await updateChat(currentChat.id, session.user.id, { - title: updatedTitle, - messages: updatedMessages, - }) - - logger.info(`[${requestId}] Updated chat ${currentChat.id} with new docs messages`) - } - - return NextResponse.json({ - success: true, - response: result.response, - sources: result.sources, - chatId: currentChat?.id, - metadata: { - requestId, - chunksFound: result.sources.length, - query, - topSimilarity: result.sources[0]?.similarity, - provider, - model, - }, - }) - } catch (error) { - if (error instanceof z.ZodError) { - return NextResponse.json( - { error: 'Invalid request data', details: error.errors }, - { status: 400 } - ) - } - - logger.error(`[${requestId}] Copilot docs error:`, error) - return NextResponse.json({ error: 'Internal server error' }, { status: 500 }) - } -} diff --git a/apps/sim/app/api/copilot/route.ts b/apps/sim/app/api/copilot/route.ts index ddf06baf9..189a5ddd2 100644 --- a/apps/sim/app/api/copilot/route.ts +++ b/apps/sim/app/api/copilot/route.ts @@ -25,6 +25,7 @@ const SendMessageSchema = z.object({ message: z.string().min(1, 'Message is required'), chatId: z.string().optional(), workflowId: z.string().optional(), + mode: z.enum(['ask', 'agent']).optional().default('ask'), createNewChat: z.boolean().optional().default(false), stream: z.boolean().optional().default(false), }) @@ -90,7 +91,8 @@ export async function POST(req: NextRequest) { try { const body = await req.json() - const { message, 
chatId, workflowId, createNewChat, stream } = SendMessageSchema.parse(body) + const { message, chatId, workflowId, mode, createNewChat, stream } = + SendMessageSchema.parse(body) const session = await getSession() if (!session?.user?.id) { @@ -100,6 +102,7 @@ export async function POST(req: NextRequest) { logger.info(`[${requestId}] Copilot message: "${message}"`, { chatId, workflowId, + mode, createNewChat, stream, userId: session.user.id, @@ -110,6 +113,7 @@ export async function POST(req: NextRequest) { message, chatId, workflowId, + mode, createNewChat, stream, userId: session.user.id, diff --git a/apps/sim/app/api/docs/search/route.ts b/apps/sim/app/api/docs/search/route.ts index 23ee3a034..28ab0bb9e 100644 --- a/apps/sim/app/api/docs/search/route.ts +++ b/apps/sim/app/api/docs/search/route.ts @@ -36,7 +36,7 @@ export async function POST( ): Promise> { try { const requestBody: DocsSearchRequest = await request.json() - const { query, topK = 5 } = requestBody + const { query, topK = 10 } = requestBody if (!query) { const errorResponse: DocsSearchErrorResponse = { diff --git a/apps/sim/app/api/tools/edit-workflow/route.ts b/apps/sim/app/api/tools/edit-workflow/route.ts new file mode 100644 index 000000000..5cd036c71 --- /dev/null +++ b/apps/sim/app/api/tools/edit-workflow/route.ts @@ -0,0 +1,412 @@ +import { eq } from 'drizzle-orm' +import { type NextRequest, NextResponse } from 'next/server' +import { autoLayoutWorkflow } from '@/lib/autolayout/service' +import { createLogger } from '@/lib/logs/console-logger' +import { + loadWorkflowFromNormalizedTables, + saveWorkflowToNormalizedTables, +} from '@/lib/workflows/db-helpers' +import { generateWorkflowYaml } from '@/lib/workflows/yaml-generator' +import { getUserId } from '@/app/api/auth/oauth/utils' +import { getBlock } from '@/blocks' +import { db } from '@/db' +import { copilotCheckpoints, workflow as workflowTable } from '@/db/schema' +import { generateLoopBlocks, generateParallelBlocks } from 
'@/stores/workflows/workflow/utils' +import { convertYamlToWorkflow, parseWorkflowYaml } from '@/stores/workflows/yaml/importer' + +const logger = createLogger('EditWorkflowAPI') + +export async function POST(request: NextRequest) { + const requestId = crypto.randomUUID().slice(0, 8) + + try { + const body = await request.json() + const { yamlContent, workflowId, description, chatId } = body + + if (!yamlContent) { + return NextResponse.json( + { success: false, error: 'yamlContent is required' }, + { status: 400 } + ) + } + + if (!workflowId) { + return NextResponse.json({ success: false, error: 'workflowId is required' }, { status: 400 }) + } + + logger.info(`[${requestId}] Processing workflow edit request`, { + workflowId, + yamlLength: yamlContent.length, + hasDescription: !!description, + hasChatId: !!chatId, + }) + + // Log the full YAML content for debugging + logger.info(`[${requestId}] Full YAML content from copilot:`) + logger.info('='.repeat(80)) + logger.info(yamlContent) + logger.info('='.repeat(80)) + + // Get the user ID for checkpoint creation + const userId = await getUserId(requestId, workflowId) + if (!userId) { + return NextResponse.json({ success: false, error: 'User not found' }, { status: 404 }) + } + + // Create checkpoint before making changes (only if chatId is provided) + if (chatId) { + try { + logger.info(`[${requestId}] Creating checkpoint before workflow edit`) + + // Get current workflow state + const currentWorkflowData = await loadWorkflowFromNormalizedTables(workflowId) + + if (currentWorkflowData) { + // Generate YAML from current state + const currentYaml = generateWorkflowYaml(currentWorkflowData) + + // Create checkpoint + await db.insert(copilotCheckpoints).values({ + userId, + workflowId, + chatId, + yaml: currentYaml, + }) + + logger.info(`[${requestId}] Checkpoint created successfully`) + } else { + logger.warn(`[${requestId}] Could not load current workflow state for checkpoint`) + } + } catch (checkpointError) { + 
logger.error(`[${requestId}] Failed to create checkpoint:`, checkpointError) + // Continue with workflow edit even if checkpoint fails + } + } + + // Parse YAML content server-side + const { data: yamlWorkflow, errors: parseErrors } = parseWorkflowYaml(yamlContent) + + if (!yamlWorkflow || parseErrors.length > 0) { + logger.error('[edit-workflow] YAML parsing failed', { parseErrors }) + return NextResponse.json({ + success: true, + data: { + success: false, + message: 'Failed to parse YAML workflow', + errors: parseErrors, + warnings: [], + }, + }) + } + + // Convert YAML to workflow format + const { blocks, edges, errors: convertErrors, warnings } = convertYamlToWorkflow(yamlWorkflow) + + if (convertErrors.length > 0) { + logger.error('[edit-workflow] YAML conversion failed', { convertErrors }) + return NextResponse.json({ + success: true, + data: { + success: false, + message: 'Failed to convert YAML to workflow', + errors: convertErrors, + warnings, + }, + }) + } + + // Create workflow state (same format as applyWorkflowDiff) + const newWorkflowState: any = { + blocks: {} as Record, + edges: [] as any[], + loops: {} as Record, + parallels: {} as Record, + lastSaved: Date.now(), + isDeployed: false, + deployedAt: undefined, + deploymentStatuses: {} as Record, + hasActiveSchedule: false, + hasActiveWebhook: false, + } + + // Process blocks and assign new IDs (complete replacement) + const blockIdMapping = new Map() + + for (const block of blocks) { + const newId = crypto.randomUUID() + blockIdMapping.set(block.id, newId) + + // Get block configuration to set proper defaults + const blockConfig = getBlock(block.type) + const subBlocks: Record = {} + const outputs: Record = {} + + // Set up subBlocks from block configuration + if (blockConfig?.subBlocks) { + blockConfig.subBlocks.forEach((subBlock) => { + subBlocks[subBlock.id] = { + id: subBlock.id, + type: subBlock.type, + value: null, + } + }) + } + + // Set up outputs from block configuration + if 
(blockConfig?.outputs) { + if (Array.isArray(blockConfig.outputs)) { + blockConfig.outputs.forEach((output) => { + outputs[output.id] = { type: output.type } + }) + } else if (typeof blockConfig.outputs === 'object') { + Object.assign(outputs, blockConfig.outputs) + } + } + + newWorkflowState.blocks[newId] = { + id: newId, + type: block.type, + name: block.name, + position: block.position, + subBlocks, + outputs, + enabled: true, + horizontalHandles: true, + isWide: false, + height: 0, + data: block.data || {}, + } + + // Set input values as subblock values with block reference mapping + if (block.inputs && typeof block.inputs === 'object') { + Object.entries(block.inputs).forEach(([key, value]) => { + if (newWorkflowState.blocks[newId].subBlocks[key]) { + // Update block references in values to use new mapped IDs + let processedValue = value + if (typeof value === 'string' && value.includes('<') && value.includes('>')) { + // Update block references to use new mapped IDs + const blockMatches = value.match(/<([^>]+)>/g) + if (blockMatches) { + for (const match of blockMatches) { + const path = match.slice(1, -1) + const [blockRef] = path.split('.') + + // Skip system references (start, loop, parallel, variable) + if (['start', 'loop', 'parallel', 'variable'].includes(blockRef.toLowerCase())) { + continue + } + + // Check if this references an old block ID that needs mapping + const newMappedId = blockIdMapping.get(blockRef) + if (newMappedId) { + logger.info( + `[${requestId}] Updating block reference: ${blockRef} -> ${newMappedId}` + ) + processedValue = processedValue.replace( + new RegExp(`<${blockRef}\\.`, 'g'), + `<${newMappedId}.` + ) + processedValue = processedValue.replace( + new RegExp(`<${blockRef}>`, 'g'), + `<${newMappedId}>` + ) + } + } + } + } + newWorkflowState.blocks[newId].subBlocks[key].value = processedValue + } + }) + } + } + + // Update parent-child relationships with mapped IDs + logger.info(`[${requestId}] Block ID mapping:`, 
Object.fromEntries(blockIdMapping)) + for (const [newId, blockData] of Object.entries(newWorkflowState.blocks)) { + const block = blockData as any + if (block.data?.parentId) { + logger.info( + `[${requestId}] Found child block ${block.name} with parentId: ${block.data.parentId}` + ) + const mappedParentId = blockIdMapping.get(block.data.parentId) + if (mappedParentId) { + logger.info( + `[${requestId}] Updating parent reference: ${block.data.parentId} -> ${mappedParentId}` + ) + block.data.parentId = mappedParentId + // Ensure extent is set for child blocks + if (!block.data.extent) { + block.data.extent = 'parent' + } + } else { + logger.error( + `[${requestId}] ❌ Parent block not found for mapping: ${block.data.parentId}` + ) + logger.error(`[${requestId}] Available mappings:`, Array.from(blockIdMapping.keys())) + // Remove invalid parent reference + block.data.parentId = undefined + block.data.extent = undefined + } + } + } + + // Process edges with mapped IDs + for (const edge of edges) { + const sourceId = blockIdMapping.get(edge.source) + const targetId = blockIdMapping.get(edge.target) + + if (sourceId && targetId) { + newWorkflowState.edges.push({ + id: crypto.randomUUID(), + source: sourceId, + target: targetId, + sourceHandle: edge.sourceHandle, + targetHandle: edge.targetHandle, + type: edge.type || 'default', + }) + } + } + + // Generate loop and parallel configurations from the imported blocks + const loops = generateLoopBlocks(newWorkflowState.blocks) + const parallels = generateParallelBlocks(newWorkflowState.blocks) + + // Update workflow state with generated configurations + newWorkflowState.loops = loops + newWorkflowState.parallels = parallels + + logger.info(`[${requestId}] Generated loop and parallel configurations`, { + loopsCount: Object.keys(loops).length, + parallelsCount: Object.keys(parallels).length, + loopIds: Object.keys(loops), + parallelIds: Object.keys(parallels), + }) + + // Apply intelligent autolayout to optimize block positions 
+ try { + logger.info( + `[${requestId}] Applying autolayout to ${Object.keys(newWorkflowState.blocks).length} blocks` + ) + + const layoutedBlocks = await autoLayoutWorkflow( + newWorkflowState.blocks, + newWorkflowState.edges, + { + strategy: 'smart', + direction: 'auto', + spacing: { + horizontal: 400, + vertical: 200, + layer: 600, + }, + alignment: 'center', + padding: { + x: 200, + y: 200, + }, + } + ) + + // Update workflow state with optimized positions + newWorkflowState.blocks = layoutedBlocks + + logger.info(`[${requestId}] Autolayout completed successfully`) + } catch (layoutError) { + // Log the error but don't fail the entire workflow save + logger.warn(`[${requestId}] Autolayout failed, using original positions:`, layoutError) + } + + // Save directly to database using the same function as the workflow state API + const saveResult = await saveWorkflowToNormalizedTables(workflowId, newWorkflowState) + + if (!saveResult.success) { + logger.error('[edit-workflow] Failed to save workflow state:', saveResult.error) + return NextResponse.json({ + success: true, + data: { + success: false, + message: `Database save failed: ${saveResult.error || 'Unknown error'}`, + errors: [saveResult.error || 'Database save failed'], + warnings, + }, + }) + } + + // Update workflow's lastSynced timestamp + await db + .update(workflowTable) + .set({ + lastSynced: new Date(), + updatedAt: new Date(), + state: saveResult.jsonBlob, // Also update JSON blob for backward compatibility + }) + .where(eq(workflowTable.id, workflowId)) + + // Notify the socket server to tell clients to rehydrate stores from database + try { + const socketUrl = process.env.SOCKET_URL || 'http://localhost:3002' + await fetch(`${socketUrl}/api/copilot-workflow-edit`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + workflowId, + description: description || 'Copilot edited workflow', + }), + }) + logger.info('[edit-workflow] Notified socket server to 
rehydrate client stores from database') + } catch (socketError) { + // Don't fail the main request if socket notification fails + logger.warn('[edit-workflow] Failed to notify socket server:', socketError) + } + + // Calculate summary with loop/parallel information + const loopBlocksCount = Object.values(newWorkflowState.blocks).filter( + (b: any) => b.type === 'loop' + ).length + const parallelBlocksCount = Object.values(newWorkflowState.blocks).filter( + (b: any) => b.type === 'parallel' + ).length + + let summaryDetails = `Successfully created workflow with ${blocks.length} blocks and ${edges.length} connections.` + + if (loopBlocksCount > 0 || parallelBlocksCount > 0) { + summaryDetails += ` Generated ${Object.keys(loops).length} loop configurations and ${Object.keys(parallels).length} parallel configurations.` + } + + const result = { + success: true, + errors: [], + warnings, + summary: summaryDetails, + } + + logger.info('[edit-workflow] Import result', { + success: result.success, + errorCount: result.errors.length, + warningCount: result.warnings.length, + summary: result.summary, + }) + + return NextResponse.json({ + success: true, + data: { + success: result.success, + message: result.success + ? `Workflow updated successfully${description ? `: ${description}` : ''}` + : 'Failed to update workflow', + summary: result.summary, + errors: result.errors, + warnings: result.warnings, + }, + }) + } catch (error) { + logger.error('[edit-workflow] Error:', error) + return NextResponse.json( + { + success: false, + error: `Failed to edit workflow: ${error instanceof Error ? 
error.message : 'Unknown error'}`, + }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/tools/get-all-blocks/route.ts b/apps/sim/app/api/tools/get-all-blocks/route.ts new file mode 100644 index 000000000..6cf96bf15 --- /dev/null +++ b/apps/sim/app/api/tools/get-all-blocks/route.ts @@ -0,0 +1,66 @@ +import { type NextRequest, NextResponse } from 'next/server' +import { createLogger } from '@/lib/logs/console-logger' +import { registry as blockRegistry } from '@/blocks/registry' + +const logger = createLogger('GetAllBlocksAPI') + +export async function POST(request: NextRequest) { + try { + const body = await request.json() + const { includeDetails = false, filterCategory } = body + + logger.info('Getting all blocks and tools', { includeDetails, filterCategory }) + + // Create mapping of block_id -> [tool_ids] + const blockToToolsMapping: Record = {} + + // Process blocks - filter out hidden blocks and map to their tools + Object.entries(blockRegistry) + .filter(([blockType, blockConfig]) => { + // Filter out hidden blocks + if (blockConfig.hideFromToolbar) return false + + // Apply category filter if specified + if (filterCategory && blockConfig.category !== filterCategory) return false + + return true + }) + .forEach(([blockType, blockConfig]) => { + // Get the tools for this block + const blockTools = blockConfig.tools?.access || [] + blockToToolsMapping[blockType] = blockTools + }) + + const totalBlocks = Object.keys(blockRegistry).length + const includedBlocks = Object.keys(blockToToolsMapping).length + const filteredBlocksCount = totalBlocks - includedBlocks + + // Log block to tools mapping for debugging + const blockToolsInfo = Object.entries(blockToToolsMapping) + .map(([blockType, tools]) => `${blockType}: [${tools.join(', ')}]`) + .sort() + + logger.info(`Successfully mapped ${includedBlocks} blocks to their tools`, { + totalBlocks, + includedBlocks, + filteredBlocks: filteredBlocksCount, + filterCategory, + blockToolsMapping: blockToolsInfo, + 
outputMapping: blockToToolsMapping, + }) + + return NextResponse.json({ + success: true, + data: blockToToolsMapping, + }) + } catch (error) { + logger.error('Get all blocks failed', error) + return NextResponse.json( + { + success: false, + error: `Failed to get blocks and tools: ${error instanceof Error ? error.message : 'Unknown error'}`, + }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/tools/get-blocks-metadata/route.ts b/apps/sim/app/api/tools/get-blocks-metadata/route.ts new file mode 100644 index 000000000..aa8bffa12 --- /dev/null +++ b/apps/sim/app/api/tools/get-blocks-metadata/route.ts @@ -0,0 +1,239 @@ +import { existsSync, readFileSync } from 'fs' +import { join } from 'path' +import { type NextRequest, NextResponse } from 'next/server' +import { createLogger } from '@/lib/logs/console-logger' +import { registry as blockRegistry } from '@/blocks/registry' +import { tools as toolsRegistry } from '@/tools/registry' + +const logger = createLogger('GetBlockMetadataAPI') + +// Core blocks that have documentation with YAML schemas +const CORE_BLOCKS_WITH_DOCS = [ + 'agent', + 'function', + 'api', + 'condition', + 'loop', + 'parallel', + 'response', + 'router', + 'evaluator', + 'webhook', +] + +// Mapping for blocks that have different doc file names +const DOCS_FILE_MAPPING: Record = { + webhook: 'webhook_trigger', +} + +// Helper function to read YAML schema from dedicated YAML documentation files +function getYamlSchemaFromDocs(blockType: string): string | null { + try { + const docFileName = DOCS_FILE_MAPPING[blockType] || blockType + // Read from the new YAML documentation structure + const yamlDocsPath = join( + process.cwd(), + '..', + 'docs/content/docs/yaml/blocks', + `${docFileName}.mdx` + ) + + if (!existsSync(yamlDocsPath)) { + logger.warn(`YAML schema file not found for ${blockType} at ${yamlDocsPath}`) + return null + } + + const content = readFileSync(yamlDocsPath, 'utf-8') + + // Remove the frontmatter and return the content after 
the title + const contentWithoutFrontmatter = content.replace(/^---[\s\S]*?---\s*/, '') + return contentWithoutFrontmatter.trim() + } catch (error) { + logger.warn(`Failed to read YAML schema for ${blockType}:`, error) + return null + } +} + +export async function POST(request: NextRequest) { + try { + const body = await request.json() + const { blockIds } = body + + if (!blockIds || !Array.isArray(blockIds)) { + return NextResponse.json( + { + success: false, + error: 'blockIds must be an array of block IDs', + }, + { status: 400 } + ) + } + + logger.info('Getting block metadata', { + blockIds, + blockCount: blockIds.length, + requestedBlocks: blockIds.join(', '), + }) + + // Create result object + const result: Record = {} + + for (const blockId of blockIds) { + const blockConfig = blockRegistry[blockId] + + if (!blockConfig) { + logger.warn(`Block not found: ${blockId}`) + continue + } + + // Always include code schemas from block configuration + const codeSchemas = { + inputs: blockConfig.inputs, + outputs: blockConfig.outputs, + subBlocks: blockConfig.subBlocks, + } + + // Check if this is a core block with YAML documentation + if (CORE_BLOCKS_WITH_DOCS.includes(blockId)) { + // For core blocks, return both YAML schema from documentation AND code schemas + const yamlSchema = getYamlSchemaFromDocs(blockId) + + if (yamlSchema) { + result[blockId] = { + type: 'block', + description: blockConfig.description || '', + longDescription: blockConfig.longDescription, + category: blockConfig.category || '', + yamlSchema: yamlSchema, + docsLink: blockConfig.docsLink, + // Include actual schemas from code + codeSchemas: codeSchemas, + } + } else { + // Fallback to regular metadata if YAML schema not found + result[blockId] = { + type: 'block', + description: blockConfig.description || '', + longDescription: blockConfig.longDescription, + category: blockConfig.category || '', + inputs: blockConfig.inputs, + outputs: blockConfig.outputs, + subBlocks: blockConfig.subBlocks, + 
// Include actual schemas from code + codeSchemas: codeSchemas, + } + } + } else { + // For tool blocks, return tool schema information AND code schemas + const blockTools = blockConfig.tools?.access || [] + const toolSchemas: Record = {} + + for (const toolId of blockTools) { + const toolConfig = toolsRegistry[toolId] + if (toolConfig) { + toolSchemas[toolId] = { + id: toolConfig.id, + name: toolConfig.name, + description: toolConfig.description || '', + version: toolConfig.version, + params: toolConfig.params, + request: toolConfig.request + ? { + method: toolConfig.request.method, + url: toolConfig.request.url, + headers: + typeof toolConfig.request.headers === 'function' + ? 'function' + : toolConfig.request.headers, + isInternalRoute: toolConfig.request.isInternalRoute, + } + : undefined, + } + } else { + logger.warn(`Tool not found: ${toolId} for block: ${blockId}`) + toolSchemas[toolId] = { + id: toolId, + description: 'Tool not found', + } + } + } + + result[blockId] = { + type: 'tool', + description: blockConfig.description || '', + longDescription: blockConfig.longDescription, + category: blockConfig.category || '', + inputs: blockConfig.inputs, + outputs: blockConfig.outputs, + subBlocks: blockConfig.subBlocks, + toolSchemas: toolSchemas, + // Include actual schemas from code + codeSchemas: codeSchemas, + } + } + } + + const processedBlocks = Object.keys(result).length + const requestedBlocks = blockIds.length + const notFoundBlocks = requestedBlocks - processedBlocks + + // Log detailed output for debugging + Object.entries(result).forEach(([blockId, blockData]) => { + if (blockData.type === 'block' && blockData.yamlSchema) { + logger.info(`Retrieved YAML schema + code schemas for core block: ${blockId}`, { + blockId, + type: blockData.type, + description: blockData.description, + yamlSchemaLength: blockData.yamlSchema.length, + yamlSchemaPreview: `${blockData.yamlSchema.substring(0, 200)}...`, + hasCodeSchemas: !!blockData.codeSchemas, + 
codeSubBlocksCount: blockData.codeSchemas?.subBlocks?.length || 0, + }) + } else if (blockData.type === 'tool' && blockData.toolSchemas) { + const toolIds = Object.keys(blockData.toolSchemas) + logger.info(`Retrieved tool schemas + code schemas for tool block: ${blockId}`, { + blockId, + type: blockData.type, + description: blockData.description, + toolCount: toolIds.length, + toolIds: toolIds, + hasCodeSchemas: !!blockData.codeSchemas, + codeSubBlocksCount: blockData.codeSchemas?.subBlocks?.length || 0, + }) + } else { + logger.info(`Retrieved metadata + code schemas for block: ${blockId}`, { + blockId, + type: blockData.type, + description: blockData.description, + hasInputs: !!blockData.inputs, + hasOutputs: !!blockData.outputs, + hasSubBlocks: !!blockData.subBlocks, + hasCodeSchemas: !!blockData.codeSchemas, + codeSubBlocksCount: blockData.codeSchemas?.subBlocks?.length || 0, + }) + } + }) + + logger.info(`Successfully processed ${processedBlocks} block metadata`, { + requestedBlocks, + processedBlocks, + notFoundBlocks, + coreBlocks: blockIds.filter((id) => CORE_BLOCKS_WITH_DOCS.includes(id)), + toolBlocks: blockIds.filter((id) => !CORE_BLOCKS_WITH_DOCS.includes(id)), + }) + + return NextResponse.json({ + success: true, + data: result, + }) + } catch (error) { + logger.error('Get block metadata failed', error) + return NextResponse.json( + { + success: false, + error: `Failed to get block metadata: ${error instanceof Error ? 
error.message : 'Unknown error'}`, + }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/tools/get-user-workflow/route.ts b/apps/sim/app/api/tools/get-user-workflow/route.ts index 775bfcdb0..94889577a 100644 --- a/apps/sim/app/api/tools/get-user-workflow/route.ts +++ b/apps/sim/app/api/tools/get-user-workflow/route.ts @@ -21,7 +21,7 @@ export async function POST(request: NextRequest) { ) } - logger.info('Fetching workflow for YAML generation', { workflowId }) + logger.info('Fetching user workflow', { workflowId }) // Fetch workflow from database const [workflowRecord] = await db @@ -190,9 +190,9 @@ export async function POST(request: NextRequest) { } } - logger.info('Successfully generated workflow YAML', { + logger.info('Successfully fetched user workflow YAML', { workflowId, - blockCount: response.blockCount, + blockCount: response.summary.blockCount, yamlLength: yaml.length, }) diff --git a/apps/sim/app/api/tools/get-yaml-structure/route.ts b/apps/sim/app/api/tools/get-yaml-structure/route.ts new file mode 100644 index 000000000..2b9f1039c --- /dev/null +++ b/apps/sim/app/api/tools/get-yaml-structure/route.ts @@ -0,0 +1,25 @@ +import { type NextRequest, NextResponse } from 'next/server' +import { YAML_WORKFLOW_PROMPT } from '../../../../lib/copilot/prompts' + +export async function POST(request: NextRequest) { + try { + console.log('[get-yaml-structure] API endpoint called') + + return NextResponse.json({ + success: true, + data: { + guide: YAML_WORKFLOW_PROMPT, + message: 'Complete YAML workflow syntax guide with examples and best practices', + }, + }) + } catch (error) { + console.error('[get-yaml-structure] Error:', error) + return NextResponse.json( + { + success: false, + error: 'Failed to get YAML structure guide', + }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/workflows/[id]/autolayout/route.ts b/apps/sim/app/api/workflows/[id]/autolayout/route.ts new file mode 100644 index 000000000..ae7539d6c --- /dev/null +++ 
b/apps/sim/app/api/workflows/[id]/autolayout/route.ts @@ -0,0 +1,223 @@ +import { eq } from 'drizzle-orm' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { getSession } from '@/lib/auth' +import { autoLayoutWorkflow } from '@/lib/autolayout/service' +import { createLogger } from '@/lib/logs/console-logger' +import { getUserEntityPermissions } from '@/lib/permissions/utils' +import { + loadWorkflowFromNormalizedTables, + saveWorkflowToNormalizedTables, +} from '@/lib/workflows/db-helpers' +import { db } from '@/db' +import { workflow as workflowTable } from '@/db/schema' + +const logger = createLogger('AutoLayoutAPI') + +const AutoLayoutRequestSchema = z.object({ + strategy: z + .enum(['smart', 'hierarchical', 'layered', 'force-directed']) + .optional() + .default('smart'), + direction: z.enum(['horizontal', 'vertical', 'auto']).optional().default('auto'), + spacing: z + .object({ + horizontal: z.number().min(100).max(1000).optional().default(400), + vertical: z.number().min(50).max(500).optional().default(200), + layer: z.number().min(200).max(1200).optional().default(600), + }) + .optional() + .default({}), + alignment: z.enum(['start', 'center', 'end']).optional().default('center'), + padding: z + .object({ + x: z.number().min(50).max(500).optional().default(200), + y: z.number().min(50).max(500).optional().default(200), + }) + .optional() + .default({}), +}) + +type AutoLayoutRequest = z.infer + +/** + * POST /api/workflows/[id]/autolayout + * Apply autolayout to an existing workflow + */ +export async function POST(request: NextRequest, { params }: { params: Promise<{ id: string }> }) { + const requestId = crypto.randomUUID().slice(0, 8) + const startTime = Date.now() + const { id: workflowId } = await params + + try { + // Get the session + const session = await getSession() + if (!session?.user?.id) { + logger.warn(`[${requestId}] Unauthorized autolayout attempt for workflow ${workflowId}`) + return 
NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) + } + + const userId = session.user.id + + // Parse request body + const body = await request.json() + const layoutOptions = AutoLayoutRequestSchema.parse(body) + + logger.info(`[${requestId}] Processing autolayout request for workflow ${workflowId}`, { + strategy: layoutOptions.strategy, + direction: layoutOptions.direction, + userId, + }) + + // Fetch the workflow to check ownership/access + const workflowData = await db + .select() + .from(workflowTable) + .where(eq(workflowTable.id, workflowId)) + .then((rows) => rows[0]) + + if (!workflowData) { + logger.warn(`[${requestId}] Workflow ${workflowId} not found for autolayout`) + return NextResponse.json({ error: 'Workflow not found' }, { status: 404 }) + } + + // Check if user has permission to update this workflow + let canUpdate = false + + // Case 1: User owns the workflow + if (workflowData.userId === userId) { + canUpdate = true + } + + // Case 2: Workflow belongs to a workspace and user has write or admin permission + if (!canUpdate && workflowData.workspaceId) { + const userPermission = await getUserEntityPermissions( + userId, + 'workspace', + workflowData.workspaceId + ) + if (userPermission === 'write' || userPermission === 'admin') { + canUpdate = true + } + } + + if (!canUpdate) { + logger.warn( + `[${requestId}] User ${userId} denied permission to autolayout workflow ${workflowId}` + ) + return NextResponse.json({ error: 'Access denied' }, { status: 403 }) + } + + // Load current workflow state + const currentWorkflowData = await loadWorkflowFromNormalizedTables(workflowId) + + if (!currentWorkflowData) { + logger.error(`[${requestId}] Could not load workflow ${workflowId} for autolayout`) + return NextResponse.json({ error: 'Could not load workflow data' }, { status: 500 }) + } + + // Apply autolayout + logger.info( + `[${requestId}] Applying autolayout to ${Object.keys(currentWorkflowData.blocks).length} blocks` + ) + + const 
layoutedBlocks = await autoLayoutWorkflow( + currentWorkflowData.blocks, + currentWorkflowData.edges, + { + strategy: layoutOptions.strategy, + direction: layoutOptions.direction, + spacing: { + horizontal: layoutOptions.spacing?.horizontal || 400, + vertical: layoutOptions.spacing?.vertical || 200, + layer: layoutOptions.spacing?.layer || 600, + }, + alignment: layoutOptions.alignment, + padding: { + x: layoutOptions.padding?.x || 200, + y: layoutOptions.padding?.y || 200, + }, + } + ) + + // Create updated workflow state + const updatedWorkflowState = { + ...currentWorkflowData, + blocks: layoutedBlocks, + lastSaved: Date.now(), + } + + // Save to database + const saveResult = await saveWorkflowToNormalizedTables(workflowId, updatedWorkflowState) + + if (!saveResult.success) { + logger.error(`[${requestId}] Failed to save autolayout results:`, saveResult.error) + return NextResponse.json( + { error: 'Failed to save autolayout results', details: saveResult.error }, + { status: 500 } + ) + } + + // Update workflow's lastSynced timestamp + await db + .update(workflowTable) + .set({ + lastSynced: new Date(), + updatedAt: new Date(), + state: saveResult.jsonBlob, + }) + .where(eq(workflowTable.id, workflowId)) + + // Notify the socket server to tell clients about the autolayout update + try { + const socketUrl = process.env.SOCKET_URL || 'http://localhost:3002' + await fetch(`${socketUrl}/api/workflow-updated`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ workflowId }), + }) + logger.info(`[${requestId}] Notified socket server of autolayout update`) + } catch (socketError) { + logger.warn(`[${requestId}] Failed to notify socket server:`, socketError) + } + + const elapsed = Date.now() - startTime + const blockCount = Object.keys(layoutedBlocks).length + + logger.info(`[${requestId}] Autolayout completed successfully in ${elapsed}ms`, { + blockCount, + strategy: layoutOptions.strategy, + workflowId, + }) + + return 
NextResponse.json({ + success: true, + message: `Autolayout applied successfully to ${blockCount} blocks`, + data: { + strategy: layoutOptions.strategy, + direction: layoutOptions.direction, + blockCount, + elapsed: `${elapsed}ms`, + }, + }) + } catch (error) { + const elapsed = Date.now() - startTime + + if (error instanceof z.ZodError) { + logger.warn(`[${requestId}] Invalid autolayout request data`, { errors: error.errors }) + return NextResponse.json( + { error: 'Invalid request data', details: error.errors }, + { status: 400 } + ) + } + + logger.error(`[${requestId}] Autolayout failed after ${elapsed}ms:`, error) + return NextResponse.json( + { + error: 'Autolayout failed', + details: error instanceof Error ? error.message : 'Unknown error', + }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/workflows/[id]/state/route.ts b/apps/sim/app/api/workflows/[id]/state/route.ts index 8a62c00b6..db440fb81 100644 --- a/apps/sim/app/api/workflows/[id]/state/route.ts +++ b/apps/sim/app/api/workflows/[id]/state/route.ts @@ -31,7 +31,7 @@ const BlockDataSchema = z.object({ const SubBlockStateSchema = z.object({ id: z.string(), type: z.string(), - value: z.union([z.string(), z.number(), z.array(z.array(z.string())), z.null()]), + value: z.any(), }) const BlockOutputSchema = z.any() diff --git a/apps/sim/app/api/workflows/[id]/yaml/route.ts b/apps/sim/app/api/workflows/[id]/yaml/route.ts new file mode 100644 index 000000000..1ed9645a7 --- /dev/null +++ b/apps/sim/app/api/workflows/[id]/yaml/route.ts @@ -0,0 +1,538 @@ +import { eq } from 'drizzle-orm' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { autoLayoutWorkflow } from '@/lib/autolayout/service' +import { createLogger } from '@/lib/logs/console-logger' +import { getUserEntityPermissions } from '@/lib/permissions/utils' +import { + loadWorkflowFromNormalizedTables, + saveWorkflowToNormalizedTables, +} from '@/lib/workflows/db-helpers' +import { 
generateWorkflowYaml } from '@/lib/workflows/yaml-generator' +import { getUserId as getOAuthUserId } from '@/app/api/auth/oauth/utils' +import { getBlock } from '@/blocks' +import { resolveOutputType } from '@/blocks/utils' +import { db } from '@/db' +import { copilotCheckpoints, workflow as workflowTable } from '@/db/schema' +import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils' +import { convertYamlToWorkflow, parseWorkflowYaml } from '@/stores/workflows/yaml/importer' + +const logger = createLogger('WorkflowYamlAPI') + +// Request schema for YAML workflow operations +const YamlWorkflowRequestSchema = z.object({ + yamlContent: z.string().min(1, 'YAML content is required'), + description: z.string().optional(), + chatId: z.string().optional(), // For copilot checkpoints + source: z.enum(['copilot', 'import', 'editor']).default('editor'), + applyAutoLayout: z.boolean().default(true), + createCheckpoint: z.boolean().default(false), +}) + +type YamlWorkflowRequest = z.infer + +/** + * Helper function to create a checkpoint before workflow changes + */ +async function createWorkflowCheckpoint( + userId: string, + workflowId: string, + chatId: string, + requestId: string +): Promise { + try { + logger.info(`[${requestId}] Creating checkpoint before workflow edit`) + + // Get current workflow state + const currentWorkflowData = await loadWorkflowFromNormalizedTables(workflowId) + + if (currentWorkflowData) { + // Generate YAML from current state + const currentYaml = generateWorkflowYaml(currentWorkflowData) + + // Create checkpoint + await db.insert(copilotCheckpoints).values({ + userId, + workflowId, + chatId, + yaml: currentYaml, + }) + + logger.info(`[${requestId}] Checkpoint created successfully`) + return true + } + logger.warn(`[${requestId}] Could not load current workflow state for checkpoint`) + return false + } catch (error) { + logger.error(`[${requestId}] Failed to create checkpoint:`, error) + return false + } +} + 
+/** + * Helper function to get user ID with proper authentication for both tool calls and direct requests + */ +async function getUserId(requestId: string, workflowId: string): Promise { + // Use the OAuth utils function that handles both session and workflow-based auth + const userId = await getOAuthUserId(requestId, workflowId) + + if (!userId) { + logger.warn(`[${requestId}] Could not determine user ID for workflow ${workflowId}`) + return null + } + + // For additional security, verify the user has permission to access this workflow + const workflowData = await db + .select() + .from(workflowTable) + .where(eq(workflowTable.id, workflowId)) + .then((rows) => rows[0]) + + if (!workflowData) { + logger.warn(`[${requestId}] Workflow ${workflowId} not found`) + return null + } + + // Check if user has permission to update this workflow + let canUpdate = false + + // Case 1: User owns the workflow + if (workflowData.userId === userId) { + canUpdate = true + } + + // Case 2: Workflow belongs to a workspace and user has write or admin permission + if (!canUpdate && workflowData.workspaceId) { + try { + const userPermission = await getUserEntityPermissions( + userId, + 'workspace', + workflowData.workspaceId + ) + if (userPermission === 'write' || userPermission === 'admin') { + canUpdate = true + } + } catch (error) { + logger.warn(`[${requestId}] Error checking workspace permissions:`, error) + } + } + + if (!canUpdate) { + logger.warn(`[${requestId}] User ${userId} denied permission to update workflow ${workflowId}`) + return null + } + + return userId +} + +/** + * Helper function to update block references in values with new mapped IDs + */ +function updateBlockReferences( + value: any, + blockIdMapping: Map, + requestId: string +): any { + if (typeof value === 'string' && value.includes('<') && value.includes('>')) { + let processedValue = value + const blockMatches = value.match(/<([^>]+)>/g) + + if (blockMatches) { + for (const match of blockMatches) { + const 
path = match.slice(1, -1) + const [blockRef] = path.split('.') + + // Skip system references (start, loop, parallel, variable) + if (['start', 'loop', 'parallel', 'variable'].includes(blockRef.toLowerCase())) { + continue + } + + // Check if this references an old block ID that needs mapping + const newMappedId = blockIdMapping.get(blockRef) + if (newMappedId) { + logger.info(`[${requestId}] Updating block reference: ${blockRef} -> ${newMappedId}`) + processedValue = processedValue.replace( + new RegExp(`<${blockRef}\\.`, 'g'), + `<${newMappedId}.` + ) + processedValue = processedValue.replace( + new RegExp(`<${blockRef}>`, 'g'), + `<${newMappedId}>` + ) + } + } + } + + return processedValue + } + + // Handle arrays + if (Array.isArray(value)) { + return value.map((item) => updateBlockReferences(item, blockIdMapping, requestId)) + } + + // Handle objects + if (value !== null && typeof value === 'object') { + const result = { ...value } + for (const key in result) { + result[key] = updateBlockReferences(result[key], blockIdMapping, requestId) + } + return result + } + + return value +} + +/** + * PUT /api/workflows/[id]/yaml + * Consolidated YAML workflow saving endpoint + * Handles copilot edits, imports, and text editor saves + */ +export async function PUT(request: NextRequest, { params }: { params: Promise<{ id: string }> }) { + const requestId = crypto.randomUUID().slice(0, 8) + const startTime = Date.now() + const { id: workflowId } = await params + + try { + // Parse and validate request + const body = await request.json() + const { yamlContent, description, chatId, source, applyAutoLayout, createCheckpoint } = + YamlWorkflowRequestSchema.parse(body) + + logger.info(`[${requestId}] Processing ${source} YAML workflow save`, { + workflowId, + yamlLength: yamlContent.length, + hasDescription: !!description, + hasChatId: !!chatId, + applyAutoLayout, + createCheckpoint, + }) + + // Get and validate user + const userId = await getUserId(requestId, workflowId) + if 
(!userId) { + return NextResponse.json({ error: 'Unauthorized or workflow not found' }, { status: 403 }) + } + + // Create checkpoint if requested (typically for copilot) + if (createCheckpoint && chatId) { + await createWorkflowCheckpoint(userId, workflowId, chatId, requestId) + } + + // Parse YAML content + const { data: yamlWorkflow, errors: parseErrors } = parseWorkflowYaml(yamlContent) + + if (!yamlWorkflow || parseErrors.length > 0) { + logger.error(`[${requestId}] YAML parsing failed`, { parseErrors }) + return NextResponse.json({ + success: false, + message: 'Failed to parse YAML workflow', + errors: parseErrors, + warnings: [], + }) + } + + // Convert YAML to workflow format + const { blocks, edges, errors: convertErrors, warnings } = convertYamlToWorkflow(yamlWorkflow) + + if (convertErrors.length > 0) { + logger.error(`[${requestId}] YAML conversion failed`, { convertErrors }) + return NextResponse.json({ + success: false, + message: 'Failed to convert YAML to workflow', + errors: convertErrors, + warnings, + }) + } + + // Create workflow state + const newWorkflowState: any = { + blocks: {} as Record, + edges: [] as any[], + loops: {} as Record, + parallels: {} as Record, + lastSaved: Date.now(), + isDeployed: false, + deployedAt: undefined, + deploymentStatuses: {} as Record, + hasActiveSchedule: false, + hasActiveWebhook: false, + } + + // Process blocks with proper configuration setup and assign new IDs + const blockIdMapping = new Map() + + for (const block of blocks) { + const newId = crypto.randomUUID() + blockIdMapping.set(block.id, newId) + + // Get block configuration for proper setup + const blockConfig = getBlock(block.type) + + if (!blockConfig && (block.type === 'loop' || block.type === 'parallel')) { + // Handle loop/parallel blocks (they don't have regular block configs) + newWorkflowState.blocks[newId] = { + id: newId, + type: block.type, + name: block.name, + position: block.position, + subBlocks: {}, + outputs: {}, + enabled: true, + 
horizontalHandles: true, + isWide: false, + height: 0, + data: block.data || {}, + } + logger.debug(`[${requestId}] Processed loop/parallel block: ${block.id} -> ${newId}`) + } else if (blockConfig) { + // Handle regular blocks with proper configuration + const subBlocks: Record = {} + + // Set up subBlocks from block configuration + blockConfig.subBlocks.forEach((subBlock) => { + subBlocks[subBlock.id] = { + id: subBlock.id, + type: subBlock.type, + value: null, + } + }) + + // Also ensure we have subBlocks for any YAML inputs that might not be in the config + // This handles cases where hidden fields or dynamic configurations exist + Object.keys(block.inputs).forEach((inputKey) => { + if (!subBlocks[inputKey]) { + subBlocks[inputKey] = { + id: inputKey, + type: 'short-input', // Default type for dynamic inputs + value: null, + } + } + }) + + // Set up outputs from block configuration + const outputs = resolveOutputType(blockConfig.outputs) + + newWorkflowState.blocks[newId] = { + id: newId, + type: block.type, + name: block.name, + position: block.position, + subBlocks, + outputs, + enabled: true, + horizontalHandles: true, + isWide: false, + height: 0, + data: block.data || {}, + } + + logger.debug(`[${requestId}] Processed regular block: ${block.id} -> ${newId}`) + } else { + logger.warn(`[${requestId}] Unknown block type: ${block.type}`) + } + } + + // Set input values as subblock values with block reference mapping + for (const block of blocks) { + const newId = blockIdMapping.get(block.id) + if (!newId || !newWorkflowState.blocks[newId]) continue + + if (block.inputs && typeof block.inputs === 'object') { + Object.entries(block.inputs).forEach(([key, value]) => { + if (newWorkflowState.blocks[newId].subBlocks[key]) { + // Update block references in values to use new mapped IDs + const processedValue = updateBlockReferences(value, blockIdMapping, requestId) + newWorkflowState.blocks[newId].subBlocks[key].value = processedValue + } + }) + } + } + + // Update 
parent-child relationships with mapped IDs + logger.info(`[${requestId}] Block ID mapping:`, Object.fromEntries(blockIdMapping)) + for (const [newId, blockData] of Object.entries(newWorkflowState.blocks)) { + const block = blockData as any + if (block.data?.parentId) { + logger.info( + `[${requestId}] Found child block ${block.name} with parentId: ${block.data.parentId}` + ) + const mappedParentId = blockIdMapping.get(block.data.parentId) + if (mappedParentId) { + logger.info( + `[${requestId}] Updating parent reference: ${block.data.parentId} -> ${mappedParentId}` + ) + block.data.parentId = mappedParentId + // Ensure extent is set for child blocks + if (!block.data.extent) { + block.data.extent = 'parent' + } + } else { + logger.error( + `[${requestId}] ❌ Parent block not found for mapping: ${block.data.parentId}` + ) + logger.error(`[${requestId}] Available mappings:`, Array.from(blockIdMapping.keys())) + // Remove invalid parent reference + block.data.parentId = undefined + block.data.extent = undefined + } + } + } + + // Process edges with mapped IDs and handles + for (const edge of edges) { + const sourceId = blockIdMapping.get(edge.source) + const targetId = blockIdMapping.get(edge.target) + + if (sourceId && targetId) { + const newEdgeId = crypto.randomUUID() + newWorkflowState.edges.push({ + id: newEdgeId, + source: sourceId, + target: targetId, + sourceHandle: edge.sourceHandle, + targetHandle: edge.targetHandle, + type: edge.type || 'default', + }) + } else { + logger.warn( + `[${requestId}] Skipping edge - missing blocks: ${edge.source} -> ${edge.target}` + ) + } + } + + // Generate loop and parallel configurations + const loops = generateLoopBlocks(newWorkflowState.blocks) + const parallels = generateParallelBlocks(newWorkflowState.blocks) + newWorkflowState.loops = loops + newWorkflowState.parallels = parallels + + logger.info(`[${requestId}] Generated workflow state`, { + blocksCount: Object.keys(newWorkflowState.blocks).length, + edgesCount: 
newWorkflowState.edges.length, + loopsCount: Object.keys(loops).length, + parallelsCount: Object.keys(parallels).length, + }) + + // Apply intelligent autolayout if requested + if (applyAutoLayout) { + try { + logger.info(`[${requestId}] Applying autolayout`) + + const layoutedBlocks = await autoLayoutWorkflow( + newWorkflowState.blocks, + newWorkflowState.edges, + { + strategy: 'smart', + direction: 'auto', + spacing: { + horizontal: 400, + vertical: 200, + layer: 600, + }, + alignment: 'center', + padding: { + x: 200, + y: 200, + }, + } + ) + + newWorkflowState.blocks = layoutedBlocks + logger.info(`[${requestId}] Autolayout completed successfully`) + } catch (layoutError) { + logger.warn(`[${requestId}] Autolayout failed, using original positions:`, layoutError) + } + } + + // Save to database + const saveResult = await saveWorkflowToNormalizedTables(workflowId, newWorkflowState) + + if (!saveResult.success) { + logger.error(`[${requestId}] Failed to save workflow state:`, saveResult.error) + return NextResponse.json({ + success: false, + message: `Database save failed: ${saveResult.error || 'Unknown error'}`, + errors: [saveResult.error || 'Database save failed'], + warnings, + }) + } + + // Update workflow's lastSynced timestamp + await db + .update(workflowTable) + .set({ + lastSynced: new Date(), + updatedAt: new Date(), + state: saveResult.jsonBlob, + }) + .where(eq(workflowTable.id, workflowId)) + + // Notify socket server for real-time collaboration (for copilot and editor) + if (source === 'copilot' || source === 'editor') { + try { + const socketUrl = process.env.SOCKET_URL || 'http://localhost:3002' + await fetch(`${socketUrl}/api/copilot-workflow-edit`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + workflowId, + description: description || `${source} edited workflow`, + }), + }) + logger.info(`[${requestId}] Notified socket server`) + } catch (socketError) { + logger.warn(`[${requestId}] Failed to 
notify socket server:`, socketError) + } + } + + const elapsed = Date.now() - startTime + const totalBlocksInWorkflow = Object.keys(newWorkflowState.blocks).length + const summary = `Successfully saved workflow with ${totalBlocksInWorkflow} blocks and ${newWorkflowState.edges.length} connections.` + + logger.info(`[${requestId}] YAML workflow save completed in ${elapsed}ms`, { + success: true, + blocksCount: totalBlocksInWorkflow, + edgesCount: newWorkflowState.edges.length, + }) + + return NextResponse.json({ + success: true, + message: description ? `Workflow updated: ${description}` : 'Workflow updated successfully', + summary, + data: { + blocksCount: totalBlocksInWorkflow, + edgesCount: newWorkflowState.edges.length, + loopsCount: Object.keys(loops).length, + parallelsCount: Object.keys(parallels).length, + }, + errors: [], + warnings, + }) + } catch (error) { + const elapsed = Date.now() - startTime + logger.error(`[${requestId}] YAML workflow save failed in ${elapsed}ms:`, error) + + if (error instanceof z.ZodError) { + return NextResponse.json( + { + success: false, + message: 'Invalid request data', + errors: error.errors.map((e) => `${e.path.join('.')}: ${e.message}`), + warnings: [], + }, + { status: 400 } + ) + } + + return NextResponse.json( + { + success: false, + message: `Failed to save YAML workflow: ${error instanceof Error ? error.message : 'Unknown error'}`, + errors: [error instanceof Error ? 
error.message : 'Unknown error'], + warnings: [], + }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/chat/[subdomain]/components/message/message.tsx b/apps/sim/app/chat/[subdomain]/components/message/message.tsx index c548abd9c..3ebdbf4d9 100644 --- a/apps/sim/app/chat/[subdomain]/components/message/message.tsx +++ b/apps/sim/app/chat/[subdomain]/components/message/message.tsx @@ -3,7 +3,9 @@ import { memo, useMemo, useState } from 'react' import { Check, Copy } from 'lucide-react' import { Button } from '@/components/ui/button' +import { ToolCallCompletion, ToolCallExecution } from '@/components/ui/tool-call' import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/tooltip' +import { parseMessageContent, stripToolCallIndicators } from '@/lib/tool-call-parser' import MarkdownRenderer from './components/markdown-renderer' export interface ChatMessage { @@ -31,6 +33,22 @@ export const ClientChatMessage = memo( return typeof message.content === 'object' && message.content !== null }, [message.content]) + // Parse message content to separate text and tool calls (only for assistant messages) + const parsedContent = useMemo(() => { + if (message.type === 'assistant' && typeof message.content === 'string') { + return parseMessageContent(message.content) + } + return null + }, [message.type, message.content]) + + // Get clean text content without tool call indicators + const cleanTextContent = useMemo(() => { + if (message.type === 'assistant' && typeof message.content === 'string') { + return stripToolCallIndicators(message.content) + } + return message.content + }, [message.type, message.content]) + // For user messages (on the right) if (message.type === 'user') { return ( @@ -56,18 +74,58 @@ export const ClientChatMessage = memo( return (
-
-
-
- {isJsonObject ? ( -
-                    {JSON.stringify(message.content, null, 2)}
-                  
- ) : ( - - )} +
+ {/* Inline content rendering - tool calls and text in order */} + {parsedContent?.inlineContent && parsedContent.inlineContent.length > 0 ? ( +
+ {parsedContent.inlineContent.map((item, index) => { + if (item.type === 'tool_call' && item.toolCall) { + const toolCall = item.toolCall + return ( +
+ {toolCall.state === 'detecting' && ( +
+
+ + Detecting {toolCall.displayName || toolCall.name}... + +
+ )} + {toolCall.state === 'executing' && ( + + )} + {(toolCall.state === 'completed' || toolCall.state === 'error') && ( + + )} +
+ ) + } + if (item.type === 'text' && item.content.trim()) { + return ( +
+
+ +
+
+ ) + } + return null + })}
-
+ ) : ( + /* Fallback for empty content or no inline content */ +
+
+ {isJsonObject ? ( +
+                      {JSON.stringify(cleanTextContent, null, 2)}
+                    
+ ) : ( + + )} +
+
+ )} {message.type === 'assistant' && !isJsonObject && !message.isInitialMessage && (
{/* Copy Button - Only show when not streaming */} @@ -80,7 +138,11 @@ export const ClientChatMessage = memo( size='sm' className='flex items-center gap-1.5 px-2 py-1' onClick={() => { - navigator.clipboard.writeText(message.content as string) + const contentToCopy = + typeof cleanTextContent === 'string' + ? cleanTextContent + : JSON.stringify(cleanTextContent, null, 2) + navigator.clipboard.writeText(contentToCopy) setIsCopied(true) setTimeout(() => setIsCopied(false), 2000) }} diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/control-bar/control-bar.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/control-bar/control-bar.tsx index d5a4d4dad..33f929b9a 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/control-bar/control-bar.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/control-bar/control-bar.tsx @@ -46,6 +46,7 @@ import { useKeyboardShortcuts, } from '../../../hooks/use-keyboard-shortcuts' import { useWorkflowExecution } from '../../hooks/use-workflow-execution' +import { WorkflowTextEditorModal } from '../workflow-text-editor/workflow-text-editor-modal' import { DeploymentControls } from './components/deployment-controls/deployment-controls' import { ExportControls } from './components/export-controls/export-controls' import { TemplateModal } from './components/template-modal/template-modal' @@ -508,6 +509,36 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) { ) } + /** + * Render YAML editor button + */ + const renderYamlEditorButton = () => { + const canEdit = userPermissions.canEdit + const isDisabled = isExecuting || isDebugging || !canEdit + + const getTooltipText = () => { + if (!canEdit) return 'Admin permission required to edit YAML' + if (isDebugging) return 'Cannot edit YAML while debugging' + if (isExecuting) return 'Cannot edit YAML while workflow is running' + return 'Edit workflow as YAML/JSON' + } + + return ( + + + + 
+ {getTooltipText()} + + ) + } + /** * Render auto-layout button */ @@ -943,6 +974,7 @@ export function ControlBar({ hasValidationErrors = false }: ControlBarProps) { {renderDisconnectionNotice()} {renderToggleButton()} {isExpanded && } + {isExpanded && renderYamlEditorButton()} {isExpanded && renderAutoLayoutButton()} {isExpanded && renderDuplicateButton()} {renderDeleteButton()} diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/checkpoint-panel.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/checkpoint-panel.tsx new file mode 100644 index 000000000..e82c606a2 --- /dev/null +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/checkpoint-panel.tsx @@ -0,0 +1,156 @@ +'use client' + +import { useEffect } from 'react' +import { formatDistanceToNow } from 'date-fns' +import { AlertCircle, History, RotateCcw } from 'lucide-react' +import { Button } from '@/components/ui/button' +import { ScrollArea } from '@/components/ui/scroll-area' +import { Separator } from '@/components/ui/separator' +import { useCopilotStore } from '@/stores/copilot/store' + +export function CheckpointPanel() { + const { + currentChat, + checkpoints, + isLoadingCheckpoints, + isRevertingCheckpoint, + checkpointError, + loadCheckpoints, + revertToCheckpoint: revertToCheckpointAction, + clearCheckpointError, + } = useCopilotStore() + + // Load checkpoints when chat changes + useEffect(() => { + if (currentChat?.id) { + loadCheckpoints(currentChat.id) + } + }, [currentChat?.id, loadCheckpoints]) + + if (!currentChat) { + return ( +
+ +

No chat selected

+
+ ) + } + + if (isLoadingCheckpoints) { + return ( +
+
+

Loading checkpoints...

+
+ ) + } + + if (checkpointError) { + return ( +
+
+ + Error loading checkpoints +
+

{checkpointError}

+ +
+ ) + } + + if (checkpoints.length === 0) { + return ( +
+ +

No checkpoints yet

+

+ Checkpoints are created automatically when the agent edits your workflow +

+
+ ) + } + + const handleRevert = async (checkpointId: string) => { + if ( + window.confirm( + 'Are you sure you want to revert to this checkpoint? This will replace your current workflow.' + ) + ) { + await revertToCheckpointAction(checkpointId) + } + } + + return ( +
+
+
+ +

Workflow Checkpoints

+
+

+ Restore your workflow to a previous state +

+
+ + +
+ {checkpoints.map((checkpoint, index) => ( +
+
+
+
+
+
+ + Checkpoint {checkpoints.length - index} + +
+

+ {formatDistanceToNow(new Date(checkpoint.createdAt), { addSuffix: true })} +

+

+ {new Date(checkpoint.createdAt).toLocaleDateString()} at{' '} + {new Date(checkpoint.createdAt).toLocaleTimeString([], { + hour: '2-digit', + minute: '2-digit', + })} +

+
+ +
+
+ {index < checkpoints.length - 1 && } +
+ ))} +
+ + + {isRevertingCheckpoint && ( +
+
+
+ Reverting workflow... +
+
+ )} +
+ ) +} diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-modal/copilot-modal.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-modal/copilot-modal.tsx index 1a10a9683..663061a79 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-modal/copilot-modal.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-modal/copilot-modal.tsx @@ -1,10 +1,10 @@ 'use client' -import { type KeyboardEvent, useEffect, useRef } from 'react' +import { useEffect, useRef, useState } from 'react' import { - ArrowUp, Bot, ChevronDown, + History, MessageSquarePlus, MoreHorizontal, Trash2, @@ -17,126 +17,34 @@ import { DropdownMenuItem, DropdownMenuTrigger, } from '@/components/ui/dropdown-menu' -import { Input } from '@/components/ui/input' -import type { CopilotChat } from '@/lib/copilot-api' +import type { CopilotChat } from '@/lib/copilot/api' import { createLogger } from '@/lib/logs/console-logger' +import type { CopilotMessage } from '@/stores/copilot/types' +import { CheckpointPanel } from '../checkpoint-panel' +import { ProfessionalInput } from '../professional-input/professional-input' +import { ProfessionalMessage } from '../professional-message/professional-message' +import { CopilotWelcome } from '../welcome/welcome' const logger = createLogger('CopilotModal') -interface Message { - id: string - content: string - type: 'user' | 'assistant' - timestamp: Date - citations?: Array<{ - id: number - title: string - url: string - }> -} - -interface CopilotModalMessage { - message: Message -} - -// Modal-specific message component -function ModalCopilotMessage({ message }: CopilotModalMessage) { - const renderMarkdown = (text: string) => { - let processedText = text - - // Process markdown links: [text](url) - processedText = 
processedText.replace( - /\[([^\]]+)\]\(([^)]+)\)/g, - '$1' - ) - - // Handle code blocks - processedText = processedText.replace( - /```(\w+)?\n([\s\S]*?)\n```/g, - '
$2
' - ) - - // Handle inline code - processedText = processedText.replace( - /`([^`]+)`/g, - '$1' - ) - - // Handle headers - processedText = processedText.replace( - /^### (.*$)/gm, - '

$1

' - ) - processedText = processedText.replace( - /^## (.*$)/gm, - '

$1

' - ) - processedText = processedText.replace( - /^# (.*$)/gm, - '

$1

' - ) - - // Handle bold - processedText = processedText.replace(/\*\*(.*?)\*\*/g, '$1') - - // Handle lists - processedText = processedText.replace(/^- (.*$)/gm, '
  • • $1
  • ') - - // Handle line breaks (reduce spacing) - processedText = processedText.replace(/\n\n+/g, '

    ') - processedText = processedText.replace(/\n/g, '
    ') - - return processedText - } - - // For user messages (on the right) - if (message.type === 'user') { - return ( -

    -
    -
    -
    -
    - {message.content} -
    -
    -
    -
    -
    - ) - } - - // For assistant messages (on the left) - return ( -
    -
    -
    -
    -
    -
    -
    -
    -
    - ) -} - interface CopilotModalProps { open: boolean onOpenChange: (open: boolean) => void copilotMessage: string setCopilotMessage: (message: string) => void - messages: Message[] + messages: CopilotMessage[] onSendMessage: (message: string) => Promise isLoading: boolean + isLoadingChats: boolean // Chat management props chats: CopilotChat[] currentChat: CopilotChat | null onSelectChat: (chat: CopilotChat) => void onStartNewChat: () => void onDeleteChat: (chatId: string) => void + // Mode props + mode: 'ask' | 'agent' + onModeChange: (mode: 'ask' | 'agent') => void } export function CopilotModal({ @@ -147,15 +55,22 @@ export function CopilotModal({ messages, onSendMessage, isLoading, + isLoadingChats, chats, currentChat, onSelectChat, onStartNewChat, onDeleteChat, + mode, + onModeChange, }: CopilotModalProps) { const messagesEndRef = useRef(null) const messagesContainerRef = useRef(null) - const inputRef = useRef(null) + const [isDropdownOpen, setIsDropdownOpen] = useState(false) + const [showCheckpoints, setShowCheckpoints] = useState(false) + + // Fixed sidebar width for copilot modal positioning + const sidebarWidth = 240 // w-60 (sidebar width from staging) // Auto-scroll to bottom when new messages are added useEffect(() => { @@ -164,42 +79,13 @@ export function CopilotModal({ } }, [messages]) - // Focus input when modal opens - useEffect(() => { - if (open && inputRef.current) { - inputRef.current.focus() - } - }, [open]) - - // Handle send message - const handleSendMessage = async () => { - if (!copilotMessage.trim() || isLoading) return - - try { - await onSendMessage(copilotMessage.trim()) - setCopilotMessage('') - - // Ensure input stays focused - if (inputRef.current) { - inputRef.current.focus() - } - } catch (error) { - logger.error('Failed to send message', error) - } - } - - // Handle key press - const handleKeyPress = (e: KeyboardEvent) => { - if (e.key === 'Enter' && !e.shiftKey) { - e.preventDefault() - handleSendMessage() - } - } - if 
(!open) return null return ( -
    +
    - {/* Header with chat title, management, and close button */} -
    -
    - {/* Chat Title Dropdown */} - - - - - - {chats.map((chat) => ( -
    - onSelectChat(chat)} - className='flex-1 cursor-pointer' - > -
    -
    - {chat.title || 'Untitled Chat'} -
    -
    - {chat.messageCount} messages •{' '} - {new Date(chat.updatedAt).toLocaleDateString()} -
    -
    -
    - - - - - - onDeleteChat(chat.id)} - className='cursor-pointer text-destructive' - > - - Delete - - - -
    - ))} -
    -
    - - {/* New Chat Button */} + {/* Show loading state with centered pulsing agent icon */} + {isLoadingChats || isLoading ? ( +
    +
    + +
    +
    + ) : ( + <> + {/* Close button in top right corner */} -
    - -
    + {/* Header with chat title and management */} +
    +
    + {/* Chat Title Dropdown */} + + + + + setIsDropdownOpen(false)} + > + {isLoadingChats ? ( +
    Loading chats...
    + ) : chats.length === 0 ? ( +
    No chats yet
    + ) : ( + // Sort chats by updated date (most recent first) for display + [...chats] + .sort( + (a, b) => new Date(b.updatedAt).getTime() - new Date(a.updatedAt).getTime() + ) + .map((chat) => ( +
    + +
    { + onSelectChat(chat) + setIsDropdownOpen(false) + }} + className={`min-w-0 flex-1 cursor-pointer rounded-lg px-3 py-2.5 transition-all ${ + currentChat?.id === chat.id + ? 'bg-accent/80 text-accent-foreground' + : 'hover:bg-accent/40' + }`} + > +
    +
    + {chat.title || 'Untitled Chat'} +
    +
    + {new Date(chat.updatedAt).toLocaleDateString()} at{' '} + {new Date(chat.updatedAt).toLocaleTimeString([], { + hour: '2-digit', + minute: '2-digit', + })}{' '} + • {chat.messageCount} +
    +
    +
    +
    + + + + + + onDeleteChat(chat.id)} + className='cursor-pointer text-destructive hover:bg-destructive/10 hover:text-destructive focus:bg-destructive/10 focus:text-destructive' + > + + Delete + + + +
    + )) + )} +
    +
    - {/* Messages container */} -
    -
    - {messages.length === 0 ? ( -
    -
    - -
    -

    Welcome to Documentation Copilot

    -

    - Ask me anything about Sim Studio features, workflows, tools, or how to get - started. -

    -
    -
    -
    Try asking:
    -
    -
    - "How do I create a workflow?" -
    -
    - "What tools are available?" -
    -
    - "How do I deploy my workflow?" -
    -
    -
    + {/* Right side action buttons */} +
    + {/* Checkpoint Toggle Button */} + + + {/* New Chat Button */} +
    +
    + + {/* Messages container or Checkpoint Panel */} + {showCheckpoints ? ( +
    + +
    ) : ( - messages.map((message) => ) - )} - - {/* Loading indicator (shows only when loading) */} - {isLoading && ( -
    +
    -
    -
    -
    -
    -
    -
    -
    + {messages.length === 0 ? ( + + ) : ( + messages.map((message) => ( + + )) + )} + +
    )} -
    -
    -
    + {/* Mode Selector and Input */} + {!showCheckpoints && ( + <> + {/* Mode Selector */} +
    +
    +
    + + +
    +
    +
    - {/* Input area (fixed at bottom) */} -
    -
    -
    - setCopilotMessage(e.target.value)} - onKeyDown={handleKeyPress} - placeholder='Ask about Sim Studio documentation...' - className='min-h-[50px] flex-1 rounded-2xl border-0 bg-transparent py-7 pr-16 pl-6 text-base focus-visible:ring-0 focus-visible:ring-offset-0' - disabled={isLoading} - /> - -
    - -
    -

    Ask questions about Sim Studio documentation and features

    -
    -
    -
    + {/* Input area */} + { + await onSendMessage(message) + setCopilotMessage('') + }} + disabled={false} + isLoading={isLoading} + /> + + )} + + )}
    ) } diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/professional-input/professional-input.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/professional-input/professional-input.tsx new file mode 100644 index 000000000..5690a7c15 --- /dev/null +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/professional-input/professional-input.tsx @@ -0,0 +1,98 @@ +'use client' + +import { type FC, type KeyboardEvent, useRef, useState } from 'react' +import { ArrowUp, Loader2 } from 'lucide-react' +import { Button } from '@/components/ui/button' +import { Textarea } from '@/components/ui/textarea' +import { cn } from '@/lib/utils' + +interface ProfessionalInputProps { + onSubmit: (message: string) => void + disabled?: boolean + isLoading?: boolean + placeholder?: string + className?: string +} + +const ProfessionalInput: FC = ({ + onSubmit, + disabled = false, + isLoading = false, + placeholder = 'How can I help you today?', + className, +}) => { + const [message, setMessage] = useState('') + const textareaRef = useRef(null) + + const handleSubmit = () => { + const trimmedMessage = message.trim() + if (!trimmedMessage || disabled || isLoading) return + + onSubmit(trimmedMessage) + setMessage('') + + // Reset textarea height + if (textareaRef.current) { + textareaRef.current.style.height = 'auto' + } + } + + const handleKeyDown = (e: KeyboardEvent) => { + if (e.key === 'Enter' && !e.shiftKey) { + e.preventDefault() + handleSubmit() + } + } + + const handleInputChange = (e: React.ChangeEvent) => { + setMessage(e.target.value) + + // Auto-resize textarea + if (textareaRef.current) { + textareaRef.current.style.height = 'auto' + textareaRef.current.style.height = `${Math.min(textareaRef.current.scrollHeight, 120)}px` + } + } + + const canSubmit = message.trim().length > 0 && !disabled && !isLoading + + 
return ( +
    +
    +
    +
    +