diff --git a/apps/docs/components/icons.tsx b/apps/docs/components/icons.tsx index d62410d7f..f13fc8aa8 100644 --- a/apps/docs/components/icons.tsx +++ b/apps/docs/components/icons.tsx @@ -5483,3 +5483,37 @@ export function AgentSkillsIcon(props: SVGProps) { ) } + +export function OnePasswordIcon(props: SVGProps) { + return ( + + + + + + + ) +} diff --git a/apps/docs/components/ui/icon-mapping.ts b/apps/docs/components/ui/icon-mapping.ts index 490292c09..f02e55a8e 100644 --- a/apps/docs/components/ui/icon-mapping.ts +++ b/apps/docs/components/ui/icon-mapping.ts @@ -80,6 +80,7 @@ import { MySQLIcon, Neo4jIcon, NotionIcon, + OnePasswordIcon, OpenAIIcon, OutlookIcon, PackageSearchIcon, @@ -214,6 +215,7 @@ export const blockTypeToIconMap: Record = { neo4j: Neo4jIcon, notion_v2: NotionIcon, onedrive: MicrosoftOneDriveIcon, + onepassword: OnePasswordIcon, openai: OpenAIIcon, outlook: OutlookIcon, parallel_ai: ParallelIcon, diff --git a/apps/docs/content/docs/en/copilot/index.mdx b/apps/docs/content/docs/en/copilot/index.mdx index e222d8e55..9b5500aac 100644 --- a/apps/docs/content/docs/en/copilot/index.mdx +++ b/apps/docs/content/docs/en/copilot/index.mdx @@ -56,7 +56,7 @@ Switch between modes using the mode selector at the bottom of the input area. Select your preferred AI model using the model selector at the bottom right of the input area. **Available Models:** -- Claude 4.5 Opus, Sonnet (default), Haiku +- Claude 4.6 Opus (default), 4.5 Opus, Sonnet, Haiku - GPT 5.2 Codex, Pro - Gemini 3 Pro @@ -190,3 +190,99 @@ Copilot usage is billed per token from the underlying LLM. If you reach your usa See the [Cost Calculation page](/execution/costs) for billing details. +## Copilot MCP + +You can use Copilot as an MCP server in your favorite editor or AI client. This lets you build, test, deploy, and manage Sim workflows directly from tools like Cursor, Claude Code, Claude Desktop, and VS Code. 
+ +### Generating a Copilot API Key + +To connect to the Copilot MCP server, you need a **Copilot API key**: + +1. Go to [sim.ai](https://sim.ai) and sign in +2. Navigate to **Settings** → **Copilot** +3. Click **Generate API Key** +4. Copy the key — it is only shown once + +The key will look like `sk-sim-copilot-...`. You will use this in the configuration below. + +### Cursor + +Add the following to your `.cursor/mcp.json` (project-level) or global Cursor MCP settings: + +```json +{ + "mcpServers": { + "sim-copilot": { + "url": "https://www.sim.ai/api/mcp/copilot", + "headers": { + "X-API-Key": "YOUR_COPILOT_API_KEY" + } + } + } +} +``` + +Replace `YOUR_COPILOT_API_KEY` with the key you generated above. + +### Claude Code + +Run the following command to add the Copilot MCP server: + +```bash +claude mcp add sim-copilot \ + --transport http \ + https://www.sim.ai/api/mcp/copilot \ + --header "X-API-Key: YOUR_COPILOT_API_KEY" +``` + +Replace `YOUR_COPILOT_API_KEY` with your key. + +### Claude Desktop + +Claude Desktop requires [`mcp-remote`](https://www.npmjs.com/package/mcp-remote) to connect to HTTP-based MCP servers. Add the following to your Claude Desktop config file (`~/Library/Application Support/Claude/claude_desktop_config.json` on macOS): + +```json +{ + "mcpServers": { + "sim-copilot": { + "command": "npx", + "args": [ + "-y", + "mcp-remote", + "https://www.sim.ai/api/mcp/copilot", + "--header", + "X-API-Key: YOUR_COPILOT_API_KEY" + ] + } + } +} +``` + +Replace `YOUR_COPILOT_API_KEY` with your key. + +### VS Code + +Add the following to your VS Code `settings.json` or workspace `.vscode/settings.json`: + +```json +{ + "mcp": { + "servers": { + "sim-copilot": { + "type": "http", + "url": "https://www.sim.ai/api/mcp/copilot", + "headers": { + "X-API-Key": "YOUR_COPILOT_API_KEY" + } + } + } + } +} +``` + +Replace `YOUR_COPILOT_API_KEY` with your key. + + + For self-hosted deployments, replace `https://www.sim.ai` with your self-hosted Sim URL. 
+ + diff --git a/apps/docs/content/docs/en/tools/airweave.mdx b/apps/docs/content/docs/en/tools/airweave.mdx index 59764a4c0..bc9cb8cb3 100644 --- a/apps/docs/content/docs/en/tools/airweave.mdx +++ b/apps/docs/content/docs/en/tools/airweave.mdx @@ -25,6 +25,7 @@ With Airweave, you can: In Sim, the Airweave integration empowers your agents to search, summarize, and extract insights from all your organization’s data via a single tool. Use Airweave to drive rich, contextual knowledge retrieval within your workflows—whether answering questions, generating summaries, or supporting dynamic decision-making. {/* MANUAL-CONTENT-END */} + ## Usage Instructions Search across your synced data sources using Airweave. Supports semantic search with hybrid, neural, or keyword retrieval strategies. Optionally generate AI-powered answers from search results. diff --git a/apps/docs/content/docs/en/tools/jira.mdx b/apps/docs/content/docs/en/tools/jira.mdx index 812752057..179d7023a 100644 --- a/apps/docs/content/docs/en/tools/jira.mdx +++ b/apps/docs/content/docs/en/tools/jira.mdx @@ -43,7 +43,6 @@ Retrieve detailed information about a specific Jira issue | Parameter | Type | Required | Description | | --------- | ---- | -------- | ----------- | | `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) | -| `projectId` | string | No | Jira project key \(e.g., PROJ\). Optional when retrieving a single issue. | | `issueKey` | string | Yes | Jira issue key to retrieve \(e.g., PROJ-123\) | | `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. 
| @@ -51,13 +50,184 @@ Retrieve detailed information about a specific Jira issue | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | -| `issueKey` | string | Issue key \(e.g., PROJ-123\) | +| `ts` | string | ISO 8601 timestamp of the operation | +| `id` | string | Issue ID | +| `key` | string | Issue key \(e.g., PROJ-123\) | +| `self` | string | REST API URL for this issue | | `summary` | string | Issue summary | -| `description` | json | Issue description content | -| `created` | string | Issue creation timestamp | -| `updated` | string | Issue last updated timestamp | -| `issue` | json | Complete issue object with all fields | +| `description` | string | Issue description text \(extracted from ADF\) | +| `status` | object | Issue status | +| ↳ `id` | string | Status ID | +| ↳ `name` | string | Status name \(e.g., Open, In Progress, Done\) | +| ↳ `description` | string | Status description | +| ↳ `statusCategory` | object | Status category grouping | +| ↳ `id` | number | Status category ID | +| ↳ `key` | string | Status category key \(e.g., new, indeterminate, done\) | +| ↳ `name` | string | Status category name \(e.g., To Do, In Progress, Done\) | +| ↳ `colorName` | string | Status category color \(e.g., blue-gray, yellow, green\) | +| `issuetype` | object | Issue type | +| ↳ `id` | string | Issue type ID | +| ↳ `name` | string | Issue type name \(e.g., Task, Bug, Story, Epic\) | +| ↳ `description` | string | Issue type description | +| ↳ `subtask` | boolean | Whether this is a subtask type | +| ↳ `iconUrl` | string | URL to the issue type icon | +| `project` | object | Project the issue belongs to | +| ↳ `id` | string | Project ID | +| ↳ `key` | string | Project key \(e.g., PROJ\) | +| ↳ `name` | string | Project name | +| ↳ `projectTypeKey` | string | Project type key \(e.g., software, business\) | +| `priority` | object | Issue priority | +| ↳ `id` | string | Priority ID | +| ↳ `name` | string | 
Priority name \(e.g., Highest, High, Medium, Low, Lowest\) | +| ↳ `iconUrl` | string | URL to the priority icon | +| `assignee` | object | Assigned user | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| `reporter` | object | Reporter user | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| `creator` | object | Issue creator | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| `labels` | array | Issue labels | +| `components` | array | Issue components | +| ↳ `id` | string | Component ID | +| ↳ `name` | string | Component name | +| ↳ `description` | string | Component description | +| `fixVersions` | array | Fix versions | +| ↳ `id` | string | Version ID | +| ↳ `name` | string | Version name | +| ↳ `released` | boolean | Whether the version is released | +| ↳ `releaseDate` | string | Release date \(YYYY-MM-DD\) | +| `resolution` | 
object | Issue resolution | +| ↳ `id` | string | Resolution ID | +| ↳ `name` | string | Resolution name \(e.g., Fixed, Duplicate, Won't Fix\) | +| ↳ `description` | string | Resolution description | +| `duedate` | string | Due date \(YYYY-MM-DD\) | +| `created` | string | ISO 8601 timestamp when the issue was created | +| `updated` | string | ISO 8601 timestamp when the issue was last updated | +| `resolutiondate` | string | ISO 8601 timestamp when the issue was resolved | +| `timetracking` | object | Time tracking information | +| ↳ `originalEstimate` | string | Original estimate in human-readable format \(e.g., 1w 2d\) | +| ↳ `remainingEstimate` | string | Remaining estimate in human-readable format | +| ↳ `timeSpent` | string | Time spent in human-readable format | +| ↳ `originalEstimateSeconds` | number | Original estimate in seconds | +| ↳ `remainingEstimateSeconds` | number | Remaining estimate in seconds | +| ↳ `timeSpentSeconds` | number | Time spent in seconds | +| `parent` | object | Parent issue \(for subtasks\) | +| ↳ `id` | string | Parent issue ID | +| ↳ `key` | string | Parent issue key | +| ↳ `summary` | string | Parent issue summary | +| `issuelinks` | array | Linked issues | +| ↳ `id` | string | Issue link ID | +| ↳ `type` | object | Link type information | +| ↳ `id` | string | Link type ID | +| ↳ `name` | string | Link type name \(e.g., Blocks, Relates\) | +| ↳ `inward` | string | Inward description \(e.g., is blocked by\) | +| ↳ `outward` | string | Outward description \(e.g., blocks\) | +| ↳ `inwardIssue` | object | Inward linked issue | +| ↳ `id` | string | Issue ID | +| ↳ `key` | string | Issue key | +| ↳ `statusName` | string | Issue status name | +| ↳ `summary` | string | Issue summary | +| ↳ `outwardIssue` | object | Outward linked issue | +| ↳ `id` | string | Issue ID | +| ↳ `key` | string | Issue key | +| ↳ `statusName` | string | Issue status name | +| ↳ `summary` | string | Issue summary | +| `subtasks` | array | Subtask issues | +| ↳ 
`id` | string | Subtask issue ID | +| ↳ `key` | string | Subtask issue key | +| ↳ `summary` | string | Subtask summary | +| ↳ `statusName` | string | Subtask status name | +| ↳ `issueTypeName` | string | Subtask issue type name | +| `votes` | object | Vote information | +| ↳ `votes` | number | Number of votes | +| ↳ `hasVoted` | boolean | Whether the current user has voted | +| `watches` | object | Watch information | +| ↳ `watchCount` | number | Number of watchers | +| ↳ `isWatching` | boolean | Whether the current user is watching | +| `comments` | array | Issue comments \(fetched separately\) | +| ↳ `id` | string | Comment ID | +| ↳ `body` | string | Comment body text \(extracted from ADF\) | +| ↳ `author` | object | Comment author | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| ↳ `updateAuthor` | object | User who last updated the comment | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| ↳ `created` | string | ISO 8601 timestamp when the comment was created | +| ↳ `updated` | string | ISO 8601 timestamp when the comment was last updated | +| ↳ `visibility` | object | Comment visibility restriction | +| ↳ `type` | string | Restriction type \(e.g., role, group\) | +| ↳ `value` | string | Restriction 
value \(e.g., Administrators\) | +| `worklogs` | array | Issue worklogs \(fetched separately\) | +| ↳ `id` | string | Worklog ID | +| ↳ `author` | object | Worklog author | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| ↳ `updateAuthor` | object | User who last updated the worklog | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| ↳ `comment` | string | Worklog comment text | +| ↳ `started` | string | ISO 8601 timestamp when the work started | +| ↳ `timeSpent` | string | Time spent in human-readable format \(e.g., 3h 20m\) | +| ↳ `timeSpentSeconds` | number | Time spent in seconds | +| ↳ `created` | string | ISO 8601 timestamp when the worklog was created | +| ↳ `updated` | string | ISO 8601 timestamp when the worklog was last updated | +| `attachments` | array | Issue attachments | +| ↳ `id` | string | Attachment ID | +| ↳ `filename` | string | Attachment file name | +| ↳ `mimeType` | string | MIME type of the attachment | +| ↳ `size` | number | File size in bytes | +| ↳ `content` | string | URL to download the attachment content | +| ↳ `thumbnail` | string | URL to the attachment thumbnail | +| ↳ `author` | object | Attachment author | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ 
`displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| ↳ `created` | string | ISO 8601 timestamp when the attachment was created | +| `issueKey` | string | Issue key \(e.g., PROJ-123\) | +| `issue` | json | Complete raw Jira issue object from the API | ### `jira_update` @@ -68,26 +238,32 @@ Update a Jira issue | Parameter | Type | Required | Description | | --------- | ---- | -------- | ----------- | | `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) | -| `projectId` | string | No | Jira project key \(e.g., PROJ\). Optional when updating a single issue. | | `issueKey` | string | Yes | Jira issue key to update \(e.g., PROJ-123\) | | `summary` | string | No | New summary for the issue | | `description` | string | No | New description for the issue | -| `status` | string | No | New status for the issue | -| `priority` | string | No | New priority for the issue | -| `assignee` | string | No | New assignee for the issue | +| `priority` | string | No | New priority ID or name for the issue \(e.g., "High"\) | +| `assignee` | string | No | New assignee account ID for the issue | +| `labels` | json | No | Labels to set on the issue \(array of label name strings\) | +| `components` | json | No | Components to set on the issue \(array of component name strings\) | +| `duedate` | string | No | Due date for the issue \(format: YYYY-MM-DD\) | +| `fixVersions` | json | No | Fix versions to set \(array of version name strings\) | +| `environment` | string | No | Environment information for the issue | +| `customFieldId` | string | No | Custom field ID to update \(e.g., customfield_10001\) | +| `customFieldValue` | string | No | Value for the 
custom field | +| `notifyUsers` | boolean | No | Whether to send email notifications about this update \(default: true\) | | `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. | #### Output | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Updated issue key \(e.g., PROJ-123\) | | `summary` | string | Issue summary after update | ### `jira_write` -Write a Jira issue +Create a new Jira issue #### Input @@ -100,9 +276,12 @@ Write a Jira issue | `priority` | string | No | Priority ID or name for the issue \(e.g., "10000" or "High"\) | | `assignee` | string | No | Assignee account ID for the issue | | `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. | -| `issueType` | string | Yes | Type of issue to create \(e.g., Task, Story\) | +| `issueType` | string | Yes | Type of issue to create \(e.g., Task, Story, Bug, Epic, Sub-task\) | +| `parent` | json | No | Parent issue key for creating subtasks \(e.g., \{ "key": "PROJ-123" \}\) | | `labels` | array | No | Labels for the issue \(array of label names\) | +| `components` | array | No | Components for the issue \(array of component names\) | | `duedate` | string | No | Due date for the issue \(format: YYYY-MM-DD\) | +| `fixVersions` | array | No | Fix versions for the issue \(array of version names\) | | `reporter` | string | No | Reporter account ID for the issue | | `environment` | string | No | Environment information for the issue | | `customFieldId` | string | No | Custom field ID \(e.g., customfield_10001\) | @@ -112,15 +291,17 @@ Write a Jira issue | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | +| `id` | string | Created 
issue ID | | `issueKey` | string | Created issue key \(e.g., PROJ-123\) | +| `self` | string | REST API URL for the created issue | | `summary` | string | Issue summary | -| `url` | string | URL to the created issue | -| `assigneeId` | string | Account ID of the assigned user \(if assigned\) | +| `url` | string | URL to the created issue in Jira | +| `assigneeId` | string | Account ID of the assigned user \(null if no assignee was set\) | ### `jira_bulk_read` -Retrieve multiple Jira issues in bulk +Retrieve multiple Jira issues from a project in bulk #### Input @@ -134,7 +315,30 @@ Retrieve multiple Jira issues in bulk | Parameter | Type | Description | | --------- | ---- | ----------- | -| `issues` | array | Array of Jira issues with ts, summary, description, created, and updated timestamps | +| `ts` | string | ISO 8601 timestamp of the operation | +| `total` | number | Total number of issues in the project \(may not always be available\) | +| `issues` | array | Array of Jira issues | +| ↳ `id` | string | Issue ID | +| ↳ `key` | string | Issue key \(e.g., PROJ-123\) | +| ↳ `self` | string | REST API URL for this issue | +| ↳ `summary` | string | Issue summary | +| ↳ `description` | string | Issue description text | +| ↳ `status` | object | Issue status | +| ↳ `id` | string | Status ID | +| ↳ `name` | string | Status name | +| ↳ `issuetype` | object | Issue type | +| ↳ `id` | string | Issue type ID | +| ↳ `name` | string | Issue type name | +| ↳ `priority` | object | Issue priority | +| ↳ `id` | string | Priority ID | +| ↳ `name` | string | Priority name | +| ↳ `assignee` | object | Assigned user | +| ↳ `accountId` | string | Atlassian account ID | +| ↳ `displayName` | string | Display name | +| ↳ `created` | string | ISO 8601 creation timestamp | +| ↳ `updated` | string | ISO 8601 last updated timestamp | +| `nextPageToken` | string | Cursor token for the next page. Null when no more results. 
| +| `isLast` | boolean | Whether this is the last page of results | ### `jira_delete_issue` @@ -153,7 +357,7 @@ Delete a Jira issue | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Deleted issue key | ### `jira_assign_issue` @@ -173,9 +377,9 @@ Assign a Jira issue to a user | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Issue key that was assigned | -| `assigneeId` | string | Account ID of the assignee | +| `assigneeId` | string | Account ID of the assignee \(use "-1" for auto-assign, null to unassign\) | ### `jira_transition_issue` @@ -189,15 +393,20 @@ Move a Jira issue between workflow statuses (e.g., To Do -> In Progress) | `issueKey` | string | Yes | Jira issue key to transition \(e.g., PROJ-123\) | | `transitionId` | string | Yes | ID of the transition to execute \(e.g., "11" for "To Do", "21" for "In Progress"\) | | `comment` | string | No | Optional comment to add when transitioning the issue | +| `resolution` | string | No | Resolution name to set during transition \(e.g., "Fixed", "Won\'t Fix"\) | | `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. 
| #### Output | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Issue key that was transitioned | | `transitionId` | string | Applied transition ID | +| `transitionName` | string | Applied transition name | +| `toStatus` | object | Target status after transition | +| ↳ `id` | string | Status ID | +| ↳ `name` | string | Status name | ### `jira_search_issues` @@ -209,20 +418,77 @@ Search for Jira issues using JQL (Jira Query Language) | --------- | ---- | -------- | ----------- | | `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) | | `jql` | string | Yes | JQL query string to search for issues \(e.g., "project = PROJ AND status = Open"\) | -| `startAt` | number | No | The index of the first result to return \(for pagination\) | -| `maxResults` | number | No | Maximum number of results to return \(default: 50\) | -| `fields` | array | No | Array of field names to return \(default: \['summary', 'status', 'assignee', 'created', 'updated'\]\) | +| `nextPageToken` | string | No | Cursor token for the next page of results. Omit for the first page. | +| `maxResults` | number | No | Maximum number of results to return per page \(default: 50\) | +| `fields` | array | No | Array of field names to return \(default: all navigable\). Use "*all" for every field. | | `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. 
| #### Output | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | -| `total` | number | Total number of matching issues | -| `startAt` | number | Pagination start index | -| `maxResults` | number | Maximum results per page | -| `issues` | array | Array of matching issues with key, summary, status, assignee, created, updated | +| `ts` | string | ISO 8601 timestamp of the operation | +| `issues` | array | Array of matching issues | +| ↳ `id` | string | Issue ID | +| ↳ `key` | string | Issue key \(e.g., PROJ-123\) | +| ↳ `self` | string | REST API URL for this issue | +| ↳ `summary` | string | Issue summary | +| ↳ `description` | string | Issue description text \(extracted from ADF\) | +| ↳ `status` | object | Issue status | +| ↳ `id` | string | Status ID | +| ↳ `name` | string | Status name \(e.g., Open, In Progress, Done\) | +| ↳ `description` | string | Status description | +| ↳ `statusCategory` | object | Status category grouping | +| ↳ `id` | number | Status category ID | +| ↳ `key` | string | Status category key \(e.g., new, indeterminate, done\) | +| ↳ `name` | string | Status category name \(e.g., To Do, In Progress, Done\) | +| ↳ `colorName` | string | Status category color \(e.g., blue-gray, yellow, green\) | +| ↳ `issuetype` | object | Issue type | +| ↳ `id` | string | Issue type ID | +| ↳ `name` | string | Issue type name \(e.g., Task, Bug, Story, Epic\) | +| ↳ `description` | string | Issue type description | +| ↳ `subtask` | boolean | Whether this is a subtask type | +| ↳ `iconUrl` | string | URL to the issue type icon | +| ↳ `project` | object | Project the issue belongs to | +| ↳ `id` | string | Project ID | +| ↳ `key` | string | Project key \(e.g., PROJ\) | +| ↳ `name` | string | Project name | +| ↳ `projectTypeKey` | string | Project type key \(e.g., software, business\) | +| ↳ `priority` | object | Issue priority | +| ↳ `id` | string | Priority ID | +| ↳ `name` | string | Priority 
name \(e.g., Highest, High, Medium, Low, Lowest\) | +| ↳ `iconUrl` | string | URL to the priority icon | +| ↳ `assignee` | object | Assigned user | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| ↳ `reporter` | object | Reporter user | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| ↳ `labels` | array | Issue labels | +| ↳ `components` | array | Issue components | +| ↳ `id` | string | Component ID | +| ↳ `name` | string | Component name | +| ↳ `description` | string | Component description | +| ↳ `resolution` | object | Issue resolution | +| ↳ `id` | string | Resolution ID | +| ↳ `name` | string | Resolution name \(e.g., Fixed, Duplicate, Won't Fix\) | +| ↳ `description` | string | Resolution description | +| ↳ `duedate` | string | Due date \(YYYY-MM-DD\) | +| ↳ `created` | string | ISO 8601 timestamp when the issue was created | +| ↳ `updated` | string | ISO 8601 timestamp when the issue was last updated | +| `nextPageToken` | string | Cursor token for the next page. Null when no more results. 
| +| `isLast` | boolean | Whether this is the last page of results | +| `total` | number | Total number of matching issues \(may not always be available\) | ### `jira_add_comment` @@ -235,16 +501,27 @@ Add a comment to a Jira issue | `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) | | `issueKey` | string | Yes | Jira issue key to add comment to \(e.g., PROJ-123\) | | `body` | string | Yes | Comment body text | +| `visibility` | json | No | Restrict comment visibility. Object with "type" \("role" or "group"\) and "value" \(role/group name\). | | `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. | #### Output | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Issue key the comment was added to | | `commentId` | string | Created comment ID | | `body` | string | Comment text content | +| `author` | object | Comment author | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| `created` | string | ISO 8601 timestamp when the comment was created | +| `updated` | string | ISO 8601 timestamp when the comment was last updated | ### `jira_get_comments` @@ -258,16 +535,42 @@ Get all comments from a Jira issue | `issueKey` | string | Yes | Jira issue key to get comments from \(e.g., PROJ-123\) | | `startAt` | number | No | Index of the first comment to return \(default: 0\) | | `maxResults` | number | No | Maximum number of comments to return \(default: 50\) | +| 
`orderBy` | string | No | Sort order for comments: "-created" for newest first, "created" for oldest first | | `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. | #### Output | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Issue key | | `total` | number | Total number of comments | -| `comments` | array | Array of comments with id, author, body, created, updated | +| `startAt` | number | Pagination start index | +| `maxResults` | number | Maximum results per page | +| `comments` | array | Array of comments | +| ↳ `id` | string | Comment ID | +| ↳ `body` | string | Comment body text \(extracted from ADF\) | +| ↳ `author` | object | Comment author | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| ↳ `updateAuthor` | object | User who last updated the comment | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| ↳ `created` | string | ISO 8601 timestamp when the comment was created | +| ↳ `updated` | string | ISO 8601 timestamp when the comment was last updated | +| ↳ `visibility` | object | Comment 
visibility restriction | +| ↳ `type` | string | Restriction type \(e.g., role, group\) | +| ↳ `value` | string | Restriction value \(e.g., Administrators\) | ### `jira_update_comment` @@ -281,16 +584,27 @@ Update an existing comment on a Jira issue | `issueKey` | string | Yes | Jira issue key containing the comment \(e.g., PROJ-123\) | | `commentId` | string | Yes | ID of the comment to update | | `body` | string | Yes | Updated comment text | +| `visibility` | json | No | Restrict comment visibility. Object with "type" \("role" or "group"\) and "value" \(role/group name\). | | `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. | #### Output | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Issue key | | `commentId` | string | Updated comment ID | | `body` | string | Updated comment text | +| `author` | object | Comment author | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| `created` | string | ISO 8601 timestamp when the comment was created | +| `updated` | string | ISO 8601 timestamp when the comment was last updated | ### `jira_delete_comment` @@ -309,7 +623,7 @@ Delete a comment from a Jira issue | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Issue key | | `commentId` | string | Deleted comment ID | @@ -329,9 +643,24 @@ Get 
all attachments from a Jira issue | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Issue key | -| `attachments` | array | Array of attachments with id, filename, size, mimeType, created, author | +| `attachments` | array | Array of attachments | +| ↳ `id` | string | Attachment ID | +| ↳ `filename` | string | Attachment file name | +| ↳ `mimeType` | string | MIME type of the attachment | +| ↳ `size` | number | File size in bytes | +| ↳ `content` | string | URL to download the attachment content | +| ↳ `thumbnail` | string | URL to the attachment thumbnail | +| ↳ `author` | object | Attachment author | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| ↳ `created` | string | ISO 8601 timestamp when the attachment was created | ### `jira_add_attachment` @@ -350,10 +679,19 @@ Add attachments to a Jira issue | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Issue key | -| `attachmentIds` | json | IDs of uploaded attachments | -| `files` | file[] | Uploaded attachment files | +| `attachments` | array | Uploaded attachments | +| ↳ `id` | string | Attachment ID | +| ↳ `filename` | string | Attachment file name | +| ↳ `mimeType` | string | MIME type | +| ↳ `size` | number | File size in bytes | +| ↳ `content` | string | URL to download the attachment | +| `attachmentIds` | array | Array of attachment IDs 
| +| `files` | array | Uploaded file metadata | +| ↳ `name` | string | File name | +| ↳ `mimeType` | string | MIME type | +| ↳ `size` | number | File size in bytes | ### `jira_delete_attachment` @@ -371,7 +709,7 @@ Delete an attachment from a Jira issue | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `attachmentId` | string | Deleted attachment ID | ### `jira_add_worklog` @@ -387,16 +725,28 @@ Add a time tracking worklog entry to a Jira issue | `timeSpentSeconds` | number | Yes | Time spent in seconds | | `comment` | string | No | Optional comment for the worklog entry | | `started` | string | No | Optional start time in ISO format \(defaults to current time\) | +| `visibility` | json | No | Restrict worklog visibility. Object with "type" \("role" or "group"\) and "value" \(role/group name\). | | `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. 
| #### Output | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Issue key the worklog was added to | | `worklogId` | string | Created worklog ID | +| `timeSpent` | string | Time spent in human-readable format \(e.g., 3h 20m\) | | `timeSpentSeconds` | number | Time spent in seconds | +| `author` | object | Worklog author | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| `started` | string | ISO 8601 timestamp when the work started | +| `created` | string | ISO 8601 timestamp when the worklog was created | ### `jira_get_worklogs` @@ -416,10 +766,35 @@ Get all worklog entries from a Jira issue | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Issue key | | `total` | number | Total number of worklogs | -| `worklogs` | array | Array of worklogs with id, author, timeSpentSeconds, timeSpent, comment, created, updated, started | +| `startAt` | number | Pagination start index | +| `maxResults` | number | Maximum results per page | +| `worklogs` | array | Array of worklogs | +| ↳ `id` | string | Worklog ID | +| ↳ `author` | object | Worklog author | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ 
`accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| ↳ `updateAuthor` | object | User who last updated the worklog | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| ↳ `comment` | string | Worklog comment text | +| ↳ `started` | string | ISO 8601 timestamp when the work started | +| ↳ `timeSpent` | string | Time spent in human-readable format \(e.g., 3h 20m\) | +| ↳ `timeSpentSeconds` | number | Time spent in seconds | +| ↳ `created` | string | ISO 8601 timestamp when the worklog was created | +| ↳ `updated` | string | ISO 8601 timestamp when the worklog was last updated | ### `jira_update_worklog` @@ -435,15 +810,38 @@ Update an existing worklog entry on a Jira issue | `timeSpentSeconds` | number | No | Time spent in seconds | | `comment` | string | No | Optional comment for the worklog entry | | `started` | string | No | Optional start time in ISO format | +| `visibility` | json | No | Restrict worklog visibility. Object with "type" \("role" or "group"\) and "value" \(role/group name\). | | `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. 
| #### Output | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Issue key | | `worklogId` | string | Updated worklog ID | +| `timeSpent` | string | Human-readable time spent \(e.g., "3h 20m"\) | +| `timeSpentSeconds` | number | Time spent in seconds | +| `comment` | string | Worklog comment text | +| `author` | object | Worklog author | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| `updateAuthor` | object | User who last updated the worklog | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | +| `started` | string | Worklog start time in ISO format | +| `created` | string | Worklog creation time | +| `updated` | string | Worklog last update time | ### `jira_delete_worklog` @@ -462,7 +860,7 @@ Delete a worklog entry from a Jira issue | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Issue key | | `worklogId` | string | Deleted worklog ID | @@ -485,7 +883,7 @@ Create a link relationship between two Jira issues | Parameter 
| Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `inwardIssue` | string | Inward issue key | | `outwardIssue` | string | Outward issue key | | `linkType` | string | Type of issue link | @@ -507,7 +905,7 @@ Delete a link between two Jira issues | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `linkId` | string | Deleted link ID | ### `jira_add_watcher` @@ -527,7 +925,7 @@ Add a watcher to a Jira issue to receive notifications about updates | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Issue key | | `watcherAccountId` | string | Added watcher account ID | @@ -548,7 +946,7 @@ Remove a watcher from a Jira issue | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | +| `ts` | string | ISO 8601 timestamp of the operation | | `issueKey` | string | Issue key | | `watcherAccountId` | string | Removed watcher account ID | @@ -570,8 +968,15 @@ Get Jira users. If an account ID is provided, returns a single user. 
Otherwise, | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ts` | string | Timestamp of the operation | -| `users` | json | Array of users with accountId, displayName, emailAddress, active status, and avatarUrls | +| `ts` | string | ISO 8601 timestamp of the operation | +| `users` | array | Array of Jira users | +| ↳ `accountId` | string | Atlassian account ID of the user | +| ↳ `displayName` | string | Display name of the user | +| ↳ `active` | boolean | Whether the user account is active | +| ↳ `emailAddress` | string | Email address of the user | +| ↳ `accountType` | string | Type of account \(e.g., atlassian, app, customer\) | +| ↳ `avatarUrl` | string | URL to the user avatar \(48x48\) | +| ↳ `timeZone` | string | User timezone | | `total` | number | Total number of users returned | | `startAt` | number | Pagination start index | | `maxResults` | number | Maximum results per page | diff --git a/apps/docs/content/docs/en/tools/jira_service_management.mdx b/apps/docs/content/docs/en/tools/jira_service_management.mdx index 9cc80444e..9814f8103 100644 --- a/apps/docs/content/docs/en/tools/jira_service_management.mdx +++ b/apps/docs/content/docs/en/tools/jira_service_management.mdx @@ -46,6 +46,7 @@ Get all service desks from Jira Service Management | --------- | ---- | -------- | ----------- | | `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) | | `cloudId` | string | No | Jira Cloud ID for the instance | +| `expand` | string | No | Comma-separated fields to expand in the response | | `start` | number | No | Start index for pagination \(e.g., 0, 50, 100\) | | `limit` | number | No | Maximum results to return \(e.g., 10, 25, 50\) | @@ -54,7 +55,14 @@ Get all service desks from Jira Service Management | Parameter | Type | Description | | --------- | ---- | ----------- | | `ts` | string | Timestamp of the operation | -| `serviceDesks` | json | Array of service desks | +| `serviceDesks` | array | List of service 
desks | +| ↳ `id` | string | Service desk ID | +| ↳ `projectId` | string | Associated Jira project ID | +| ↳ `projectName` | string | Associated project name | +| ↳ `projectKey` | string | Associated project key | +| ↳ `name` | string | Service desk name | +| ↳ `description` | string | Service desk description | +| ↳ `leadDisplayName` | string | Project lead display name | | `total` | number | Total number of service desks | | `isLastPage` | boolean | Whether this is the last page | @@ -69,6 +77,9 @@ Get request types for a service desk in Jira Service Management | `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) | | `cloudId` | string | No | Jira Cloud ID for the instance | | `serviceDeskId` | string | Yes | Service Desk ID \(e.g., "1", "2"\) | +| `searchQuery` | string | No | Filter request types by name | +| `groupId` | string | No | Filter by request type group ID | +| `expand` | string | No | Comma-separated fields to expand in the response | | `start` | number | No | Start index for pagination \(e.g., 0, 50, 100\) | | `limit` | number | No | Maximum results to return \(e.g., 10, 25, 50\) | @@ -77,7 +88,16 @@ Get request types for a service desk in Jira Service Management | Parameter | Type | Description | | --------- | ---- | ----------- | | `ts` | string | Timestamp of the operation | -| `requestTypes` | json | Array of request types | +| `requestTypes` | array | List of request types | +| ↳ `id` | string | Request type ID | +| ↳ `name` | string | Request type name | +| ↳ `description` | string | Request type description | +| ↳ `helpText` | string | Help text for customers | +| ↳ `issueTypeId` | string | Associated Jira issue type ID | +| ↳ `serviceDeskId` | string | Parent service desk ID | +| ↳ `groupIds` | json | Groups this request type belongs to | +| ↳ `icon` | json | Request type icon with id and links | +| ↳ `restrictionStatus` | string | OPEN or RESTRICTED | | `total` | number | Total number of request types | | 
`isLastPage` | boolean | Whether this is the last page | @@ -96,6 +116,9 @@ Create a new service request in Jira Service Management | `summary` | string | Yes | Summary/title for the service request | | `description` | string | No | Description for the service request | | `raiseOnBehalfOf` | string | No | Account ID of customer to raise request on behalf of | +| `requestFieldValues` | json | No | Custom field values as key-value pairs \(overrides summary/description if provided\) | +| `requestParticipants` | string | No | Comma-separated account IDs to add as request participants | +| `channel` | string | No | Channel the request originates from \(e.g., portal, email\) | #### Output @@ -106,6 +129,9 @@ Create a new service request in Jira Service Management | `issueKey` | string | Created request issue key \(e.g., SD-123\) | | `requestTypeId` | string | Request type ID | | `serviceDeskId` | string | Service desk ID | +| `createdDate` | json | Creation date with iso8601, friendly, epochMillis | +| `currentStatus` | json | Current status with status name and category | +| `reporter` | json | Reporter user with accountId, displayName, emailAddress | | `success` | boolean | Whether the request was created successfully | | `url` | string | URL to the created request | @@ -120,12 +146,33 @@ Get a single service request from Jira Service Management | `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) | | `cloudId` | string | No | Jira Cloud ID for the instance | | `issueIdOrKey` | string | Yes | Issue ID or key \(e.g., SD-123\) | +| `expand` | string | No | Comma-separated fields to expand: participant, status, sla, requestType, serviceDesk, attachment, comment, action | #### Output | Parameter | Type | Description | | --------- | ---- | ----------- | | `ts` | string | Timestamp of the operation | +| `issueId` | string | Jira issue ID | +| `issueKey` | string | Issue key \(e.g., SD-123\) | +| `requestTypeId` | string | Request type ID | +| 
`serviceDeskId` | string | Service desk ID | +| `createdDate` | json | Creation date with iso8601, friendly, epochMillis | +| `currentStatus` | object | Current request status | +| ↳ `status` | string | Status name | +| ↳ `statusCategory` | string | Status category \(NEW, INDETERMINATE, DONE\) | +| ↳ `statusDate` | json | Status change date with iso8601, friendly, epochMillis | +| `reporter` | object | Reporter user details | +| ↳ `accountId` | string | Atlassian account ID | +| ↳ `displayName` | string | User display name | +| ↳ `emailAddress` | string | User email address | +| ↳ `active` | boolean | Whether the account is active | +| `requestFieldValues` | array | Request field values | +| ↳ `fieldId` | string | Field identifier | +| ↳ `label` | string | Human-readable field label | +| ↳ `value` | json | Field value | +| ↳ `renderedValue` | json | HTML-rendered field value | +| `url` | string | URL to the request | | `request` | json | The service request object | ### `jsm_get_requests` @@ -139,9 +186,11 @@ Get multiple service requests from Jira Service Management | `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) | | `cloudId` | string | No | Jira Cloud ID for the instance | | `serviceDeskId` | string | No | Filter by service desk ID \(e.g., "1", "2"\) | -| `requestOwnership` | string | No | Filter by ownership: OWNED_REQUESTS, PARTICIPATED_REQUESTS, ORGANIZATION, ALL_REQUESTS | -| `requestStatus` | string | No | Filter by status: OPEN, CLOSED, ALL | +| `requestOwnership` | string | No | Filter by ownership: OWNED_REQUESTS, PARTICIPATED_REQUESTS, APPROVER, ALL_REQUESTS | +| `requestStatus` | string | No | Filter by status: OPEN_REQUESTS, CLOSED_REQUESTS, ALL_REQUESTS | +| `requestTypeId` | string | No | Filter by request type ID | | `searchTerm` | string | No | Search term to filter requests \(e.g., "password reset", "laptop"\) | +| `expand` | string | No | Comma-separated fields to expand: participant, status, sla, requestType, 
serviceDesk, attachment, comment, action | | `start` | number | No | Start index for pagination \(e.g., 0, 50, 100\) | | `limit` | number | No | Maximum results to return \(e.g., 10, 25, 50\) | @@ -150,8 +199,27 @@ Get multiple service requests from Jira Service Management | Parameter | Type | Description | | --------- | ---- | ----------- | | `ts` | string | Timestamp of the operation | -| `requests` | json | Array of service requests | -| `total` | number | Total number of requests | +| `requests` | array | List of service requests | +| ↳ `issueId` | string | Jira issue ID | +| ↳ `issueKey` | string | Issue key \(e.g., SD-123\) | +| ↳ `requestTypeId` | string | Request type ID | +| ↳ `serviceDeskId` | string | Service desk ID | +| ↳ `createdDate` | json | Creation date with iso8601, friendly, epochMillis | +| ↳ `currentStatus` | object | Current request status | +| ↳ `status` | string | Status name | +| ↳ `statusCategory` | string | Status category \(NEW, INDETERMINATE, DONE\) | +| ↳ `statusDate` | json | Status change date with iso8601, friendly, epochMillis | +| ↳ `reporter` | object | Reporter user details | +| ↳ `accountId` | string | Atlassian account ID | +| ↳ `displayName` | string | User display name | +| ↳ `emailAddress` | string | User email address | +| ↳ `active` | boolean | Whether the account is active | +| ↳ `requestFieldValues` | array | Request field values | +| ↳ `fieldId` | string | Field identifier | +| ↳ `label` | string | Human-readable field label | +| ↳ `value` | json | Field value | +| ↳ `renderedValue` | json | HTML-rendered field value | +| `total` | number | Total number of requests in current page | | `isLastPage` | boolean | Whether this is the last page | ### `jsm_add_comment` @@ -177,6 +245,12 @@ Add a comment (public or internal) to a service request in Jira Service Manageme | `commentId` | string | Created comment ID | | `body` | string | Comment body text | | `isPublic` | boolean | Whether the comment is public | +| `author` | 
object | Comment author | +| ↳ `accountId` | string | Atlassian account ID | +| ↳ `displayName` | string | User display name | +| ↳ `emailAddress` | string | User email address | +| ↳ `active` | boolean | Whether the account is active | +| `createdDate` | json | Comment creation date with iso8601, friendly, epochMillis | | `success` | boolean | Whether the comment was added successfully | ### `jsm_get_comments` @@ -192,6 +266,7 @@ Get comments for a service request in Jira Service Management | `issueIdOrKey` | string | Yes | Issue ID or key \(e.g., SD-123\) | | `isPublic` | boolean | No | Filter to only public comments \(true/false\) | | `internal` | boolean | No | Filter to only internal comments \(true/false\) | +| `expand` | string | No | Comma-separated fields to expand: renderedBody, attachment | | `start` | number | No | Start index for pagination \(e.g., 0, 50, 100\) | | `limit` | number | No | Maximum results to return \(e.g., 10, 25, 50\) | @@ -201,7 +276,17 @@ Get comments for a service request in Jira Service Management | --------- | ---- | ----------- | | `ts` | string | Timestamp of the operation | | `issueIdOrKey` | string | Issue ID or key | -| `comments` | json | Array of comments | +| `comments` | array | List of comments | +| ↳ `id` | string | Comment ID | +| ↳ `body` | string | Comment body text | +| ↳ `public` | boolean | Whether the comment is public | +| ↳ `author` | object | Comment author | +| ↳ `accountId` | string | Atlassian account ID | +| ↳ `displayName` | string | User display name | +| ↳ `emailAddress` | string | User email address | +| ↳ `active` | boolean | Whether the account is active | +| ↳ `created` | json | Creation date with iso8601, friendly, epochMillis | +| ↳ `renderedBody` | json | HTML-rendered comment body \(when expand=renderedBody\) | | `total` | number | Total number of comments | | `isLastPage` | boolean | Whether this is the last page | @@ -225,7 +310,12 @@ Get customers for a service desk in Jira Service Management 
| Parameter | Type | Description | | --------- | ---- | ----------- | | `ts` | string | Timestamp of the operation | -| `customers` | json | Array of customers | +| `customers` | array | List of customers | +| ↳ `accountId` | string | Atlassian account ID | +| ↳ `displayName` | string | Display name | +| ↳ `emailAddress` | string | Email address | +| ↳ `active` | boolean | Whether the account is active | +| ↳ `timeZone` | string | User timezone | | `total` | number | Total number of customers | | `isLastPage` | boolean | Whether this is the last page | @@ -240,7 +330,8 @@ Add customers to a service desk in Jira Service Management | `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) | | `cloudId` | string | No | Jira Cloud ID for the instance | | `serviceDeskId` | string | Yes | Service Desk ID \(e.g., "1", "2"\) | -| `emails` | string | Yes | Comma-separated email addresses to add as customers | +| `accountIds` | string | No | Comma-separated Atlassian account IDs to add as customers | +| `emails` | string | No | Comma-separated email addresses to add as customers | #### Output @@ -269,7 +360,9 @@ Get organizations for a service desk in Jira Service Management | Parameter | Type | Description | | --------- | ---- | ----------- | | `ts` | string | Timestamp of the operation | -| `organizations` | json | Array of organizations | +| `organizations` | array | List of organizations | +| ↳ `id` | string | Organization ID | +| ↳ `name` | string | Organization name | | `total` | number | Total number of organizations | | `isLastPage` | boolean | Whether this is the last page | @@ -336,7 +429,12 @@ Get queues for a service desk in Jira Service Management | Parameter | Type | Description | | --------- | ---- | ----------- | | `ts` | string | Timestamp of the operation | -| `queues` | json | Array of queues | +| `queues` | array | List of queues | +| ↳ `id` | string | Queue ID | +| ↳ `name` | string | Queue name | +| ↳ `jql` | string | JQL filter 
for the queue | +| ↳ `fields` | json | Fields displayed in the queue | +| ↳ `issueCount` | number | Number of issues in the queue | | `total` | number | Total number of queues | | `isLastPage` | boolean | Whether this is the last page | @@ -360,7 +458,11 @@ Get SLA information for a service request in Jira Service Management | --------- | ---- | ----------- | | `ts` | string | Timestamp of the operation | | `issueIdOrKey` | string | Issue ID or key | -| `slas` | json | Array of SLA information | +| `slas` | array | List of SLA metrics | +| ↳ `id` | string | SLA metric ID | +| ↳ `name` | string | SLA metric name | +| ↳ `completedCycles` | json | Completed SLA cycles with startTime, stopTime, breachTime, breached, goalDuration, elapsedTime, remainingTime \(each time as DateDTO, durations as DurationDTO\) | +| ↳ `ongoingCycle` | json | Ongoing SLA cycle with startTime, breachTime, breached, paused, withinCalendarHours, goalDuration, elapsedTime, remainingTime | | `total` | number | Total number of SLAs | | `isLastPage` | boolean | Whether this is the last page | @@ -375,6 +477,8 @@ Get available transitions for a service request in Jira Service Management | `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) | | `cloudId` | string | No | Jira Cloud ID for the instance | | `issueIdOrKey` | string | Yes | Issue ID or key \(e.g., SD-123\) | +| `start` | number | No | Start index for pagination \(e.g., 0, 50, 100\) | +| `limit` | number | No | Maximum results to return \(e.g., 10, 25, 50\) | #### Output @@ -382,7 +486,11 @@ Get available transitions for a service request in Jira Service Management | --------- | ---- | ----------- | | `ts` | string | Timestamp of the operation | | `issueIdOrKey` | string | Issue ID or key | -| `transitions` | json | Array of available transitions | +| `transitions` | array | List of available transitions | +| ↳ `id` | string | Transition ID | +| ↳ `name` | string | Transition name | +| `total` | number | Total 
number of transitions | +| `isLastPage` | boolean | Whether this is the last page | ### `jsm_transition_request` @@ -427,7 +535,11 @@ Get participants for a request in Jira Service Management | --------- | ---- | ----------- | | `ts` | string | Timestamp of the operation | | `issueIdOrKey` | string | Issue ID or key | -| `participants` | json | Array of participants | +| `participants` | array | List of participants | +| ↳ `accountId` | string | Atlassian account ID | +| ↳ `displayName` | string | Display name | +| ↳ `emailAddress` | string | Email address | +| ↳ `active` | boolean | Whether the account is active | | `total` | number | Total number of participants | | `isLastPage` | boolean | Whether this is the last page | @@ -450,7 +562,11 @@ Add participants to a request in Jira Service Management | --------- | ---- | ----------- | | `ts` | string | Timestamp of the operation | | `issueIdOrKey` | string | Issue ID or key | -| `participants` | json | Array of added participants | +| `participants` | array | List of added participants | +| ↳ `accountId` | string | Atlassian account ID | +| ↳ `displayName` | string | Display name | +| ↳ `emailAddress` | string | Email address | +| ↳ `active` | boolean | Whether the account is active | | `success` | boolean | Whether the operation succeeded | ### `jsm_get_approvals` @@ -473,7 +589,20 @@ Get approvals for a request in Jira Service Management | --------- | ---- | ----------- | | `ts` | string | Timestamp of the operation | | `issueIdOrKey` | string | Issue ID or key | -| `approvals` | json | Array of approvals | +| `approvals` | array | List of approvals | +| ↳ `id` | string | Approval ID | +| ↳ `name` | string | Approval description | +| ↳ `finalDecision` | string | Final decision: pending, approved, or declined | +| ↳ `canAnswerApproval` | boolean | Whether current user can respond | +| ↳ `approvers` | array | List of approvers with their decisions | +| ↳ `approver` | object | Approver user details | +| ↳ 
`accountId` | string | Atlassian account ID | +| ↳ `displayName` | string | User display name | +| ↳ `emailAddress` | string | User email address | +| ↳ `active` | boolean | Whether the account is active | +| ↳ `approverDecision` | string | Decision: pending, approved, or declined | +| ↳ `createdDate` | json | Creation date | +| ↳ `completedDate` | json | Completion date | | `total` | number | Total number of approvals | | `isLastPage` | boolean | Whether this is the last page | @@ -499,6 +628,53 @@ Approve or decline an approval request in Jira Service Management | `issueIdOrKey` | string | Issue ID or key | | `approvalId` | string | Approval ID | | `decision` | string | Decision made \(approve/decline\) | +| `id` | string | Approval ID from response | +| `name` | string | Approval description | +| `finalDecision` | string | Final approval decision: pending, approved, or declined | +| `canAnswerApproval` | boolean | Whether the current user can still respond | +| `approvers` | array | Updated list of approvers with decisions | +| ↳ `approver` | object | Approver user details | +| ↳ `accountId` | string | Approver account ID | +| ↳ `displayName` | string | Approver display name | +| ↳ `emailAddress` | string | Approver email | +| ↳ `active` | boolean | Whether the account is active | +| ↳ `approverDecision` | string | Individual approver decision | +| `createdDate` | json | Approval creation date | +| `completedDate` | json | Approval completion date | +| `approval` | json | The approval object | | `success` | boolean | Whether the operation succeeded | +### `jsm_get_request_type_fields` + +Get the fields required to create a request of a specific type in Jira Service Management + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) | +| `cloudId` | string | No | Jira Cloud ID for the instance | +| `serviceDeskId` | string | Yes | 
Service Desk ID \(e.g., "1", "2"\) | +| `requestTypeId` | string | Yes | Request Type ID \(e.g., "10", "15"\) | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `ts` | string | Timestamp of the operation | +| `serviceDeskId` | string | Service desk ID | +| `requestTypeId` | string | Request type ID | +| `canAddRequestParticipants` | boolean | Whether participants can be added to requests of this type | +| `canRaiseOnBehalfOf` | boolean | Whether requests can be raised on behalf of another user | +| `requestTypeFields` | array | List of fields for this request type | +| ↳ `fieldId` | string | Field identifier \(e.g., summary, description, customfield_10010\) | +| ↳ `name` | string | Human-readable field name | +| ↳ `description` | string | Help text for the field | +| ↳ `required` | boolean | Whether the field is required | +| ↳ `visible` | boolean | Whether the field is visible | +| ↳ `validValues` | json | Allowed values for select fields | +| ↳ `presetValues` | json | Pre-populated values | +| ↳ `defaultValues` | json | Default values for the field | +| ↳ `jiraSchema` | json | Jira field schema with type, system, custom, customId | + diff --git a/apps/docs/content/docs/en/tools/meta.json b/apps/docs/content/docs/en/tools/meta.json index 419957f7e..f9bd3ca1f 100644 --- a/apps/docs/content/docs/en/tools/meta.json +++ b/apps/docs/content/docs/en/tools/meta.json @@ -76,6 +76,7 @@ "neo4j", "notion", "onedrive", + "onepassword", "openai", "outlook", "parallel_ai", diff --git a/apps/docs/content/docs/en/tools/onepassword.mdx b/apps/docs/content/docs/en/tools/onepassword.mdx new file mode 100644 index 000000000..7d35c55b4 --- /dev/null +++ b/apps/docs/content/docs/en/tools/onepassword.mdx @@ -0,0 +1,260 @@ +--- +title: 1Password +description: Manage secrets and items in 1Password vaults +--- + +import { BlockInfoCard } from "@/components/ui/block-info-card" + + + +{/* MANUAL-CONTENT-START:intro */} 
+[1Password](https://1password.com) is a widely trusted password manager and secrets vault solution, allowing individuals and teams to securely store, access, and share passwords, API credentials, and sensitive information. With robust encryption, granular access controls, and seamless syncing across devices, 1Password supports teams and organizations in managing secrets efficiently and securely. + +The [1Password Connect API](https://developer.1password.com/docs/connect/) allows programmatic access to vaults and items within an organization's 1Password account. This integration in Sim lets you automate secret retrieval, onboarding workflows, secret rotation, vault audits, and more, all in a secure and auditable manner. + +With 1Password in your Sim workflow, you can: + +- **List, search, and retrieve vaults**: Access metadata or browse available vaults for organizing secrets by project or purpose +- **Fetch items and secrets**: Get credentials, API keys, or custom secrets in real time to power your workflows securely +- **Create, update, or delete secrets**: Automate secret management, provisioning, and rotation for enhanced security practices +- **Integrate with CI/CD and automation**: Fetch credentials or tokens only when needed, reducing manual work and reducing risk +- **Ensure access controls**: Leverage role-based access and fine-grained permissions to control which agents or users can access specific secrets + +By connecting Sim with 1Password, you empower your agents to securely manage secrets, reduce manual overhead, and maintain best practices for security automation, incident response, and DevOps workflows—all while ensuring secrets never leave a controlled environment. +{/* MANUAL-CONTENT-END */} + + +## Usage Instructions + +Access and manage secrets stored in 1Password vaults using the Connect API or Service Account SDK. 
List vaults, retrieve items with their fields and secrets, create new items, update existing ones, delete items, and resolve secret references. + + + +## Tools + +### `onepassword_list_vaults` + +List all vaults accessible by the Connect token or Service Account + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `connectionMode` | string | No | Connection mode: "service_account" or "connect" | +| `serviceAccountToken` | string | No | 1Password Service Account token \(for Service Account mode\) | +| `apiKey` | string | No | 1Password Connect API token \(for Connect Server mode\) | +| `serverUrl` | string | No | 1Password Connect server URL \(for Connect Server mode\) | +| `filter` | string | No | SCIM filter expression \(e.g., name eq "My Vault"\) | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `vaults` | array | List of accessible vaults | +| ↳ `id` | string | Vault ID | +| ↳ `name` | string | Vault name | +| ↳ `description` | string | Vault description | +| ↳ `attributeVersion` | number | Vault attribute version | +| ↳ `contentVersion` | number | Vault content version | +| ↳ `type` | string | Vault type \(USER_CREATED, PERSONAL, EVERYONE, TRANSFER\) | +| ↳ `createdAt` | string | Creation timestamp | +| ↳ `updatedAt` | string | Last update timestamp | + +### `onepassword_get_vault` + +Get details of a specific vault by ID + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `connectionMode` | string | No | Connection mode: "service_account" or "connect" | +| `serviceAccountToken` | string | No | 1Password Service Account token \(for Service Account mode\) | +| `apiKey` | string | No | 1Password Connect API token \(for Connect Server mode\) | +| `serverUrl` | string | No | 1Password Connect server URL \(for Connect Server mode\) | +| `vaultId` | string | Yes | The vault UUID | + +#### Output + +| 
Parameter | Type | Description | +| --------- | ---- | ----------- | +| `id` | string | Vault ID | +| `name` | string | Vault name | +| `description` | string | Vault description | +| `attributeVersion` | number | Vault attribute version | +| `contentVersion` | number | Vault content version | +| `items` | number | Number of items in the vault | +| `type` | string | Vault type \(USER_CREATED, PERSONAL, EVERYONE, TRANSFER\) | +| `createdAt` | string | Creation timestamp | +| `updatedAt` | string | Last update timestamp | + +### `onepassword_list_items` + +List items in a vault. Returns summaries without field values. + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `connectionMode` | string | No | Connection mode: "service_account" or "connect" | +| `serviceAccountToken` | string | No | 1Password Service Account token \(for Service Account mode\) | +| `apiKey` | string | No | 1Password Connect API token \(for Connect Server mode\) | +| `serverUrl` | string | No | 1Password Connect server URL \(for Connect Server mode\) | +| `vaultId` | string | Yes | The vault UUID to list items from | +| `filter` | string | No | SCIM filter expression \(e.g., title eq "API Key" or tag eq "production"\) | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `items` | array | List of items in the vault \(summaries without field values\) | +| ↳ `id` | string | Item ID | +| ↳ `title` | string | Item title | +| ↳ `vault` | object | Vault reference | +| ↳ `id` | string | Vault ID | +| ↳ `category` | string | Item category \(e.g., LOGIN, API_CREDENTIAL\) | +| ↳ `urls` | array | URLs associated with the item | +| ↳ `href` | string | URL | +| ↳ `label` | string | URL label | +| ↳ `primary` | boolean | Whether this is the primary URL | +| ↳ `favorite` | boolean | Whether the item is favorited | +| ↳ `tags` | array | Item tags | +| ↳ `version` | number | Item version number | +| ↳ `state` | 
string | Item state \(ARCHIVED or DELETED\) | +| ↳ `createdAt` | string | Creation timestamp | +| ↳ `updatedAt` | string | Last update timestamp | +| ↳ `lastEditedBy` | string | ID of the last editor | + +### `onepassword_get_item` + +Get full details of an item including all fields and secrets + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `connectionMode` | string | No | Connection mode: "service_account" or "connect" | +| `serviceAccountToken` | string | No | 1Password Service Account token \(for Service Account mode\) | +| `apiKey` | string | No | 1Password Connect API token \(for Connect Server mode\) | +| `serverUrl` | string | No | 1Password Connect server URL \(for Connect Server mode\) | +| `vaultId` | string | Yes | The vault UUID | +| `itemId` | string | Yes | The item UUID to retrieve | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `response` | json | Operation response data | + +### `onepassword_create_item` + +Create a new item in a vault + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `connectionMode` | string | No | Connection mode: "service_account" or "connect" | +| `serviceAccountToken` | string | No | 1Password Service Account token \(for Service Account mode\) | +| `apiKey` | string | No | 1Password Connect API token \(for Connect Server mode\) | +| `serverUrl` | string | No | 1Password Connect server URL \(for Connect Server mode\) | +| `vaultId` | string | Yes | The vault UUID to create the item in | +| `category` | string | Yes | Item category \(e.g., LOGIN, PASSWORD, API_CREDENTIAL, SECURE_NOTE, SERVER, DATABASE\) | +| `title` | string | No | Item title | +| `tags` | string | No | Comma-separated list of tags | +| `fields` | string | No | JSON array of field objects \(e.g., \[\{"label":"username","value":"admin","type":"STRING","purpose":"USERNAME"\}\]\) | + +#### 
Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `response` | json | Operation response data | + +### `onepassword_replace_item` + +Replace an entire item with new data (full update) + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `connectionMode` | string | No | Connection mode: "service_account" or "connect" | +| `serviceAccountToken` | string | No | 1Password Service Account token \(for Service Account mode\) | +| `apiKey` | string | No | 1Password Connect API token \(for Connect Server mode\) | +| `serverUrl` | string | No | 1Password Connect server URL \(for Connect Server mode\) | +| `vaultId` | string | Yes | The vault UUID | +| `itemId` | string | Yes | The item UUID to replace | +| `item` | string | Yes | JSON object representing the full item \(e.g., \{"vault":\{"id":"..."\},"category":"LOGIN","title":"My Item","fields":\[...\]\}\) | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `response` | json | Operation response data | + +### `onepassword_update_item` + +Update an existing item using JSON Patch operations (RFC6902) + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `connectionMode` | string | No | Connection mode: "service_account" or "connect" | +| `serviceAccountToken` | string | No | 1Password Service Account token \(for Service Account mode\) | +| `apiKey` | string | No | 1Password Connect API token \(for Connect Server mode\) | +| `serverUrl` | string | No | 1Password Connect server URL \(for Connect Server mode\) | +| `vaultId` | string | Yes | The vault UUID | +| `itemId` | string | Yes | The item UUID to update | +| `operations` | string | Yes | JSON array of RFC6902 patch operations \(e.g., \[\{"op":"replace","path":"/title","value":"New Title"\}\]\) | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- 
| +| `response` | json | Operation response data | + +### `onepassword_delete_item` + +Delete an item from a vault + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `connectionMode` | string | No | Connection mode: "service_account" or "connect" | +| `serviceAccountToken` | string | No | 1Password Service Account token \(for Service Account mode\) | +| `apiKey` | string | No | 1Password Connect API token \(for Connect Server mode\) | +| `serverUrl` | string | No | 1Password Connect server URL \(for Connect Server mode\) | +| `vaultId` | string | Yes | The vault UUID | +| `itemId` | string | Yes | The item UUID to delete | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `success` | boolean | Whether the item was successfully deleted | + +### `onepassword_resolve_secret` + +Resolve a secret reference (op://vault/item/field) to its value. Service Account mode only. + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `connectionMode` | string | No | Connection mode: must be "service_account" for this operation | +| `serviceAccountToken` | string | Yes | 1Password Service Account token | +| `secretReference` | string | Yes | Secret reference URI \(e.g., op://vault-name/item-name/field-name or op://vault-name/item-name/section-name/field-name\) | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `value` | string | The resolved secret value | +| `reference` | string | The original secret reference URI | + + diff --git a/apps/sim/app/.well-known/oauth-authorization-server/[...issuer]/route.ts b/apps/sim/app/.well-known/oauth-authorization-server/[...issuer]/route.ts new file mode 100644 index 000000000..9dd3d6bd4 --- /dev/null +++ b/apps/sim/app/.well-known/oauth-authorization-server/[...issuer]/route.ts @@ -0,0 +1,6 @@ +import type { NextRequest, NextResponse } from 
'next/server' +import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery' + +export async function GET(request: NextRequest): Promise<NextResponse> { + return createMcpAuthorizationServerMetadataResponse(request) +} diff --git a/apps/sim/app/.well-known/oauth-authorization-server/api/mcp/copilot/route.ts b/apps/sim/app/.well-known/oauth-authorization-server/api/mcp/copilot/route.ts new file mode 100644 index 000000000..9dd3d6bd4 --- /dev/null +++ b/apps/sim/app/.well-known/oauth-authorization-server/api/mcp/copilot/route.ts @@ -0,0 +1,6 @@ +import type { NextRequest, NextResponse } from 'next/server' +import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery' + +export async function GET(request: NextRequest): Promise<NextResponse> { + return createMcpAuthorizationServerMetadataResponse(request) +} diff --git a/apps/sim/app/.well-known/oauth-authorization-server/route.ts b/apps/sim/app/.well-known/oauth-authorization-server/route.ts new file mode 100644 index 000000000..9dd3d6bd4 --- /dev/null +++ b/apps/sim/app/.well-known/oauth-authorization-server/route.ts @@ -0,0 +1,6 @@ +import type { NextRequest, NextResponse } from 'next/server' +import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery' + +export async function GET(request: NextRequest): Promise<NextResponse> { + return createMcpAuthorizationServerMetadataResponse(request) +} diff --git a/apps/sim/app/.well-known/oauth-protected-resource/api/mcp/copilot/route.ts b/apps/sim/app/.well-known/oauth-protected-resource/api/mcp/copilot/route.ts new file mode 100644 index 000000000..d1136b555 --- /dev/null +++ b/apps/sim/app/.well-known/oauth-protected-resource/api/mcp/copilot/route.ts @@ -0,0 +1,6 @@ +import type { NextRequest, NextResponse } from 'next/server' +import { createMcpProtectedResourceMetadataResponse } from '@/lib/mcp/oauth-discovery' + +export async function GET(request: NextRequest): Promise<NextResponse> { + return 
createMcpProtectedResourceMetadataResponse(request) +} diff --git a/apps/sim/app/.well-known/oauth-protected-resource/route.ts b/apps/sim/app/.well-known/oauth-protected-resource/route.ts new file mode 100644 index 000000000..d1136b555 --- /dev/null +++ b/apps/sim/app/.well-known/oauth-protected-resource/route.ts @@ -0,0 +1,6 @@ +import type { NextRequest, NextResponse } from 'next/server' +import { createMcpProtectedResourceMetadataResponse } from '@/lib/mcp/oauth-discovery' + +export async function GET(request: NextRequest): Promise<NextResponse> { + return createMcpProtectedResourceMetadataResponse(request) +} diff --git a/apps/sim/app/api/billing/update-cost/route.ts b/apps/sim/app/api/billing/update-cost/route.ts index 3e8e0a289..87b7f14f1 100644 --- a/apps/sim/app/api/billing/update-cost/route.ts +++ b/apps/sim/app/api/billing/update-cost/route.ts @@ -18,6 +18,7 @@ const UpdateCostSchema = z.object({ model: z.string().min(1, 'Model is required'), inputTokens: z.number().min(0).default(0), outputTokens: z.number().min(0).default(0), + source: z.enum(['copilot', 'mcp_copilot']).default('copilot'), }) /** @@ -75,12 +76,14 @@ export async function POST(req: NextRequest) { ) } - const { userId, cost, model, inputTokens, outputTokens } = validation.data + const { userId, cost, model, inputTokens, outputTokens, source } = validation.data + const isMcp = source === 'mcp_copilot' logger.info(`[${requestId}] Processing cost update`, { userId, cost, model, + source, }) // Check if user stats record exists (same as ExecutionLogger) @@ -96,7 +99,7 @@ export async function POST(req: NextRequest) { return NextResponse.json({ error: 'User stats record not found' }, { status: 500 }) } - const updateFields = { + const updateFields: Record<string, unknown> = { totalCost: sql`total_cost + ${cost}`, currentPeriodCost: sql`current_period_cost + ${cost}`, totalCopilotCost: sql`total_copilot_cost + ${cost}`, @@ -105,17 +108,24 @@ export async function POST(req: NextRequest) { lastActive: new Date(), } + // Also 
increment MCP-specific counters when source is mcp_copilot + if (isMcp) { + updateFields.totalMcpCopilotCost = sql`total_mcp_copilot_cost + ${cost}` + updateFields.currentPeriodMcpCopilotCost = sql`current_period_mcp_copilot_cost + ${cost}` + } + await db.update(userStats).set(updateFields).where(eq(userStats.userId, userId)) logger.info(`[${requestId}] Updated user stats record`, { userId, addedCost: cost, + source, }) // Log usage for complete audit trail await logModelUsage({ userId, - source: 'copilot', + source: isMcp ? 'mcp_copilot' : 'copilot', model, inputTokens, outputTokens, diff --git a/apps/sim/app/api/copilot/api-keys/generate/route.ts b/apps/sim/app/api/copilot/api-keys/generate/route.ts index db890bdca..27971cede 100644 --- a/apps/sim/app/api/copilot/api-keys/generate/route.ts +++ b/apps/sim/app/api/copilot/api-keys/generate/route.ts @@ -1,7 +1,7 @@ import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { getSession } from '@/lib/auth' -import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' +import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' import { env } from '@/lib/core/config/env' const GenerateApiKeySchema = z.object({ @@ -17,9 +17,6 @@ export async function POST(req: NextRequest) { const userId = session.user.id - // Move environment variable access inside the function - const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT - const body = await req.json().catch(() => ({})) const validationResult = GenerateApiKeySchema.safeParse(body) diff --git a/apps/sim/app/api/copilot/api-keys/route.test.ts b/apps/sim/app/api/copilot/api-keys/route.test.ts index 8b8f630a0..7ec617abf 100644 --- a/apps/sim/app/api/copilot/api-keys/route.test.ts +++ b/apps/sim/app/api/copilot/api-keys/route.test.ts @@ -19,6 +19,7 @@ describe('Copilot API Keys API Route', () => { vi.doMock('@/lib/copilot/constants', () => ({ SIM_AGENT_API_URL_DEFAULT: 'https://agent.sim.example.com', + 
SIM_AGENT_API_URL: 'https://agent.sim.example.com', })) vi.doMock('@/lib/core/config/env', async () => { diff --git a/apps/sim/app/api/copilot/api-keys/route.ts b/apps/sim/app/api/copilot/api-keys/route.ts index f3e25ac82..02d0d5be2 100644 --- a/apps/sim/app/api/copilot/api-keys/route.ts +++ b/apps/sim/app/api/copilot/api-keys/route.ts @@ -1,6 +1,6 @@ import { type NextRequest, NextResponse } from 'next/server' import { getSession } from '@/lib/auth' -import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' +import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' import { env } from '@/lib/core/config/env' export async function GET(request: NextRequest) { @@ -12,8 +12,6 @@ export async function GET(request: NextRequest) { const userId = session.user.id - const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT - const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key/get-api-keys`, { method: 'POST', headers: { @@ -68,8 +66,6 @@ export async function DELETE(request: NextRequest) { return NextResponse.json({ error: 'id is required' }, { status: 400 }) } - const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT - const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key/delete`, { method: 'POST', headers: { diff --git a/apps/sim/app/api/copilot/chat/route.ts b/apps/sim/app/api/copilot/chat/route.ts index 72c959d9a..248298348 100644 --- a/apps/sim/app/api/copilot/chat/route.ts +++ b/apps/sim/app/api/copilot/chat/route.ts @@ -5,10 +5,18 @@ import { and, desc, eq } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { getSession } from '@/lib/auth' +import { buildConversationHistory } from '@/lib/copilot/chat-context' +import { resolveOrCreateChat } from '@/lib/copilot/chat-lifecycle' +import { buildCopilotRequestPayload } from '@/lib/copilot/chat-payload' import { generateChatTitle } from '@/lib/copilot/chat-title' import { getCopilotModel } 
from '@/lib/copilot/config' -import { SIM_AGENT_API_URL_DEFAULT, SIM_AGENT_VERSION } from '@/lib/copilot/constants' import { COPILOT_MODEL_IDS, COPILOT_REQUEST_MODES } from '@/lib/copilot/models' +import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' +import { + createStreamEventWriter, + resetStreamBuffer, + setStreamMeta, +} from '@/lib/copilot/orchestrator/stream-buffer' import { authenticateCopilotRequestSessionOnly, createBadRequestResponse, @@ -16,18 +24,11 @@ import { createRequestTracker, createUnauthorizedResponse, } from '@/lib/copilot/request-helpers' -import { getCredentialsServerTool } from '@/lib/copilot/tools/server/user/get-credentials' -import type { CopilotProviderConfig } from '@/lib/copilot/types' import { env } from '@/lib/core/config/env' -import { CopilotFiles } from '@/lib/uploads' -import { createFileContent } from '@/lib/uploads/utils/file-utils' -import { tools } from '@/tools/registry' -import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils' +import { resolveWorkflowIdForUser } from '@/lib/workflows/utils' const logger = createLogger('CopilotChatAPI') -const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT - const FileAttachmentSchema = z.object({ id: z.string(), key: z.string(), @@ -40,8 +41,9 @@ const ChatMessageSchema = z.object({ message: z.string().min(1, 'Message is required'), userMessageId: z.string().optional(), // ID from frontend for the user message chatId: z.string().optional(), - workflowId: z.string().min(1, 'Workflow ID is required'), - model: z.enum(COPILOT_MODEL_IDS).optional().default('claude-4.5-opus'), + workflowId: z.string().optional(), + workflowName: z.string().optional(), + model: z.enum(COPILOT_MODEL_IDS).optional().default('claude-4.6-opus'), mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'), prefetch: z.boolean().optional(), createNewChat: z.boolean().optional().default(false), @@ -100,7 +102,8 @@ export async function POST(req: 
NextRequest) { message, userMessageId, chatId, - workflowId, + workflowId: providedWorkflowId, + workflowName, model, mode, prefetch, @@ -113,6 +116,20 @@ export async function POST(req: NextRequest) { contexts, commands, } = ChatMessageSchema.parse(body) + + // Resolve workflowId - if not provided, use first workflow or find by name + const resolved = await resolveWorkflowIdForUser( + authenticatedUserId, + providedWorkflowId, + workflowName + ) + if (!resolved) { + return createBadRequestResponse( + 'No workflows found. Create a workflow first or provide a valid workflowId.' + ) + } + const workflowId = resolved.workflowId + // Ensure we have a consistent user message ID for this request const userMessageIdToUse = userMessageId || crypto.randomUUID() try { @@ -157,393 +174,114 @@ export async function POST(req: NextRequest) { let conversationHistory: any[] = [] let actualChatId = chatId - if (chatId) { - // Load existing chat - const [chat] = await db - .select() - .from(copilotChats) - .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, authenticatedUserId))) - .limit(1) - - if (chat) { - currentChat = chat - conversationHistory = Array.isArray(chat.messages) ? 
chat.messages : [] - } - } else if (createNewChat && workflowId) { - // Create new chat - const { provider, model } = getCopilotModel('chat') - const [newChat] = await db - .insert(copilotChats) - .values({ - userId: authenticatedUserId, - workflowId, - title: null, - model, - messages: [], - }) - .returning() - - if (newChat) { - currentChat = newChat - actualChatId = newChat.id - } - } - - // Process file attachments if present - const processedFileContents: any[] = [] - if (fileAttachments && fileAttachments.length > 0) { - const processedAttachments = await CopilotFiles.processCopilotAttachments( - fileAttachments, - tracker.requestId + if (chatId || createNewChat) { + const defaultsForChatRow = getCopilotModel('chat') + const chatResult = await resolveOrCreateChat({ + chatId, + userId: authenticatedUserId, + workflowId, + model: defaultsForChatRow.model, + }) + currentChat = chatResult.chat + actualChatId = chatResult.chatId || chatId + const history = buildConversationHistory( + chatResult.conversationHistory, + (chatResult.chat?.conversationId as string | undefined) || conversationId ) - - for (const { buffer, attachment } of processedAttachments) { - const fileContent = createFileContent(buffer, attachment.media_type) - if (fileContent) { - processedFileContents.push(fileContent) - } - } - } - - // Build messages array for sim agent with conversation history - const messages: any[] = [] - - // Add conversation history (need to rebuild these with file support if they had attachments) - for (const msg of conversationHistory) { - if (msg.fileAttachments && msg.fileAttachments.length > 0) { - // This is a message with file attachments - rebuild with content array - const content: any[] = [{ type: 'text', text: msg.content }] - - const processedHistoricalAttachments = await CopilotFiles.processCopilotAttachments( - msg.fileAttachments, - tracker.requestId - ) - - for (const { buffer, attachment } of processedHistoricalAttachments) { - const fileContent = 
createFileContent(buffer, attachment.media_type) - if (fileContent) { - content.push(fileContent) - } - } - - messages.push({ - role: msg.role, - content, - }) - } else { - // Regular text-only message - messages.push({ - role: msg.role, - content: msg.content, - }) - } - } - - // Add implicit feedback if provided - if (implicitFeedback) { - messages.push({ - role: 'system', - content: implicitFeedback, - }) - } - - // Add current user message with file attachments - if (processedFileContents.length > 0) { - // Message with files - use content array format - const content: any[] = [{ type: 'text', text: message }] - - // Add file contents - for (const fileContent of processedFileContents) { - content.push(fileContent) - } - - messages.push({ - role: 'user', - content, - }) - } else { - // Text-only message - messages.push({ - role: 'user', - content: message, - }) + conversationHistory = history.history } const defaults = getCopilotModel('chat') const selectedModel = model || defaults.model - const envModel = env.COPILOT_MODEL || defaults.model - - let providerConfig: CopilotProviderConfig | undefined - const providerEnv = env.COPILOT_PROVIDER as any - - if (providerEnv) { - if (providerEnv === 'azure-openai') { - providerConfig = { - provider: 'azure-openai', - model: envModel, - apiKey: env.AZURE_OPENAI_API_KEY, - apiVersion: 'preview', - endpoint: env.AZURE_OPENAI_ENDPOINT, - } - } else if (providerEnv === 'azure-anthropic') { - providerConfig = { - provider: 'azure-anthropic', - model: envModel, - apiKey: env.AZURE_ANTHROPIC_API_KEY, - apiVersion: env.AZURE_ANTHROPIC_API_VERSION, - endpoint: env.AZURE_ANTHROPIC_ENDPOINT, - } - } else if (providerEnv === 'vertex') { - providerConfig = { - provider: 'vertex', - model: envModel, - apiKey: env.COPILOT_API_KEY, - vertexProject: env.VERTEX_PROJECT, - vertexLocation: env.VERTEX_LOCATION, - } - } else { - providerConfig = { - provider: providerEnv, - model: selectedModel, - apiKey: env.COPILOT_API_KEY, - } - } - } - 
const effectiveMode = mode === 'agent' ? 'build' : mode - const transportMode = effectiveMode === 'build' ? 'agent' : effectiveMode - - // Determine conversationId to use for this request const effectiveConversationId = (currentChat?.conversationId as string | undefined) || conversationId - // For agent/build mode, fetch credentials and build tool definitions - let integrationTools: any[] = [] - let baseTools: any[] = [] - let credentials: { - oauth: Record< - string, - { accessToken: string; accountId: string; name: string; expiresAt?: string } - > - apiKeys: string[] - metadata?: { - connectedOAuth: Array<{ provider: string; name: string; scopes?: string[] }> - configuredApiKeys: string[] + const requestPayload = await buildCopilotRequestPayload( + { + message, + workflowId, + userId: authenticatedUserId, + userMessageId: userMessageIdToUse, + mode, + model: selectedModel, + conversationHistory, + contexts: agentContexts, + fileAttachments, + commands, + chatId: actualChatId, + implicitFeedback, + }, + { + selectedModel, } - } | null = null - - if (effectiveMode === 'build') { - // Build base tools (executed locally, not deferred) - // Include function_execute for code execution capability - baseTools = [ - { - name: 'function_execute', - description: - 'Execute JavaScript code to perform calculations, data transformations, API calls, or any programmatic task. Code runs in a secure sandbox with fetch() available. Write plain statements (not wrapped in functions). Example: const res = await fetch(url); const data = await res.json(); return data;', - input_schema: { - type: 'object', - properties: { - code: { - type: 'string', - description: - 'Raw JavaScript statements to execute. Code is auto-wrapped in async context. Use fetch() for HTTP requests. 
Write like: const res = await fetch(url); return await res.json();', - }, - }, - required: ['code'], - }, - executeLocally: true, - }, - ] - // Fetch user credentials (OAuth + API keys) - pass workflowId to get workspace env vars - try { - const rawCredentials = await getCredentialsServerTool.execute( - { workflowId }, - { userId: authenticatedUserId } - ) - - // Transform OAuth credentials to map format: { [provider]: { accessToken, accountId, ... } } - const oauthMap: Record< - string, - { accessToken: string; accountId: string; name: string; expiresAt?: string } - > = {} - const connectedOAuth: Array<{ provider: string; name: string; scopes?: string[] }> = [] - for (const cred of rawCredentials?.oauth?.connected?.credentials || []) { - if (cred.accessToken) { - oauthMap[cred.provider] = { - accessToken: cred.accessToken, - accountId: cred.id, - name: cred.name, - } - connectedOAuth.push({ - provider: cred.provider, - name: cred.name, - }) - } - } - - credentials = { - oauth: oauthMap, - apiKeys: rawCredentials?.environment?.variableNames || [], - metadata: { - connectedOAuth, - configuredApiKeys: rawCredentials?.environment?.variableNames || [], - }, - } - - logger.info(`[${tracker.requestId}] Fetched credentials for build mode`, { - oauthProviders: Object.keys(oauthMap), - apiKeyCount: credentials.apiKeys.length, - }) - } catch (error) { - logger.warn(`[${tracker.requestId}] Failed to fetch credentials`, { - error: error instanceof Error ? 
error.message : String(error), - }) - } - - // Build tool definitions (schemas only) - try { - const { createUserToolSchema } = await import('@/tools/params') - - const latestTools = getLatestVersionTools(tools) - - integrationTools = Object.entries(latestTools).map(([toolId, toolConfig]) => { - const userSchema = createUserToolSchema(toolConfig) - const strippedName = stripVersionSuffix(toolId) - return { - name: strippedName, - description: toolConfig.description || toolConfig.name || strippedName, - input_schema: userSchema, - defer_loading: true, // Anthropic Advanced Tool Use - ...(toolConfig.oauth?.required && { - oauth: { - required: true, - provider: toolConfig.oauth.provider, - }, - }), - } - }) - - logger.info(`[${tracker.requestId}] Built tool definitions for build mode`, { - integrationToolCount: integrationTools.length, - }) - } catch (error) { - logger.warn(`[${tracker.requestId}] Failed to build tool definitions`, { - error: error instanceof Error ? error.message : String(error), - }) - } - } - - const requestPayload = { - message: message, // Just send the current user message text - workflowId, - userId: authenticatedUserId, - stream: stream, - streamToolCalls: true, - model: selectedModel, - mode: transportMode, - messageId: userMessageIdToUse, - version: SIM_AGENT_VERSION, - ...(providerConfig ? { provider: providerConfig } : {}), - ...(effectiveConversationId ? { conversationId: effectiveConversationId } : {}), - ...(typeof prefetch === 'boolean' ? { prefetch: prefetch } : {}), - ...(session?.user?.name && { userName: session.user.name }), - ...(agentContexts.length > 0 && { context: agentContexts }), - ...(actualChatId ? 
{ chatId: actualChatId } : {}), - ...(processedFileContents.length > 0 && { fileAttachments: processedFileContents }), - // For build/agent mode, include tools and credentials - ...(integrationTools.length > 0 && { tools: integrationTools }), - ...(baseTools.length > 0 && { baseTools }), - ...(credentials && { credentials }), - ...(commands && commands.length > 0 && { commands }), - } + ) try { logger.info(`[${tracker.requestId}] About to call Sim Agent`, { hasContext: agentContexts.length > 0, contextCount: agentContexts.length, hasConversationId: !!effectiveConversationId, - hasFileAttachments: processedFileContents.length > 0, + hasFileAttachments: Array.isArray(requestPayload.fileAttachments), messageLength: message.length, mode: effectiveMode, - hasTools: integrationTools.length > 0, - toolCount: integrationTools.length, - hasBaseTools: baseTools.length > 0, - baseToolCount: baseTools.length, - hasCredentials: !!credentials, + hasTools: Array.isArray(requestPayload.tools), + toolCount: Array.isArray(requestPayload.tools) ? requestPayload.tools.length : 0, + hasBaseTools: Array.isArray(requestPayload.baseTools), + baseToolCount: Array.isArray(requestPayload.baseTools) + ? requestPayload.baseTools.length + : 0, + hasCredentials: !!requestPayload.credentials, }) } catch {} - const simAgentResponse = await fetch(`${SIM_AGENT_API_URL}/api/chat-completion-streaming`, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - ...(env.COPILOT_API_KEY ? 
{ 'x-api-key': env.COPILOT_API_KEY } : {}), - }, - body: JSON.stringify(requestPayload), - }) - - if (!simAgentResponse.ok) { - if (simAgentResponse.status === 401 || simAgentResponse.status === 402) { - // Rethrow status only; client will render appropriate assistant message - return new NextResponse(null, { status: simAgentResponse.status }) - } - - const errorText = await simAgentResponse.text().catch(() => '') - logger.error(`[${tracker.requestId}] Sim agent API error:`, { - status: simAgentResponse.status, - error: errorText, - }) - - return NextResponse.json( - { error: `Sim agent API error: ${simAgentResponse.statusText}` }, - { status: simAgentResponse.status } - ) - } - - // If streaming is requested, forward the stream and update chat later - if (stream && simAgentResponse.body) { - // Create user message to save - const userMessage = { - id: userMessageIdToUse, // Consistent ID used for request and persistence - role: 'user', - content: message, - timestamp: new Date().toISOString(), - ...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }), - ...(Array.isArray(contexts) && contexts.length > 0 && { contexts }), - ...(Array.isArray(contexts) && - contexts.length > 0 && { - contentBlocks: [{ type: 'contexts', contexts: contexts as any, timestamp: Date.now() }], - }), - } - - // Create a pass-through stream that captures the response + if (stream) { + const streamId = userMessageIdToUse + let eventWriter: ReturnType | null = null + let clientDisconnected = false const transformedStream = new ReadableStream({ async start(controller) { const encoder = new TextEncoder() - let assistantContent = '' - const toolCalls: any[] = [] - let buffer = '' - const isFirstDone = true - let responseIdFromStart: string | undefined - let responseIdFromDone: string | undefined - // Track tool call progress to identify a safe done event - const announcedToolCallIds = new Set() - const startedToolExecutionIds = new Set() - const completedToolExecutionIds = new 
Set() - let lastDoneResponseId: string | undefined - let lastSafeDoneResponseId: string | undefined - // Send chatId as first event - if (actualChatId) { - const chatIdEvent = `data: ${JSON.stringify({ - type: 'chat_id', - chatId: actualChatId, - })}\n\n` - controller.enqueue(encoder.encode(chatIdEvent)) - logger.debug(`[${tracker.requestId}] Sent initial chatId event to client`) + await resetStreamBuffer(streamId) + await setStreamMeta(streamId, { status: 'active', userId: authenticatedUserId }) + eventWriter = createStreamEventWriter(streamId) + + const shouldFlushEvent = (event: Record) => + event.type === 'tool_call' || + event.type === 'tool_result' || + event.type === 'tool_error' || + event.type === 'subagent_end' || + event.type === 'structured_result' || + event.type === 'subagent_result' || + event.type === 'done' || + event.type === 'error' + + const pushEvent = async (event: Record) => { + if (!eventWriter) return + const entry = await eventWriter.write(event) + if (shouldFlushEvent(event)) { + await eventWriter.flush() + } + const payload = { + ...event, + eventId: entry.eventId, + streamId, + } + try { + if (!clientDisconnected) { + controller.enqueue(encoder.encode(`data: ${JSON.stringify(payload)}\n\n`)) + } + } catch { + clientDisconnected = true + await eventWriter.flush() + } + } + + if (actualChatId) { + await pushEvent({ type: 'chat_id', chatId: actualChatId }) } - // Start title generation in parallel if needed if (actualChatId && !currentChat?.title && conversationHistory.length === 0) { generateChatTitle(message) .then(async (title) => { @@ -555,311 +293,64 @@ export async function POST(req: NextRequest) { updatedAt: new Date(), }) .where(eq(copilotChats.id, actualChatId!)) - - const titleEvent = `data: ${JSON.stringify({ - type: 'title_updated', - title: title, - })}\n\n` - controller.enqueue(encoder.encode(titleEvent)) - logger.info(`[${tracker.requestId}] Generated and saved title: ${title}`) + await pushEvent({ type: 'title_updated', 
title }) } }) .catch((error) => { logger.error(`[${tracker.requestId}] Title generation failed:`, error) }) - } else { - logger.debug(`[${tracker.requestId}] Skipping title generation`) } - // Forward the sim agent stream and capture assistant response - const reader = simAgentResponse.body!.getReader() - const decoder = new TextDecoder() - try { - while (true) { - const { done, value } = await reader.read() - if (done) { - break - } - - // Decode and parse SSE events for logging and capturing content - const decodedChunk = decoder.decode(value, { stream: true }) - buffer += decodedChunk - - const lines = buffer.split('\n') - buffer = lines.pop() || '' // Keep incomplete line in buffer - - for (const line of lines) { - if (line.trim() === '') continue // Skip empty lines - - if (line.startsWith('data: ') && line.length > 6) { - try { - const jsonStr = line.slice(6) - - // Check if the JSON string is unusually large (potential streaming issue) - if (jsonStr.length > 50000) { - // 50KB limit - logger.warn(`[${tracker.requestId}] Large SSE event detected`, { - size: jsonStr.length, - preview: `${jsonStr.substring(0, 100)}...`, - }) - } - - const event = JSON.parse(jsonStr) - - // Log different event types comprehensively - switch (event.type) { - case 'content': - if (event.data) { - assistantContent += event.data - } - break - - case 'reasoning': - logger.debug( - `[${tracker.requestId}] Reasoning chunk received (${(event.data || event.content || '').length} chars)` - ) - break - - case 'tool_call': - if (!event.data?.partial) { - toolCalls.push(event.data) - if (event.data?.id) { - announcedToolCallIds.add(event.data.id) - } - } - break - - case 'tool_generating': - if (event.toolCallId) { - startedToolExecutionIds.add(event.toolCallId) - } - break - - case 'tool_result': - if (event.toolCallId) { - completedToolExecutionIds.add(event.toolCallId) - } - break - - case 'tool_error': - logger.error(`[${tracker.requestId}] Tool error:`, { - toolCallId: event.toolCallId, 
- toolName: event.toolName, - error: event.error, - success: event.success, - }) - if (event.toolCallId) { - completedToolExecutionIds.add(event.toolCallId) - } - break - - case 'start': - if (event.data?.responseId) { - responseIdFromStart = event.data.responseId - } - break - - case 'done': - if (event.data?.responseId) { - responseIdFromDone = event.data.responseId - lastDoneResponseId = responseIdFromDone - - // Mark this done as safe only if no tool call is currently in progress or pending - const announced = announcedToolCallIds.size - const completed = completedToolExecutionIds.size - const started = startedToolExecutionIds.size - const hasToolInProgress = announced > completed || started > completed - if (!hasToolInProgress) { - lastSafeDoneResponseId = responseIdFromDone - } - } - break - - case 'error': - break - - default: - } - - // Emit to client: rewrite 'error' events into user-friendly assistant message - if (event?.type === 'error') { - try { - const displayMessage: string = - (event?.data && (event.data.displayMessage as string)) || - 'Sorry, I encountered an error. Please try again.' 
- const formatted = `_${displayMessage}_` - // Accumulate so it persists to DB as assistant content - assistantContent += formatted - // Send as content chunk - try { - controller.enqueue( - encoder.encode( - `data: ${JSON.stringify({ type: 'content', data: formatted })}\n\n` - ) - ) - } catch (enqueueErr) { - reader.cancel() - break - } - // Then close this response cleanly for the client - try { - controller.enqueue( - encoder.encode(`data: ${JSON.stringify({ type: 'done' })}\n\n`) - ) - } catch (enqueueErr) { - reader.cancel() - break - } - } catch {} - // Do not forward the original error event - } else { - // Forward original event to client - try { - controller.enqueue(encoder.encode(`data: ${jsonStr}\n\n`)) - } catch (enqueueErr) { - reader.cancel() - break - } - } - } catch (e) { - // Enhanced error handling for large payloads and parsing issues - const lineLength = line.length - const isLargePayload = lineLength > 10000 - - if (isLargePayload) { - logger.error( - `[${tracker.requestId}] Failed to parse large SSE event (${lineLength} chars)`, - { - error: e, - preview: `${line.substring(0, 200)}...`, - size: lineLength, - } - ) - } else { - logger.warn( - `[${tracker.requestId}] Failed to parse SSE event: "${line.substring(0, 200)}..."`, - e - ) - } - } - } else if (line.trim() && line !== 'data: [DONE]') { - logger.debug(`[${tracker.requestId}] Non-SSE line from sim agent: "${line}"`) - } - } - } - - // Process any remaining buffer - if (buffer.trim()) { - logger.debug(`[${tracker.requestId}] Processing remaining buffer: "${buffer}"`) - if (buffer.startsWith('data: ')) { - try { - const jsonStr = buffer.slice(6) - const event = JSON.parse(jsonStr) - if (event.type === 'content' && event.data) { - assistantContent += event.data - } - // Forward remaining event, applying same error rewrite behavior - if (event?.type === 'error') { - const displayMessage: string = - (event?.data && (event.data.displayMessage as string)) || - 'Sorry, I encountered an error. 
Please try again.' - const formatted = `_${displayMessage}_` - assistantContent += formatted - try { - controller.enqueue( - encoder.encode( - `data: ${JSON.stringify({ type: 'content', data: formatted })}\n\n` - ) - ) - controller.enqueue( - encoder.encode(`data: ${JSON.stringify({ type: 'done' })}\n\n`) - ) - } catch (enqueueErr) { - reader.cancel() - } - } else { - try { - controller.enqueue(encoder.encode(`data: ${jsonStr}\n\n`)) - } catch (enqueueErr) { - reader.cancel() - } - } - } catch (e) { - logger.warn(`[${tracker.requestId}] Failed to parse final buffer: "${buffer}"`) - } - } - } - - // Log final streaming summary - logger.info(`[${tracker.requestId}] Streaming complete summary:`, { - totalContentLength: assistantContent.length, - toolCallsCount: toolCalls.length, - hasContent: assistantContent.length > 0, - toolNames: toolCalls.map((tc) => tc?.name).filter(Boolean), + const result = await orchestrateCopilotStream(requestPayload, { + userId: authenticatedUserId, + workflowId, + chatId: actualChatId, + autoExecuteTools: true, + interactive: true, + onEvent: async (event) => { + await pushEvent(event) + }, }) - // NOTE: Messages are saved by the client via update-messages endpoint with full contentBlocks. - // Server only updates conversationId here to avoid overwriting client's richer save. 
- if (currentChat) { - // Persist only a safe conversationId to avoid continuing from a state that expects tool outputs - const previousConversationId = currentChat?.conversationId as string | undefined - const responseId = lastSafeDoneResponseId || previousConversationId || undefined - - if (responseId) { - await db - .update(copilotChats) - .set({ - updatedAt: new Date(), - conversationId: responseId, - }) - .where(eq(copilotChats.id, actualChatId!)) - - logger.info( - `[${tracker.requestId}] Updated conversationId for chat ${actualChatId}`, - { - updatedConversationId: responseId, - } - ) - } + if (currentChat && result.conversationId) { + await db + .update(copilotChats) + .set({ + updatedAt: new Date(), + conversationId: result.conversationId, + }) + .where(eq(copilotChats.id, actualChatId!)) } + await eventWriter.close() + await setStreamMeta(streamId, { status: 'complete', userId: authenticatedUserId }) } catch (error) { - logger.error(`[${tracker.requestId}] Error processing stream:`, error) - - // Send an error event to the client before closing so it knows what happened - try { - const errorMessage = - error instanceof Error && error.message === 'terminated' - ? 'Connection to AI service was interrupted. Please try again.' - : 'An unexpected error occurred while processing the response.' 
- const encoder = new TextEncoder() - - // Send error as content so it shows in the chat - controller.enqueue( - encoder.encode( - `data: ${JSON.stringify({ type: 'content', data: `\n\n_${errorMessage}_` })}\n\n` - ) - ) - // Send done event to properly close the stream on client - controller.enqueue(encoder.encode(`data: ${JSON.stringify({ type: 'done' })}\n\n`)) - } catch (enqueueError) { - // Stream might already be closed, that's ok - logger.warn( - `[${tracker.requestId}] Could not send error event to client:`, - enqueueError - ) - } + logger.error(`[${tracker.requestId}] Orchestration error:`, error) + await eventWriter.close() + await setStreamMeta(streamId, { + status: 'error', + userId: authenticatedUserId, + error: error instanceof Error ? error.message : 'Stream error', + }) + await pushEvent({ + type: 'error', + data: { + displayMessage: 'An unexpected error occurred while processing the response.', + }, + }) } finally { - try { - controller.close() - } catch { - // Controller might already be closed - } + controller.close() + } + }, + async cancel() { + clientDisconnected = true + if (eventWriter) { + await eventWriter.flush() } }, }) - const response = new Response(transformedStream, { + return new Response(transformedStream, { headers: { 'Content-Type': 'text/event-stream', 'Cache-Control': 'no-cache', @@ -867,43 +358,34 @@ export async function POST(req: NextRequest) { 'X-Accel-Buffering': 'no', }, }) - - logger.info(`[${tracker.requestId}] Returning streaming response to client`, { - duration: tracker.getDuration(), - chatId: actualChatId, - headers: { - 'Content-Type': 'text/event-stream', - 'Cache-Control': 'no-cache', - Connection: 'keep-alive', - }, - }) - - return response } - // For non-streaming responses - const responseData = await simAgentResponse.json() - logger.info(`[${tracker.requestId}] Non-streaming response from sim agent:`, { + const nonStreamingResult = await orchestrateCopilotStream(requestPayload, { + userId: 
authenticatedUserId, + workflowId, + chatId: actualChatId, + autoExecuteTools: true, + interactive: true, + }) + + const responseData = { + content: nonStreamingResult.content, + toolCalls: nonStreamingResult.toolCalls, + model: selectedModel, + provider: + (requestPayload?.provider as Record)?.provider || + env.COPILOT_PROVIDER || + 'openai', + } + + logger.info(`[${tracker.requestId}] Non-streaming response from orchestrator:`, { hasContent: !!responseData.content, contentLength: responseData.content?.length || 0, model: responseData.model, provider: responseData.provider, toolCallsCount: responseData.toolCalls?.length || 0, - hasTokens: !!responseData.tokens, }) - // Log tool calls if present - if (responseData.toolCalls?.length > 0) { - responseData.toolCalls.forEach((toolCall: any) => { - logger.info(`[${tracker.requestId}] Tool call in response:`, { - id: toolCall.id, - name: toolCall.name, - success: toolCall.success, - result: `${JSON.stringify(toolCall.result).substring(0, 200)}...`, - }) - }) - } - // Save messages if we have a chat if (currentChat && responseData.content) { const userMessage = { @@ -955,6 +437,9 @@ export async function POST(req: NextRequest) { .set({ messages: updatedMessages, updatedAt: new Date(), + ...(nonStreamingResult.conversationId + ? 
{ conversationId: nonStreamingResult.conversationId } + : {}), }) .where(eq(copilotChats.id, actualChatId!)) } @@ -1006,10 +491,7 @@ export async function GET(req: NextRequest) { try { const { searchParams } = new URL(req.url) const workflowId = searchParams.get('workflowId') - - if (!workflowId) { - return createBadRequestResponse('workflowId is required') - } + const chatId = searchParams.get('chatId') // Get authenticated user using consolidated helper const { userId: authenticatedUserId, isAuthenticated } = @@ -1018,6 +500,47 @@ export async function GET(req: NextRequest) { return createUnauthorizedResponse() } + // If chatId is provided, fetch a single chat + if (chatId) { + const [chat] = await db + .select({ + id: copilotChats.id, + title: copilotChats.title, + model: copilotChats.model, + messages: copilotChats.messages, + planArtifact: copilotChats.planArtifact, + config: copilotChats.config, + createdAt: copilotChats.createdAt, + updatedAt: copilotChats.updatedAt, + }) + .from(copilotChats) + .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, authenticatedUserId))) + .limit(1) + + if (!chat) { + return NextResponse.json({ success: false, error: 'Chat not found' }, { status: 404 }) + } + + const transformedChat = { + id: chat.id, + title: chat.title, + model: chat.model, + messages: Array.isArray(chat.messages) ? chat.messages : [], + messageCount: Array.isArray(chat.messages) ? 
chat.messages.length : 0, + planArtifact: chat.planArtifact || null, + config: chat.config || null, + createdAt: chat.createdAt, + updatedAt: chat.updatedAt, + } + + logger.info(`Retrieved chat ${chatId}`) + return NextResponse.json({ success: true, chat: transformedChat }) + } + + if (!workflowId) { + return createBadRequestResponse('workflowId or chatId is required') + } + // Fetch chats for this user and workflow const chats = await db .select({ diff --git a/apps/sim/app/api/copilot/chat/stream/route.ts b/apps/sim/app/api/copilot/chat/stream/route.ts new file mode 100644 index 000000000..c1fd6fb22 --- /dev/null +++ b/apps/sim/app/api/copilot/chat/stream/route.ts @@ -0,0 +1,130 @@ +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { + getStreamMeta, + readStreamEvents, + type StreamMeta, +} from '@/lib/copilot/orchestrator/stream-buffer' +import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers' +import { SSE_HEADERS } from '@/lib/core/utils/sse' + +const logger = createLogger('CopilotChatStreamAPI') +const POLL_INTERVAL_MS = 250 +const MAX_STREAM_MS = 10 * 60 * 1000 + +function encodeEvent(event: Record): Uint8Array { + return new TextEncoder().encode(`data: ${JSON.stringify(event)}\n\n`) +} + +export async function GET(request: NextRequest) { + const { userId: authenticatedUserId, isAuthenticated } = + await authenticateCopilotRequestSessionOnly() + + if (!isAuthenticated || !authenticatedUserId) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) + } + + const url = new URL(request.url) + const streamId = url.searchParams.get('streamId') || '' + const fromParam = url.searchParams.get('from') || '0' + const fromEventId = Number(fromParam || 0) + // If batch=true, return buffered events as JSON instead of SSE + const batchMode = url.searchParams.get('batch') === 'true' + const toParam = url.searchParams.get('to') + const toEventId = toParam ? 
Number(toParam) : undefined + + if (!streamId) { + return NextResponse.json({ error: 'streamId is required' }, { status: 400 }) + } + + const meta = (await getStreamMeta(streamId)) as StreamMeta | null + logger.info('[Resume] Stream lookup', { + streamId, + fromEventId, + toEventId, + batchMode, + hasMeta: !!meta, + metaStatus: meta?.status, + }) + if (!meta) { + return NextResponse.json({ error: 'Stream not found' }, { status: 404 }) + } + if (meta.userId && meta.userId !== authenticatedUserId) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 403 }) + } + + // Batch mode: return all buffered events as JSON + if (batchMode) { + const events = await readStreamEvents(streamId, fromEventId) + const filteredEvents = toEventId ? events.filter((e) => e.eventId <= toEventId) : events + logger.info('[Resume] Batch response', { + streamId, + fromEventId, + toEventId, + eventCount: filteredEvents.length, + }) + return NextResponse.json({ + success: true, + events: filteredEvents, + status: meta.status, + }) + } + + const startTime = Date.now() + + const stream = new ReadableStream({ + async start(controller) { + let lastEventId = Number.isFinite(fromEventId) ? 
fromEventId : 0 + + const flushEvents = async () => { + const events = await readStreamEvents(streamId, lastEventId) + if (events.length > 0) { + logger.info('[Resume] Flushing events', { + streamId, + fromEventId: lastEventId, + eventCount: events.length, + }) + } + for (const entry of events) { + lastEventId = entry.eventId + const payload = { + ...entry.event, + eventId: entry.eventId, + streamId: entry.streamId, + } + controller.enqueue(encodeEvent(payload)) + } + } + + try { + await flushEvents() + + while (Date.now() - startTime < MAX_STREAM_MS) { + const currentMeta = await getStreamMeta(streamId) + if (!currentMeta) break + + await flushEvents() + + if (currentMeta.status === 'complete' || currentMeta.status === 'error') { + break + } + + if (request.signal.aborted) { + break + } + + await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS)) + } + } catch (error) { + logger.warn('Stream replay failed', { + streamId, + error: error instanceof Error ? error.message : String(error), + }) + } finally { + controller.close() + } + }, + }) + + return new Response(stream, { headers: SSE_HEADERS }) +} diff --git a/apps/sim/app/api/copilot/confirm/route.test.ts b/apps/sim/app/api/copilot/confirm/route.test.ts index 5bb9efd68..78c46982e 100644 --- a/apps/sim/app/api/copilot/confirm/route.test.ts +++ b/apps/sim/app/api/copilot/confirm/route.test.ts @@ -139,7 +139,6 @@ describe('Copilot Confirm API Route', () => { status: 'success', }) - expect(mockRedisExists).toHaveBeenCalled() expect(mockRedisSet).toHaveBeenCalled() }) @@ -256,11 +255,11 @@ describe('Copilot Confirm API Route', () => { expect(responseData.error).toBe('Failed to update tool call status or tool call not found') }) - it('should return 400 when tool call is not found in Redis', async () => { + it('should return 400 when Redis set fails', async () => { const authMocks = mockAuth() authMocks.setAuthenticated() - mockRedisExists.mockResolvedValue(0) + mockRedisSet.mockRejectedValueOnce(new 
Error('Redis set failed')) const req = createMockRequest('POST', { toolCallId: 'non-existent-tool', @@ -279,7 +278,7 @@ describe('Copilot Confirm API Route', () => { const authMocks = mockAuth() authMocks.setAuthenticated() - mockRedisExists.mockRejectedValue(new Error('Redis connection failed')) + mockRedisSet.mockRejectedValueOnce(new Error('Redis connection failed')) const req = createMockRequest('POST', { toolCallId: 'tool-call-123', diff --git a/apps/sim/app/api/copilot/confirm/route.ts b/apps/sim/app/api/copilot/confirm/route.ts index 9fd5476c9..eb63b7524 100644 --- a/apps/sim/app/api/copilot/confirm/route.ts +++ b/apps/sim/app/api/copilot/confirm/route.ts @@ -1,6 +1,7 @@ import { createLogger } from '@sim/logger' import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' +import { REDIS_TOOL_CALL_PREFIX, REDIS_TOOL_CALL_TTL_SECONDS } from '@/lib/copilot/constants' import { authenticateCopilotRequestSessionOnly, createBadRequestResponse, @@ -23,7 +24,8 @@ const ConfirmationSchema = z.object({ }) /** - * Update tool call status in Redis + * Write the user's tool decision to Redis. The server-side orchestrator's + * waitForToolDecision() polls Redis for this value. 
*/ async function updateToolCallStatus( toolCallId: string, @@ -32,57 +34,24 @@ async function updateToolCallStatus( ): Promise { const redis = getRedisClient() if (!redis) { - logger.warn('updateToolCallStatus: Redis client not available') + logger.warn('Redis client not available for tool confirmation') return false } try { - const key = `tool_call:${toolCallId}` - const timeout = 600000 // 10 minutes timeout for user confirmation - const pollInterval = 100 // Poll every 100ms - const startTime = Date.now() - - logger.info('Polling for tool call in Redis', { toolCallId, key, timeout }) - - // Poll until the key exists or timeout - while (Date.now() - startTime < timeout) { - const exists = await redis.exists(key) - if (exists) { - break - } - - // Wait before next poll - await new Promise((resolve) => setTimeout(resolve, pollInterval)) - } - - // Final check if key exists after polling - const exists = await redis.exists(key) - if (!exists) { - logger.warn('Tool call not found in Redis after polling timeout', { - toolCallId, - key, - timeout, - pollDuration: Date.now() - startTime, - }) - return false - } - - // Store both status and message as JSON - const toolCallData = { + const key = `${REDIS_TOOL_CALL_PREFIX}${toolCallId}` + const payload = { status, message: message || null, timestamp: new Date().toISOString(), } - - await redis.set(key, JSON.stringify(toolCallData), 'EX', 86400) // Keep 24 hour expiry - + await redis.set(key, JSON.stringify(payload), 'EX', REDIS_TOOL_CALL_TTL_SECONDS) return true } catch (error) { - logger.error('Failed to update tool call status in Redis', { + logger.error('Failed to update tool call status', { toolCallId, status, - message, - error: error instanceof Error ? error.message : 'Unknown error', + error: error instanceof Error ? 
error.message : String(error), }) return false } diff --git a/apps/sim/app/api/copilot/credentials/route.ts b/apps/sim/app/api/copilot/credentials/route.ts new file mode 100644 index 000000000..2f764429d --- /dev/null +++ b/apps/sim/app/api/copilot/credentials/route.ts @@ -0,0 +1,28 @@ +import { type NextRequest, NextResponse } from 'next/server' +import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers' +import { routeExecution } from '@/lib/copilot/tools/server/router' + +/** + * GET /api/copilot/credentials + * Returns connected OAuth credentials for the authenticated user. + * Used by the copilot store for credential masking. + */ +export async function GET(_req: NextRequest) { + const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly() + if (!isAuthenticated || !userId) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) + } + + try { + const result = await routeExecution('get_credentials', {}, { userId }) + return NextResponse.json({ success: true, result }) + } catch (error) { + return NextResponse.json( + { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to load credentials', + }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/copilot/execute-copilot-server-tool/route.ts b/apps/sim/app/api/copilot/execute-copilot-server-tool/route.ts deleted file mode 100644 index 5627ae897..000000000 --- a/apps/sim/app/api/copilot/execute-copilot-server-tool/route.ts +++ /dev/null @@ -1,54 +0,0 @@ -import { createLogger } from '@sim/logger' -import { type NextRequest, NextResponse } from 'next/server' -import { z } from 'zod' -import { - authenticateCopilotRequestSessionOnly, - createBadRequestResponse, - createInternalServerErrorResponse, - createRequestTracker, - createUnauthorizedResponse, -} from '@/lib/copilot/request-helpers' -import { routeExecution } from '@/lib/copilot/tools/server/router' - -const logger = createLogger('ExecuteCopilotServerToolAPI') - -const ExecuteSchema = z.object({ - toolName: z.string(), - payload: z.unknown().optional(), -}) - -export async function POST(req: NextRequest) { - const tracker = createRequestTracker() - try { - const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly() - if (!isAuthenticated || !userId) { - return createUnauthorizedResponse() - } - - const body = await req.json() - try { - const preview = JSON.stringify(body).slice(0, 300) - logger.debug(`[${tracker.requestId}] Incoming request body preview`, { preview }) - } catch {} - - const { toolName, payload } = ExecuteSchema.parse(body) - - logger.info(`[${tracker.requestId}] Executing server tool`, { toolName }) - const result = await routeExecution(toolName, payload, { userId }) - - try { - const resultPreview = JSON.stringify(result).slice(0, 300) - logger.debug(`[${tracker.requestId}] Server tool result preview`, { toolName, resultPreview }) - } catch {} - - return NextResponse.json({ success: true, result }) - } catch (error) { - if (error instanceof z.ZodError) { - logger.debug(`[${tracker.requestId}] Zod validation error`, { issues: error.issues }) - return 
createBadRequestResponse('Invalid request body for execute-copilot-server-tool') - } - logger.error(`[${tracker.requestId}] Failed to execute server tool:`, error) - const errorMessage = error instanceof Error ? error.message : 'Failed to execute server tool' - return createInternalServerErrorResponse(errorMessage) - } -} diff --git a/apps/sim/app/api/copilot/execute-tool/route.ts b/apps/sim/app/api/copilot/execute-tool/route.ts deleted file mode 100644 index d134d28eb..000000000 --- a/apps/sim/app/api/copilot/execute-tool/route.ts +++ /dev/null @@ -1,247 +0,0 @@ -import { db } from '@sim/db' -import { account, workflow } from '@sim/db/schema' -import { createLogger } from '@sim/logger' -import { and, eq } from 'drizzle-orm' -import { type NextRequest, NextResponse } from 'next/server' -import { z } from 'zod' -import { getSession } from '@/lib/auth' -import { - createBadRequestResponse, - createInternalServerErrorResponse, - createRequestTracker, - createUnauthorizedResponse, -} from '@/lib/copilot/request-helpers' -import { generateRequestId } from '@/lib/core/utils/request' -import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' -import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils' -import { resolveEnvVarReferences } from '@/executor/utils/reference-validation' -import { executeTool } from '@/tools' -import { getTool, resolveToolId } from '@/tools/utils' - -const logger = createLogger('CopilotExecuteToolAPI') - -const ExecuteToolSchema = z.object({ - toolCallId: z.string(), - toolName: z.string(), - arguments: z.record(z.any()).optional().default({}), - workflowId: z.string().optional(), -}) - -export async function POST(req: NextRequest) { - const tracker = createRequestTracker() - - try { - const session = await getSession() - if (!session?.user?.id) { - return createUnauthorizedResponse() - } - - const userId = session.user.id - const body = await req.json() - - try { - const preview = JSON.stringify(body).slice(0, 300) - 
logger.debug(`[${tracker.requestId}] Incoming execute-tool request`, { preview }) - } catch {} - - const { toolCallId, toolName, arguments: toolArgs, workflowId } = ExecuteToolSchema.parse(body) - - const resolvedToolName = resolveToolId(toolName) - - logger.info(`[${tracker.requestId}] Executing tool`, { - toolCallId, - toolName, - resolvedToolName, - workflowId, - hasArgs: Object.keys(toolArgs).length > 0, - }) - - const toolConfig = getTool(resolvedToolName) - if (!toolConfig) { - // Find similar tool names to help debug - const { tools: allTools } = await import('@/tools/registry') - const allToolNames = Object.keys(allTools) - const prefix = toolName.split('_').slice(0, 2).join('_') - const similarTools = allToolNames - .filter((name) => name.startsWith(`${prefix.split('_')[0]}_`)) - .slice(0, 10) - - logger.warn(`[${tracker.requestId}] Tool not found in registry`, { - toolName, - prefix, - similarTools, - totalToolsInRegistry: allToolNames.length, - }) - return NextResponse.json( - { - success: false, - error: `Tool not found: ${toolName}. Similar tools: ${similarTools.join(', ')}`, - toolCallId, - }, - { status: 404 } - ) - } - - // Get the workspaceId from the workflow (env vars are stored at workspace level) - let workspaceId: string | undefined - if (workflowId) { - const workflowResult = await db - .select({ workspaceId: workflow.workspaceId }) - .from(workflow) - .where(eq(workflow.id, workflowId)) - .limit(1) - workspaceId = workflowResult[0]?.workspaceId ?? 
undefined - } - - // Get decrypted environment variables early so we can resolve all {{VAR}} references - const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId) - - logger.info(`[${tracker.requestId}] Fetched environment variables`, { - workflowId, - workspaceId, - envVarCount: Object.keys(decryptedEnvVars).length, - envVarKeys: Object.keys(decryptedEnvVars), - }) - - // Build execution params starting with LLM-provided arguments - // Resolve all {{ENV_VAR}} references in the arguments (deep for nested objects) - const executionParams: Record = resolveEnvVarReferences( - toolArgs, - decryptedEnvVars, - { deep: true } - ) as Record - - logger.info(`[${tracker.requestId}] Resolved env var references in arguments`, { - toolName, - originalArgKeys: Object.keys(toolArgs), - resolvedArgKeys: Object.keys(executionParams), - }) - - // Resolve OAuth access token if required - if (toolConfig.oauth?.required && toolConfig.oauth.provider) { - const provider = toolConfig.oauth.provider - logger.info(`[${tracker.requestId}] Resolving OAuth token`, { provider }) - - try { - // Find the account for this provider and user - const accounts = await db - .select() - .from(account) - .where(and(eq(account.providerId, provider), eq(account.userId, userId))) - .limit(1) - - if (accounts.length > 0) { - const acc = accounts[0] - const requestId = generateRequestId() - const { accessToken } = await refreshTokenIfNeeded(requestId, acc as any, acc.id) - - if (accessToken) { - executionParams.accessToken = accessToken - logger.info(`[${tracker.requestId}] OAuth token resolved`, { provider }) - } else { - logger.warn(`[${tracker.requestId}] No access token available`, { provider }) - return NextResponse.json( - { - success: false, - error: `OAuth token not available for ${provider}. 
Please reconnect your account.`, - toolCallId, - }, - { status: 400 } - ) - } - } else { - logger.warn(`[${tracker.requestId}] No account found for provider`, { provider }) - return NextResponse.json( - { - success: false, - error: `No ${provider} account connected. Please connect your account first.`, - toolCallId, - }, - { status: 400 } - ) - } - } catch (error) { - logger.error(`[${tracker.requestId}] Failed to resolve OAuth token`, { - provider, - error: error instanceof Error ? error.message : String(error), - }) - return NextResponse.json( - { - success: false, - error: `Failed to get OAuth token for ${provider}`, - toolCallId, - }, - { status: 500 } - ) - } - } - - // Check if tool requires an API key that wasn't resolved via {{ENV_VAR}} reference - const needsApiKey = toolConfig.params?.apiKey?.required - - if (needsApiKey && !executionParams.apiKey) { - logger.warn(`[${tracker.requestId}] No API key found for tool`, { toolName }) - return NextResponse.json( - { - success: false, - error: `API key not provided for ${toolName}. 
Use {{YOUR_API_KEY_ENV_VAR}} to reference your environment variable.`, - toolCallId, - }, - { status: 400 } - ) - } - - // Add execution context - executionParams._context = { - workflowId, - userId, - } - - // Special handling for function_execute - inject environment variables - if (toolName === 'function_execute') { - executionParams.envVars = decryptedEnvVars - executionParams.workflowVariables = {} // No workflow variables in copilot context - executionParams.blockData = {} // No block data in copilot context - executionParams.blockNameMapping = {} // No block mapping in copilot context - executionParams.language = executionParams.language || 'javascript' - executionParams.timeout = executionParams.timeout || 30000 - - logger.info(`[${tracker.requestId}] Injected env vars for function_execute`, { - envVarCount: Object.keys(decryptedEnvVars).length, - }) - } - - // Execute the tool - logger.info(`[${tracker.requestId}] Executing tool with resolved credentials`, { - toolName, - hasAccessToken: !!executionParams.accessToken, - hasApiKey: !!executionParams.apiKey, - }) - - const result = await executeTool(resolvedToolName, executionParams) - - logger.info(`[${tracker.requestId}] Tool execution complete`, { - toolName, - success: result.success, - hasOutput: !!result.output, - }) - - return NextResponse.json({ - success: true, - toolCallId, - result: { - success: result.success, - output: result.output, - error: result.error, - }, - }) - } catch (error) { - if (error instanceof z.ZodError) { - logger.debug(`[${tracker.requestId}] Zod validation error`, { issues: error.issues }) - return createBadRequestResponse('Invalid request body for execute-tool') - } - logger.error(`[${tracker.requestId}] Failed to execute tool:`, error) - const errorMessage = error instanceof Error ? 
error.message : 'Failed to execute tool' - return createInternalServerErrorResponse(errorMessage) - } -} diff --git a/apps/sim/app/api/copilot/stats/route.test.ts b/apps/sim/app/api/copilot/stats/route.test.ts index 35a0ad1df..1732a686f 100644 --- a/apps/sim/app/api/copilot/stats/route.test.ts +++ b/apps/sim/app/api/copilot/stats/route.test.ts @@ -40,6 +40,7 @@ describe('Copilot Stats API Route', () => { vi.doMock('@/lib/copilot/constants', () => ({ SIM_AGENT_API_URL_DEFAULT: 'https://agent.sim.example.com', + SIM_AGENT_API_URL: 'https://agent.sim.example.com', })) vi.doMock('@/lib/core/config/env', async () => { diff --git a/apps/sim/app/api/copilot/stats/route.ts b/apps/sim/app/api/copilot/stats/route.ts index ea52c1c58..493f6e4ec 100644 --- a/apps/sim/app/api/copilot/stats/route.ts +++ b/apps/sim/app/api/copilot/stats/route.ts @@ -1,6 +1,6 @@ import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' -import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' +import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' import { authenticateCopilotRequestSessionOnly, createBadRequestResponse, @@ -10,8 +10,6 @@ import { } from '@/lib/copilot/request-helpers' import { env } from '@/lib/core/config/env' -const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT - const BodySchema = z.object({ messageId: z.string(), diffCreated: z.boolean(), diff --git a/apps/sim/app/api/copilot/tools/mark-complete/route.ts b/apps/sim/app/api/copilot/tools/mark-complete/route.ts deleted file mode 100644 index 1ada484e5..000000000 --- a/apps/sim/app/api/copilot/tools/mark-complete/route.ts +++ /dev/null @@ -1,123 +0,0 @@ -import { createLogger } from '@sim/logger' -import { type NextRequest, NextResponse } from 'next/server' -import { z } from 'zod' -import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' -import { - authenticateCopilotRequestSessionOnly, - createBadRequestResponse, - createInternalServerErrorResponse, 
- createRequestTracker, - createUnauthorizedResponse, -} from '@/lib/copilot/request-helpers' -import { env } from '@/lib/core/config/env' - -const logger = createLogger('CopilotMarkToolCompleteAPI') - -const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT - -const MarkCompleteSchema = z.object({ - id: z.string(), - name: z.string(), - status: z.number().int(), - message: z.any().optional(), - data: z.any().optional(), -}) - -/** - * POST /api/copilot/tools/mark-complete - * Proxy to Sim Agent: POST /api/tools/mark-complete - */ -export async function POST(req: NextRequest) { - const tracker = createRequestTracker() - - try { - const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly() - if (!isAuthenticated || !userId) { - return createUnauthorizedResponse() - } - - const body = await req.json() - - // Log raw body shape for diagnostics (avoid dumping huge payloads) - try { - const bodyPreview = JSON.stringify(body).slice(0, 300) - logger.debug(`[${tracker.requestId}] Incoming mark-complete raw body preview`, { - preview: `${bodyPreview}${bodyPreview.length === 300 ? '...' : ''}`, - }) - } catch {} - - const parsed = MarkCompleteSchema.parse(body) - - const messagePreview = (() => { - try { - const s = - typeof parsed.message === 'string' ? parsed.message : JSON.stringify(parsed.message) - return s ? `${s.slice(0, 200)}${s.length > 200 ? '...' : ''}` : undefined - } catch { - return undefined - } - })() - - logger.info(`[${tracker.requestId}] Forwarding tool mark-complete`, { - userId, - toolCallId: parsed.id, - toolName: parsed.name, - status: parsed.status, - hasMessage: parsed.message !== undefined, - hasData: parsed.data !== undefined, - messagePreview, - agentUrl: `${SIM_AGENT_API_URL}/api/tools/mark-complete`, - }) - - const agentRes = await fetch(`${SIM_AGENT_API_URL}/api/tools/mark-complete`, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - ...(env.COPILOT_API_KEY ? 
{ 'x-api-key': env.COPILOT_API_KEY } : {}), - }, - body: JSON.stringify(parsed), - }) - - // Attempt to parse agent response JSON - let agentJson: any = null - let agentText: string | null = null - try { - agentJson = await agentRes.json() - } catch (_) { - try { - agentText = await agentRes.text() - } catch {} - } - - logger.info(`[${tracker.requestId}] Agent responded to mark-complete`, { - status: agentRes.status, - ok: agentRes.ok, - responseJsonPreview: agentJson ? JSON.stringify(agentJson).slice(0, 300) : undefined, - responseTextPreview: agentText ? agentText.slice(0, 300) : undefined, - }) - - if (agentRes.ok) { - return NextResponse.json({ success: true }) - } - - const errorMessage = - agentJson?.error || agentText || `Agent responded with status ${agentRes.status}` - const status = agentRes.status >= 500 ? 500 : 400 - - logger.warn(`[${tracker.requestId}] Mark-complete failed`, { - status, - error: errorMessage, - }) - - return NextResponse.json({ success: false, error: errorMessage }, { status }) - } catch (error) { - if (error instanceof z.ZodError) { - logger.warn(`[${tracker.requestId}] Invalid mark-complete request body`, { - issues: error.issues, - }) - return createBadRequestResponse('Invalid request body for mark-complete') - } - logger.error(`[${tracker.requestId}] Failed to proxy mark-complete:`, error) - return createInternalServerErrorResponse('Failed to mark tool as complete') - } -} diff --git a/apps/sim/app/api/copilot/user-models/route.ts b/apps/sim/app/api/copilot/user-models/route.ts index ead14a5e9..86e31c747 100644 --- a/apps/sim/app/api/copilot/user-models/route.ts +++ b/apps/sim/app/api/copilot/user-models/route.ts @@ -28,6 +28,7 @@ const DEFAULT_ENABLED_MODELS: Record = { 'claude-4-sonnet': false, 'claude-4.5-haiku': true, 'claude-4.5-sonnet': true, + 'claude-4.6-opus': true, 'claude-4.5-opus': true, 'claude-4.1-opus': false, 'gemini-3-pro': true, diff --git 
a/apps/sim/app/api/mcp/copilot/.well-known/oauth-authorization-server/route.ts b/apps/sim/app/api/mcp/copilot/.well-known/oauth-authorization-server/route.ts new file mode 100644 index 000000000..9dd3d6bd4 --- /dev/null +++ b/apps/sim/app/api/mcp/copilot/.well-known/oauth-authorization-server/route.ts @@ -0,0 +1,6 @@ +import type { NextRequest, NextResponse } from 'next/server' +import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery' + +export async function GET(request: NextRequest): Promise { + return createMcpAuthorizationServerMetadataResponse(request) +} diff --git a/apps/sim/app/api/mcp/copilot/.well-known/oauth-protected-resource/route.ts b/apps/sim/app/api/mcp/copilot/.well-known/oauth-protected-resource/route.ts new file mode 100644 index 000000000..d1136b555 --- /dev/null +++ b/apps/sim/app/api/mcp/copilot/.well-known/oauth-protected-resource/route.ts @@ -0,0 +1,6 @@ +import type { NextRequest, NextResponse } from 'next/server' +import { createMcpProtectedResourceMetadataResponse } from '@/lib/mcp/oauth-discovery' + +export async function GET(request: NextRequest): Promise { + return createMcpProtectedResourceMetadataResponse(request) +} diff --git a/apps/sim/app/api/mcp/copilot/route.ts b/apps/sim/app/api/mcp/copilot/route.ts new file mode 100644 index 000000000..4d02ab122 --- /dev/null +++ b/apps/sim/app/api/mcp/copilot/route.ts @@ -0,0 +1,793 @@ +import { randomUUID } from 'node:crypto' +import { Server } from '@modelcontextprotocol/sdk/server/index.js' +import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js' +import { + CallToolRequestSchema, + type CallToolResult, + ErrorCode, + type JSONRPCError, + ListToolsRequestSchema, + type ListToolsResult, + McpError, + type RequestId, +} from '@modelcontextprotocol/sdk/types.js' +import { db } from '@sim/db' +import { userStats } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { eq, sql } from 'drizzle-orm' 
+import { type NextRequest, NextResponse } from 'next/server' +import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription' +import { getCopilotModel } from '@/lib/copilot/config' +import { + ORCHESTRATION_TIMEOUT_MS, + SIM_AGENT_API_URL, + SIM_AGENT_VERSION, +} from '@/lib/copilot/constants' +import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' +import { orchestrateSubagentStream } from '@/lib/copilot/orchestrator/subagent' +import { + executeToolServerSide, + prepareExecutionContext, +} from '@/lib/copilot/orchestrator/tool-executor' +import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/definitions' +import { env } from '@/lib/core/config/env' +import { RateLimiter } from '@/lib/core/rate-limiter' +import { resolveWorkflowIdForUser } from '@/lib/workflows/utils' + +const logger = createLogger('CopilotMcpAPI') +const mcpRateLimiter = new RateLimiter() + +export const dynamic = 'force-dynamic' +export const runtime = 'nodejs' +export const maxDuration = 300 + +interface CopilotKeyAuthResult { + success: boolean + userId?: string + error?: string +} + +/** + * Validates a copilot API key by forwarding it to the Go copilot service's + * `/api/validate-key` endpoint. Returns the associated userId on success. 
+ */ +async function authenticateCopilotApiKey(apiKey: string): Promise { + try { + const internalSecret = env.INTERNAL_API_SECRET + if (!internalSecret) { + logger.error('INTERNAL_API_SECRET not configured') + return { success: false, error: 'Server configuration error' } + } + + const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-api-key': internalSecret, + }, + body: JSON.stringify({ targetApiKey: apiKey }), + signal: AbortSignal.timeout(10_000), + }) + + if (!res.ok) { + const body = await res.json().catch(() => null) + const upstream = (body as Record)?.message + const status = res.status + + if (status === 401 || status === 403) { + return { + success: false, + error: `Invalid Copilot API key. Generate a new key in Settings → Copilot and set it in the x-api-key header.`, + } + } + if (status === 402) { + return { + success: false, + error: `Usage limit exceeded for this Copilot API key. Upgrade your plan or wait for your quota to reset.`, + } + } + + return { success: false, error: String(upstream ?? 'Copilot API key validation failed') } + } + + const data = (await res.json()) as { ok?: boolean; userId?: string } + if (!data.ok || !data.userId) { + return { + success: false, + error: 'Invalid Copilot API key. Generate a new key in Settings → Copilot.', + } + } + + return { success: true, userId: data.userId } + } catch (error) { + logger.error('Copilot API key validation failed', { error }) + return { + success: false, + error: + 'Could not validate Copilot API key — the authentication service is temporarily unreachable. This is NOT a problem with the API key itself; please retry shortly.', + } + } +} + +/** + * MCP Server instructions that guide LLMs on how to use the Sim copilot tools. + * This is included in the initialize response to help external LLMs understand + * the workflow lifecycle and best practices. 
+ */
+const MCP_SERVER_INSTRUCTIONS = `
+## Sim Workflow Copilot
+
+Sim is a workflow automation platform. Workflows are visual pipelines of connected blocks (Agent, Function, Condition, API, integrations, etc.). The Agent block is the core — an LLM with tools, memory, structured output, and knowledge bases.
+
+### Workflow Lifecycle (Happy Path)
+
+1. \`list_workspaces\` → know where to work
+2. \`create_workflow(name, workspaceId)\` → get a workflowId
+3. \`sim_build(request, workflowId)\` → plan and build in one pass
+4. \`sim_test(request, workflowId)\` → verify it works
+5. \`sim_deploy("deploy as api", workflowId)\` → make it accessible externally (optional)
+
+For fine-grained control, use \`sim_plan\` → \`sim_edit\` instead of \`sim_build\`. Pass the plan object from sim_plan EXACTLY as-is to sim_edit's context.plan field.
+
+### Working with Existing Workflows
+
+When the user refers to a workflow by name or description ("the email one", "my Slack bot"):
+1. Use \`sim_discovery\` to find it by functionality
+2. Or use \`list_workflows\` and match by name
+3. Then pass the workflowId to other tools
+
+### Organization
+
+- \`rename_workflow\` — rename a workflow
+- \`move_workflow\` — move a workflow into a folder (or root with null)
+- \`move_folder\` — nest a folder inside another (or root with null)
+- \`create_folder(name, parentId)\` — create nested folder hierarchies
+
+### Key Rules
+
+- You can test workflows immediately after building — deployment is only needed for external access (API, chat, MCP).
+- All copilot tools (build, plan, edit, deploy, test, debug) require workflowId.
+- If the user reports errors → use \`sim_debug\` first, don't guess.
+- Variable syntax: \`<blockName.output>\` for block outputs, \`{{ENV_VAR}}\` for env vars.
+` + +type HeaderMap = Record + +function createError(id: RequestId, code: ErrorCode | number, message: string): JSONRPCError { + return { + jsonrpc: '2.0', + id, + error: { code, message }, + } +} + +function normalizeRequestHeaders(request: NextRequest): HeaderMap { + const headers: HeaderMap = {} + + request.headers.forEach((value, key) => { + headers[key.toLowerCase()] = value + }) + + return headers +} + +function readHeader(headers: HeaderMap | undefined, name: string): string | undefined { + if (!headers) return undefined + const value = headers[name.toLowerCase()] + if (Array.isArray(value)) { + return value[0] + } + return value +} + +class NextResponseCapture { + private _status = 200 + private _headers = new Headers() + private _controller: ReadableStreamDefaultController | null = null + private _pendingChunks: Uint8Array[] = [] + private _closeHandlers: Array<() => void> = [] + private _errorHandlers: Array<(error: Error) => void> = [] + private _headersWritten = false + private _ended = false + private _headersPromise: Promise + private _resolveHeaders: (() => void) | null = null + private _endedPromise: Promise + private _resolveEnded: (() => void) | null = null + readonly readable: ReadableStream + + constructor() { + this._headersPromise = new Promise((resolve) => { + this._resolveHeaders = resolve + }) + + this._endedPromise = new Promise((resolve) => { + this._resolveEnded = resolve + }) + + this.readable = new ReadableStream({ + start: (controller) => { + this._controller = controller + if (this._pendingChunks.length > 0) { + for (const chunk of this._pendingChunks) { + controller.enqueue(chunk) + } + this._pendingChunks = [] + } + }, + cancel: () => { + this._ended = true + this._resolveEnded?.() + this.triggerCloseHandlers() + }, + }) + } + + private markHeadersWritten(): void { + if (this._headersWritten) return + this._headersWritten = true + this._resolveHeaders?.() + } + + private triggerCloseHandlers(): void { + for (const handler of 
this._closeHandlers) { + try { + handler() + } catch (error) { + this.triggerErrorHandlers(error instanceof Error ? error : new Error(String(error))) + } + } + } + + private triggerErrorHandlers(error: Error): void { + for (const errorHandler of this._errorHandlers) { + errorHandler(error) + } + } + + private normalizeChunk(chunk: unknown): Uint8Array | null { + if (typeof chunk === 'string') { + return new TextEncoder().encode(chunk) + } + + if (chunk instanceof Uint8Array) { + return chunk + } + + if (chunk === undefined || chunk === null) { + return null + } + + return new TextEncoder().encode(String(chunk)) + } + + writeHead(status: number, headers?: Record): this { + this._status = status + + if (headers) { + Object.entries(headers).forEach(([key, value]) => { + if (Array.isArray(value)) { + this._headers.set(key, value.join(', ')) + } else { + this._headers.set(key, String(value)) + } + }) + } + + this.markHeadersWritten() + return this + } + + flushHeaders(): this { + this.markHeadersWritten() + return this + } + + write(chunk: unknown): boolean { + const normalized = this.normalizeChunk(chunk) + if (!normalized) return true + + this.markHeadersWritten() + + if (this._controller) { + try { + this._controller.enqueue(normalized) + } catch (error) { + this.triggerErrorHandlers(error instanceof Error ? error : new Error(String(error))) + } + } else { + this._pendingChunks.push(normalized) + } + + return true + } + + end(chunk?: unknown): this { + if (chunk !== undefined) this.write(chunk) + this.markHeadersWritten() + if (this._ended) return this + + this._ended = true + this._resolveEnded?.() + + if (this._controller) { + try { + this._controller.close() + } catch (error) { + this.triggerErrorHandlers(error instanceof Error ? 
error : new Error(String(error))) + } + } + + this.triggerCloseHandlers() + + return this + } + + async waitForHeaders(timeoutMs = 30000): Promise { + if (this._headersWritten) return + + await Promise.race([ + this._headersPromise, + new Promise((resolve) => { + setTimeout(resolve, timeoutMs) + }), + ]) + } + + async waitForEnd(timeoutMs = 30000): Promise { + if (this._ended) return + + await Promise.race([ + this._endedPromise, + new Promise((resolve) => { + setTimeout(resolve, timeoutMs) + }), + ]) + } + + on(event: 'close' | 'error', handler: (() => void) | ((error: Error) => void)): this { + if (event === 'close') { + this._closeHandlers.push(handler as () => void) + } + + if (event === 'error') { + this._errorHandlers.push(handler as (error: Error) => void) + } + + return this + } + + toNextResponse(): NextResponse { + return new NextResponse(this.readable, { + status: this._status, + headers: this._headers, + }) + } +} + +function buildMcpServer(abortSignal?: AbortSignal): Server { + const server = new Server( + { + name: 'sim-copilot', + version: '1.0.0', + }, + { + capabilities: { tools: {} }, + instructions: MCP_SERVER_INSTRUCTIONS, + } + ) + + server.setRequestHandler(ListToolsRequestSchema, async () => { + const directTools = DIRECT_TOOL_DEFS.map((tool) => ({ + name: tool.name, + description: tool.description, + inputSchema: tool.inputSchema, + })) + + const subagentTools = SUBAGENT_TOOL_DEFS.map((tool) => ({ + name: tool.name, + description: tool.description, + inputSchema: tool.inputSchema, + })) + + const result: ListToolsResult = { + tools: [...directTools, ...subagentTools], + } + + return result + }) + + server.setRequestHandler(CallToolRequestSchema, async (request, extra) => { + const headers = (extra.requestInfo?.headers || {}) as HeaderMap + const apiKeyHeader = readHeader(headers, 'x-api-key') + + if (!apiKeyHeader) { + return { + content: [ + { + type: 'text' as const, + text: 'AUTHENTICATION ERROR: No Copilot API key provided. 
The user must set their Copilot API key in the x-api-key header. They can generate one in the Sim app under Settings → Copilot. Do NOT retry — this will fail until the key is configured.', + }, + ], + isError: true, + } + } + + const authResult = await authenticateCopilotApiKey(apiKeyHeader) + if (!authResult.success || !authResult.userId) { + logger.warn('MCP copilot key auth failed', { method: request.method }) + return { + content: [ + { + type: 'text' as const, + text: `AUTHENTICATION ERROR: ${authResult.error} Do NOT retry — this will fail until the user fixes their Copilot API key.`, + }, + ], + isError: true, + } + } + + const rateLimitResult = await mcpRateLimiter.checkRateLimitWithSubscription( + authResult.userId, + await getHighestPrioritySubscription(authResult.userId), + 'api-endpoint', + false + ) + + if (!rateLimitResult.allowed) { + return { + content: [ + { + type: 'text' as const, + text: `RATE LIMIT: Too many requests. Please wait and retry after ${rateLimitResult.resetAt.toISOString()}.`, + }, + ], + isError: true, + } + } + + const params = request.params as + | { name?: string; arguments?: Record } + | undefined + if (!params?.name) { + throw new McpError(ErrorCode.InvalidParams, 'Tool name required') + } + + const result = await handleToolsCall( + { + name: params.name, + arguments: params.arguments, + }, + authResult.userId, + abortSignal + ) + + trackMcpCopilotCall(authResult.userId) + + return result + }) + + return server +} + +async function handleMcpRequestWithSdk( + request: NextRequest, + parsedBody: unknown +): Promise { + const server = buildMcpServer(request.signal) + const transport = new StreamableHTTPServerTransport({ + sessionIdGenerator: undefined, + enableJsonResponse: true, + }) + + const responseCapture = new NextResponseCapture() + const requestAdapter = { + method: request.method, + headers: normalizeRequestHeaders(request), + } + + await server.connect(transport) + + try { + await transport.handleRequest(requestAdapter 
as any, responseCapture as any, parsedBody) + await responseCapture.waitForHeaders() + // Must exceed the longest possible tool execution (build = 5 min). + // Using ORCHESTRATION_TIMEOUT_MS + 60 s buffer so the orchestrator can + // finish or time-out on its own before the transport is torn down. + await responseCapture.waitForEnd(ORCHESTRATION_TIMEOUT_MS + 60_000) + return responseCapture.toNextResponse() + } finally { + await server.close().catch(() => {}) + await transport.close().catch(() => {}) + } +} + +export async function GET() { + // Return 405 to signal that server-initiated SSE notifications are not + // supported. Without this, clients like mcp-remote will repeatedly + // reconnect trying to open an SSE stream, flooding the logs with GETs. + return new NextResponse(null, { status: 405 }) +} + +export async function POST(request: NextRequest) { + try { + let parsedBody: unknown + + try { + parsedBody = await request.json() + } catch { + return NextResponse.json(createError(0, ErrorCode.ParseError, 'Invalid JSON body'), { + status: 400, + }) + } + + return await handleMcpRequestWithSdk(request, parsedBody) + } catch (error) { + logger.error('Error handling MCP request', { error }) + return NextResponse.json(createError(0, ErrorCode.InternalError, 'Internal error'), { + status: 500, + }) + } +} + +export async function DELETE(request: NextRequest) { + void request + return NextResponse.json(createError(0, -32000, 'Method not allowed.'), { status: 405 }) +} + +/** + * Increment MCP copilot call counter in userStats (fire-and-forget). 
+ */ +function trackMcpCopilotCall(userId: string): void { + db.update(userStats) + .set({ + totalMcpCopilotCalls: sql`total_mcp_copilot_calls + 1`, + lastActive: new Date(), + }) + .where(eq(userStats.userId, userId)) + .then(() => {}) + .catch((error) => { + logger.error('Failed to track MCP copilot call', { error, userId }) + }) +} + +async function handleToolsCall( + params: { name: string; arguments?: Record }, + userId: string, + abortSignal?: AbortSignal +): Promise { + const args = params.arguments || {} + + const directTool = DIRECT_TOOL_DEFS.find((tool) => tool.name === params.name) + if (directTool) { + return handleDirectToolCall(directTool, args, userId) + } + + const subagentTool = SUBAGENT_TOOL_DEFS.find((tool) => tool.name === params.name) + if (subagentTool) { + return handleSubagentToolCall(subagentTool, args, userId, abortSignal) + } + + throw new McpError(ErrorCode.MethodNotFound, `Tool not found: ${params.name}`) +} + +async function handleDirectToolCall( + toolDef: (typeof DIRECT_TOOL_DEFS)[number], + args: Record, + userId: string +): Promise { + try { + const execContext = await prepareExecutionContext(userId, (args.workflowId as string) || '') + + const toolCall = { + id: randomUUID(), + name: toolDef.toolId, + status: 'pending' as const, + params: args as Record, + startTime: Date.now(), + } + + const result = await executeToolServerSide(toolCall, execContext) + + return { + content: [ + { + type: 'text', + text: JSON.stringify(result.output ?? result, null, 2), + }, + ], + isError: !result.success, + } + } catch (error) { + logger.error('Direct tool execution failed', { tool: toolDef.name, error }) + return { + content: [ + { + type: 'text', + text: `Tool execution failed: ${error instanceof Error ? error.message : String(error)}`, + }, + ], + isError: true, + } + } +} + +/** + * Build mode uses the main chat orchestrator with the 'fast' command instead of + * the subagent endpoint. 
In Go, 'build' is not a registered subagent — it's a mode + * (ModeFast) on the main chat processor that bypasses subagent orchestration and + * executes all tools directly. + */ +async function handleBuildToolCall( + args: Record, + userId: string, + abortSignal?: AbortSignal +): Promise { + try { + const requestText = (args.request as string) || JSON.stringify(args) + const { model } = getCopilotModel('chat') + const workflowId = args.workflowId as string | undefined + + const resolved = workflowId ? { workflowId } : await resolveWorkflowIdForUser(userId) + + if (!resolved?.workflowId) { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + success: false, + error: 'workflowId is required for build. Call create_workflow first.', + }, + null, + 2 + ), + }, + ], + isError: true, + } + } + + const chatId = randomUUID() + + const requestPayload = { + message: requestText, + workflowId: resolved.workflowId, + userId, + model, + mode: 'agent', + commands: ['fast'], + messageId: randomUUID(), + version: SIM_AGENT_VERSION, + headless: true, + chatId, + source: 'mcp', + } + + const result = await orchestrateCopilotStream(requestPayload, { + userId, + workflowId: resolved.workflowId, + chatId, + autoExecuteTools: true, + timeout: 300000, + interactive: false, + abortSignal, + }) + + const responseData = { + success: result.success, + content: result.content, + toolCalls: result.toolCalls, + error: result.error, + } + + return { + content: [{ type: 'text', text: JSON.stringify(responseData, null, 2) }], + isError: !result.success, + } + } catch (error) { + logger.error('Build tool call failed', { error }) + return { + content: [ + { + type: 'text', + text: `Build failed: ${error instanceof Error ? 
error.message : String(error)}`, + }, + ], + isError: true, + } + } +} + +async function handleSubagentToolCall( + toolDef: (typeof SUBAGENT_TOOL_DEFS)[number], + args: Record, + userId: string, + abortSignal?: AbortSignal +): Promise { + if (toolDef.agentId === 'build') { + return handleBuildToolCall(args, userId, abortSignal) + } + + try { + const requestText = + (args.request as string) || + (args.message as string) || + (args.error as string) || + JSON.stringify(args) + + const context = (args.context as Record) || {} + if (args.plan && !context.plan) { + context.plan = args.plan + } + + const { model } = getCopilotModel('chat') + + const result = await orchestrateSubagentStream( + toolDef.agentId, + { + message: requestText, + workflowId: args.workflowId, + workspaceId: args.workspaceId, + context, + model, + headless: true, + source: 'mcp', + }, + { + userId, + workflowId: args.workflowId as string | undefined, + workspaceId: args.workspaceId as string | undefined, + abortSignal, + } + ) + + let responseData: unknown + + if (result.structuredResult) { + responseData = { + success: result.structuredResult.success ?? result.success, + type: result.structuredResult.type, + summary: result.structuredResult.summary, + data: result.structuredResult.data, + } + } else if (result.error) { + responseData = { + success: false, + error: result.error, + errors: result.errors, + } + } else { + responseData = { + success: result.success, + content: result.content, + } + } + + return { + content: [ + { + type: 'text', + text: JSON.stringify(responseData, null, 2), + }, + ], + isError: !result.success, + } + } catch (error) { + logger.error('Subagent tool call failed', { + tool: toolDef.name, + agentId: toolDef.agentId, + error, + }) + + return { + content: [ + { + type: 'text', + text: `Subagent call failed: ${error instanceof Error ? 
error.message : String(error)}`, + }, + ], + isError: true, + } + } +} diff --git a/apps/sim/app/api/mcp/events/route.test.ts b/apps/sim/app/api/mcp/events/route.test.ts new file mode 100644 index 000000000..f3db4d575 --- /dev/null +++ b/apps/sim/app/api/mcp/events/route.test.ts @@ -0,0 +1,98 @@ +/** + * Tests for MCP SSE events endpoint + * + * @vitest-environment node + */ +import { createMockRequest, mockAuth, mockConsoleLogger } from '@sim/testing' +import { beforeEach, describe, expect, it, vi } from 'vitest' + +mockConsoleLogger() +const auth = mockAuth() + +const mockGetUserEntityPermissions = vi.fn() +vi.doMock('@/lib/workspaces/permissions/utils', () => ({ + getUserEntityPermissions: mockGetUserEntityPermissions, +})) + +vi.doMock('@/lib/mcp/connection-manager', () => ({ + mcpConnectionManager: null, +})) + +vi.doMock('@/lib/mcp/pubsub', () => ({ + mcpPubSub: null, +})) + +const { GET } = await import('./route') + +describe('MCP Events SSE Endpoint', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it('returns 401 when session is missing', async () => { + auth.setUnauthenticated() + + const request = createMockRequest( + 'GET', + undefined, + {}, + 'http://localhost:3000/api/mcp/events?workspaceId=ws-123' + ) + + const response = await GET(request as any) + + expect(response.status).toBe(401) + const text = await response.text() + expect(text).toBe('Unauthorized') + }) + + it('returns 400 when workspaceId is missing', async () => { + auth.setAuthenticated() + + const request = createMockRequest('GET', undefined, {}, 'http://localhost:3000/api/mcp/events') + + const response = await GET(request as any) + + expect(response.status).toBe(400) + const text = await response.text() + expect(text).toBe('Missing workspaceId query parameter') + }) + + it('returns 403 when user lacks workspace access', async () => { + auth.setAuthenticated() + mockGetUserEntityPermissions.mockResolvedValue(null) + + const request = createMockRequest( + 'GET', + undefined, + 
{}, + 'http://localhost:3000/api/mcp/events?workspaceId=ws-123' + ) + + const response = await GET(request as any) + + expect(response.status).toBe(403) + const text = await response.text() + expect(text).toBe('Access denied to workspace') + expect(mockGetUserEntityPermissions).toHaveBeenCalledWith('user-123', 'workspace', 'ws-123') + }) + + it('returns SSE stream when authorized', async () => { + auth.setAuthenticated() + mockGetUserEntityPermissions.mockResolvedValue({ read: true }) + + const request = createMockRequest( + 'GET', + undefined, + {}, + 'http://localhost:3000/api/mcp/events?workspaceId=ws-123' + ) + + const response = await GET(request as any) + + expect(response.status).toBe(200) + expect(response.headers.get('Content-Type')).toBe('text/event-stream') + expect(response.headers.get('Cache-Control')).toBe('no-cache') + expect(response.headers.get('Connection')).toBe('keep-alive') + }) +}) diff --git a/apps/sim/app/api/mcp/events/route.ts b/apps/sim/app/api/mcp/events/route.ts new file mode 100644 index 000000000..6df91db5c --- /dev/null +++ b/apps/sim/app/api/mcp/events/route.ts @@ -0,0 +1,111 @@ +/** + * SSE endpoint for MCP tool-change events. + * + * Pushes `tools_changed` events to the browser when: + * - An external MCP server sends `notifications/tools/list_changed` (via connection manager) + * - A workflow CRUD route modifies workflow MCP server tools (via pub/sub) + * + * Auth is handled via session cookies (EventSource sends cookies automatically). 
+ */ + +import { createLogger } from '@sim/logger' +import type { NextRequest } from 'next/server' +import { getSession } from '@/lib/auth' +import { SSE_HEADERS } from '@/lib/core/utils/sse' +import { mcpConnectionManager } from '@/lib/mcp/connection-manager' +import { mcpPubSub } from '@/lib/mcp/pubsub' +import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils' + +const logger = createLogger('McpEventsSSE') + +export const dynamic = 'force-dynamic' + +const HEARTBEAT_INTERVAL_MS = 30_000 + +export async function GET(request: NextRequest) { + const session = await getSession() + if (!session?.user?.id) { + return new Response('Unauthorized', { status: 401 }) + } + + const { searchParams } = new URL(request.url) + const workspaceId = searchParams.get('workspaceId') + if (!workspaceId) { + return new Response('Missing workspaceId query parameter', { status: 400 }) + } + + const permissions = await getUserEntityPermissions(session.user.id, 'workspace', workspaceId) + if (!permissions) { + return new Response('Access denied to workspace', { status: 403 }) + } + + const encoder = new TextEncoder() + const unsubscribers: Array<() => void> = [] + + const stream = new ReadableStream({ + start(controller) { + const send = (eventName: string, data: Record) => { + try { + controller.enqueue( + encoder.encode(`event: ${eventName}\ndata: ${JSON.stringify(data)}\n\n`) + ) + } catch { + // Stream already closed + } + } + + // Subscribe to external MCP server tool changes + if (mcpConnectionManager) { + const unsub = mcpConnectionManager.subscribe((event) => { + if (event.workspaceId !== workspaceId) return + send('tools_changed', { + source: 'external', + serverId: event.serverId, + timestamp: event.timestamp, + }) + }) + unsubscribers.push(unsub) + } + + // Subscribe to workflow CRUD tool changes + if (mcpPubSub) { + const unsub = mcpPubSub.onWorkflowToolsChanged((event) => { + if (event.workspaceId !== workspaceId) return + send('tools_changed', { + source: 
'workflow', + serverId: event.serverId, + timestamp: Date.now(), + }) + }) + unsubscribers.push(unsub) + } + + // Heartbeat to keep the connection alive + const heartbeat = setInterval(() => { + try { + controller.enqueue(encoder.encode(': heartbeat\n\n')) + } catch { + clearInterval(heartbeat) + } + }, HEARTBEAT_INTERVAL_MS) + unsubscribers.push(() => clearInterval(heartbeat)) + + // Cleanup when client disconnects + request.signal.addEventListener('abort', () => { + for (const unsub of unsubscribers) { + unsub() + } + try { + controller.close() + } catch { + // Already closed + } + logger.info(`SSE connection closed for workspace ${workspaceId}`) + }) + + logger.info(`SSE connection opened for workspace ${workspaceId}`) + }, + }) + + return new Response(stream, { headers: SSE_HEADERS }) +} diff --git a/apps/sim/app/api/mcp/workflow-servers/[id]/route.ts b/apps/sim/app/api/mcp/workflow-servers/[id]/route.ts index 3ce0e0045..e0a1f085e 100644 --- a/apps/sim/app/api/mcp/workflow-servers/[id]/route.ts +++ b/apps/sim/app/api/mcp/workflow-servers/[id]/route.ts @@ -4,6 +4,7 @@ import { createLogger } from '@sim/logger' import { and, eq } from 'drizzle-orm' import type { NextRequest } from 'next/server' import { getParsedBody, withMcpAuth } from '@/lib/mcp/middleware' +import { mcpPubSub } from '@/lib/mcp/pubsub' import { createMcpErrorResponse, createMcpSuccessResponse } from '@/lib/mcp/utils' const logger = createLogger('WorkflowMcpServerAPI') @@ -146,6 +147,8 @@ export const DELETE = withMcpAuth('admin')( logger.info(`[${requestId}] Successfully deleted workflow MCP server: ${serverId}`) + mcpPubSub?.publishWorkflowToolsChanged({ serverId, workspaceId }) + return createMcpSuccessResponse({ message: `Server ${serverId} deleted successfully` }) } catch (error) { logger.error(`[${requestId}] Error deleting workflow MCP server:`, error) diff --git a/apps/sim/app/api/mcp/workflow-servers/[id]/tools/[toolId]/route.ts 
b/apps/sim/app/api/mcp/workflow-servers/[id]/tools/[toolId]/route.ts index d7fd53259..87113b868 100644 --- a/apps/sim/app/api/mcp/workflow-servers/[id]/tools/[toolId]/route.ts +++ b/apps/sim/app/api/mcp/workflow-servers/[id]/tools/[toolId]/route.ts @@ -4,6 +4,7 @@ import { createLogger } from '@sim/logger' import { and, eq } from 'drizzle-orm' import type { NextRequest } from 'next/server' import { getParsedBody, withMcpAuth } from '@/lib/mcp/middleware' +import { mcpPubSub } from '@/lib/mcp/pubsub' import { createMcpErrorResponse, createMcpSuccessResponse } from '@/lib/mcp/utils' import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema' @@ -115,6 +116,8 @@ export const PATCH = withMcpAuth('write')( logger.info(`[${requestId}] Successfully updated tool ${toolId}`) + mcpPubSub?.publishWorkflowToolsChanged({ serverId, workspaceId }) + return createMcpSuccessResponse({ tool: updatedTool }) } catch (error) { logger.error(`[${requestId}] Error updating tool:`, error) @@ -160,6 +163,8 @@ export const DELETE = withMcpAuth('write')( logger.info(`[${requestId}] Successfully deleted tool ${toolId}`) + mcpPubSub?.publishWorkflowToolsChanged({ serverId, workspaceId }) + return createMcpSuccessResponse({ message: `Tool ${toolId} deleted successfully` }) } catch (error) { logger.error(`[${requestId}] Error deleting tool:`, error) diff --git a/apps/sim/app/api/mcp/workflow-servers/[id]/tools/route.ts b/apps/sim/app/api/mcp/workflow-servers/[id]/tools/route.ts index b2cef8ee5..6705d5298 100644 --- a/apps/sim/app/api/mcp/workflow-servers/[id]/tools/route.ts +++ b/apps/sim/app/api/mcp/workflow-servers/[id]/tools/route.ts @@ -4,6 +4,7 @@ import { createLogger } from '@sim/logger' import { and, eq } from 'drizzle-orm' import type { NextRequest } from 'next/server' import { getParsedBody, withMcpAuth } from '@/lib/mcp/middleware' +import { mcpPubSub } from '@/lib/mcp/pubsub' import { createMcpErrorResponse, createMcpSuccessResponse } from '@/lib/mcp/utils' import { 
sanitizeToolName } from '@/lib/mcp/workflow-tool-schema' import { hasValidStartBlock } from '@/lib/workflows/triggers/trigger-utils.server' @@ -188,6 +189,8 @@ export const POST = withMcpAuth('write')( `[${requestId}] Successfully added tool ${toolName} (workflow: ${body.workflowId}) to server ${serverId}` ) + mcpPubSub?.publishWorkflowToolsChanged({ serverId, workspaceId }) + return createMcpSuccessResponse({ tool }, 201) } catch (error) { logger.error(`[${requestId}] Error adding tool:`, error) diff --git a/apps/sim/app/api/mcp/workflow-servers/route.ts b/apps/sim/app/api/mcp/workflow-servers/route.ts index e2900f5a8..1779e51a9 100644 --- a/apps/sim/app/api/mcp/workflow-servers/route.ts +++ b/apps/sim/app/api/mcp/workflow-servers/route.ts @@ -4,6 +4,7 @@ import { createLogger } from '@sim/logger' import { eq, inArray, sql } from 'drizzle-orm' import type { NextRequest } from 'next/server' import { getParsedBody, withMcpAuth } from '@/lib/mcp/middleware' +import { mcpPubSub } from '@/lib/mcp/pubsub' import { createMcpErrorResponse, createMcpSuccessResponse } from '@/lib/mcp/utils' import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema' import { hasValidStartBlock } from '@/lib/workflows/triggers/trigger-utils.server' @@ -174,6 +175,10 @@ export const POST = withMcpAuth('write')( `[${requestId}] Added ${addedTools.length} tools to server ${serverId}:`, addedTools.map((t) => t.toolName) ) + + if (addedTools.length > 0) { + mcpPubSub?.publishWorkflowToolsChanged({ serverId, workspaceId }) + } } logger.info( diff --git a/apps/sim/app/api/tools/jira/add-attachment/route.ts b/apps/sim/app/api/tools/jira/add-attachment/route.ts index 52b36b24a..63b031032 100644 --- a/apps/sim/app/api/tools/jira/add-attachment/route.ts +++ b/apps/sim/app/api/tools/jira/add-attachment/route.ts @@ -90,16 +90,24 @@ export async function POST(request: NextRequest) { ) } - const attachments = await response.json() - const attachmentIds = Array.isArray(attachments) - ? 
attachments.map((attachment) => attachment.id).filter(Boolean) - : [] + const jiraAttachments = await response.json() + const attachmentsList = Array.isArray(jiraAttachments) ? jiraAttachments : [] + + const attachmentIds = attachmentsList.map((att: any) => att.id).filter(Boolean) + const attachments = attachmentsList.map((att: any) => ({ + id: att.id ?? '', + filename: att.filename ?? '', + mimeType: att.mimeType ?? '', + size: att.size ?? 0, + content: att.content ?? '', + })) return NextResponse.json({ success: true, output: { ts: new Date().toISOString(), issueKey: validatedData.issueKey, + attachments, attachmentIds, files: filesOutput, }, diff --git a/apps/sim/app/api/tools/jira/issue/route.ts b/apps/sim/app/api/tools/jira/issue/route.ts deleted file mode 100644 index 3c837de04..000000000 --- a/apps/sim/app/api/tools/jira/issue/route.ts +++ /dev/null @@ -1,111 +0,0 @@ -import { createLogger } from '@sim/logger' -import { type NextRequest, NextResponse } from 'next/server' -import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' -import { validateJiraCloudId, validateJiraIssueKey } from '@/lib/core/security/input-validation' -import { getJiraCloudId } from '@/tools/jira/utils' - -export const dynamic = 'force-dynamic' - -const logger = createLogger('JiraIssueAPI') - -export async function POST(request: NextRequest) { - try { - const auth = await checkSessionOrInternalAuth(request) - if (!auth.success || !auth.userId) { - return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) - } - - const { domain, accessToken, issueId, cloudId: providedCloudId } = await request.json() - if (!domain) { - logger.error('Missing domain in request') - return NextResponse.json({ error: 'Domain is required' }, { status: 400 }) - } - - if (!accessToken) { - logger.error('Missing access token in request') - return NextResponse.json({ error: 'Access token is required' }, { status: 400 }) - } - - if (!issueId) { - logger.error('Missing issue ID in 
request') - return NextResponse.json({ error: 'Issue ID is required' }, { status: 400 }) - } - - const cloudId = providedCloudId || (await getJiraCloudId(domain, accessToken)) - logger.info('Using cloud ID:', cloudId) - - const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId') - if (!cloudIdValidation.isValid) { - return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) - } - - const issueIdValidation = validateJiraIssueKey(issueId, 'issueId') - if (!issueIdValidation.isValid) { - return NextResponse.json({ error: issueIdValidation.error }, { status: 400 }) - } - - const url = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${issueId}` - - logger.info('Fetching Jira issue from:', url) - - const response = await fetch(url, { - method: 'GET', - headers: { - Authorization: `Bearer ${accessToken}`, - Accept: 'application/json', - }, - }) - - if (!response.ok) { - logger.error('Jira API error:', { - status: response.status, - statusText: response.statusText, - }) - - let errorMessage - try { - const errorData = await response.json() - logger.error('Error details:', errorData) - errorMessage = errorData.message || `Failed to fetch issue (${response.status})` - } catch (_e) { - errorMessage = `Failed to fetch issue: ${response.status} ${response.statusText}` - } - return NextResponse.json({ error: errorMessage }, { status: response.status }) - } - - const data = await response.json() - logger.info('Successfully fetched issue:', data.key) - - const issueInfo: any = { - id: data.key, - name: data.fields.summary, - mimeType: 'jira/issue', - url: `https://${domain}/browse/${data.key}`, - modifiedTime: data.fields.updated, - webViewLink: `https://${domain}/browse/${data.key}`, - status: data.fields.status?.name, - description: data.fields.description, - priority: data.fields.priority?.name, - assignee: data.fields.assignee?.displayName, - reporter: data.fields.reporter?.displayName, - project: { - key: data.fields.project?.key, - 
name: data.fields.project?.name, - }, - } - - return NextResponse.json({ - issue: issueInfo, - cloudId, - }) - } catch (error) { - logger.error('Error processing request:', error) - return NextResponse.json( - { - error: 'Failed to retrieve Jira issue', - details: (error as Error).message, - }, - { status: 500 } - ) - } -} diff --git a/apps/sim/app/api/tools/jira/update/route.ts b/apps/sim/app/api/tools/jira/update/route.ts index d4ad86af6..c77dceb41 100644 --- a/apps/sim/app/api/tools/jira/update/route.ts +++ b/apps/sim/app/api/tools/jira/update/route.ts @@ -16,9 +16,16 @@ const jiraUpdateSchema = z.object({ summary: z.string().optional(), title: z.string().optional(), description: z.string().optional(), - status: z.string().optional(), priority: z.string().optional(), assignee: z.string().optional(), + labels: z.array(z.string()).optional(), + components: z.array(z.string()).optional(), + duedate: z.string().optional(), + fixVersions: z.array(z.string()).optional(), + environment: z.string().optional(), + customFieldId: z.string().optional(), + customFieldValue: z.string().optional(), + notifyUsers: z.boolean().optional(), cloudId: z.string().optional(), }) @@ -45,9 +52,16 @@ export async function PUT(request: NextRequest) { summary, title, description, - status, priority, assignee, + labels, + components, + duedate, + fixVersions, + environment, + customFieldId, + customFieldValue, + notifyUsers, cloudId: providedCloudId, } = validation.data @@ -64,7 +78,8 @@ export async function PUT(request: NextRequest) { return NextResponse.json({ error: issueKeyValidation.error }, { status: 400 }) } - const url = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${issueKey}` + const notifyParam = notifyUsers === false ? 
'?notifyUsers=false' : '' + const url = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${issueKey}${notifyParam}` logger.info('Updating Jira issue at:', url) @@ -93,24 +108,65 @@ export async function PUT(request: NextRequest) { } } - if (status !== undefined && status !== null && status !== '') { - fields.status = { - name: status, - } - } - if (priority !== undefined && priority !== null && priority !== '') { - fields.priority = { - name: priority, - } + const isNumericId = /^\d+$/.test(priority) + fields.priority = isNumericId ? { id: priority } : { name: priority } } if (assignee !== undefined && assignee !== null && assignee !== '') { fields.assignee = { - id: assignee, + accountId: assignee, } } + if (labels !== undefined && labels !== null && labels.length > 0) { + fields.labels = labels + } + + if (components !== undefined && components !== null && components.length > 0) { + fields.components = components.map((name) => ({ name })) + } + + if (duedate !== undefined && duedate !== null && duedate !== '') { + fields.duedate = duedate + } + + if (fixVersions !== undefined && fixVersions !== null && fixVersions.length > 0) { + fields.fixVersions = fixVersions.map((name) => ({ name })) + } + + if (environment !== undefined && environment !== null && environment !== '') { + fields.environment = { + type: 'doc', + version: 1, + content: [ + { + type: 'paragraph', + content: [ + { + type: 'text', + text: environment, + }, + ], + }, + ], + } + } + + if ( + customFieldId !== undefined && + customFieldId !== null && + customFieldId !== '' && + customFieldValue !== undefined && + customFieldValue !== null && + customFieldValue !== '' + ) { + const fieldId = customFieldId.startsWith('customfield_') + ? 
customFieldId + : `customfield_${customFieldId}` + fields[fieldId] = customFieldValue + } + const requestBody = { fields } const response = await fetch(url, { diff --git a/apps/sim/app/api/tools/jira/write/route.ts b/apps/sim/app/api/tools/jira/write/route.ts index 61ec34e01..cf3168e75 100644 --- a/apps/sim/app/api/tools/jira/write/route.ts +++ b/apps/sim/app/api/tools/jira/write/route.ts @@ -32,6 +32,8 @@ export async function POST(request: NextRequest) { environment, customFieldId, customFieldValue, + components, + fixVersions, } = await request.json() if (!domain) { @@ -73,10 +75,9 @@ export async function POST(request: NextRequest) { logger.info('Creating Jira issue at:', url) + const isNumericProjectId = /^\d+$/.test(projectId) const fields: Record = { - project: { - id: projectId, - }, + project: isNumericProjectId ? { id: projectId } : { key: projectId }, issuetype: { name: normalizedIssueType, }, @@ -114,13 +115,31 @@ export async function POST(request: NextRequest) { fields.labels = labels } + if ( + components !== undefined && + components !== null && + Array.isArray(components) && + components.length > 0 + ) { + fields.components = components.map((name: string) => ({ name })) + } + if (duedate !== undefined && duedate !== null && duedate !== '') { fields.duedate = duedate } + if ( + fixVersions !== undefined && + fixVersions !== null && + Array.isArray(fixVersions) && + fixVersions.length > 0 + ) { + fields.fixVersions = fixVersions.map((name: string) => ({ name })) + } + if (reporter !== undefined && reporter !== null && reporter !== '') { fields.reporter = { - id: reporter, + accountId: reporter, } } @@ -220,8 +239,10 @@ export async function POST(request: NextRequest) { success: true, output: { ts: new Date().toISOString(), + id: responseData.id || '', issueKey: issueKey, - summary: responseData.fields?.summary || 'Issue created', + self: responseData.self || '', + summary: responseData.fields?.summary || summary || 'Issue created', success: true, 
url: `https://${domain}/browse/${issueKey}`, ...(assigneeId && { assigneeId }), diff --git a/apps/sim/app/api/tools/jsm/approvals/route.ts b/apps/sim/app/api/tools/jsm/approvals/route.ts index 08e51725a..e579121e8 100644 --- a/apps/sim/app/api/tools/jsm/approvals/route.ts +++ b/apps/sim/app/api/tools/jsm/approvals/route.ts @@ -165,8 +165,26 @@ export async function POST(request: NextRequest) { issueIdOrKey, approvalId, decision, - success: true, + id: data.id ?? null, + name: data.name ?? null, + finalDecision: data.finalDecision ?? null, + canAnswerApproval: data.canAnswerApproval ?? null, + approvers: (data.approvers ?? []).map((a: Record) => { + const approver = a.approver as Record | undefined + return { + approver: { + accountId: approver?.accountId ?? null, + displayName: approver?.displayName ?? null, + emailAddress: approver?.emailAddress ?? null, + active: approver?.active ?? null, + }, + approverDecision: a.approverDecision ?? null, + } + }), + createdDate: data.createdDate ?? null, + completedDate: data.completedDate ?? null, approval: data, + success: true, }, }) } diff --git a/apps/sim/app/api/tools/jsm/comment/route.ts b/apps/sim/app/api/tools/jsm/comment/route.ts index ab2e3b1e5..946a17bb2 100644 --- a/apps/sim/app/api/tools/jsm/comment/route.ts +++ b/apps/sim/app/api/tools/jsm/comment/route.ts @@ -95,6 +95,14 @@ export async function POST(request: NextRequest) { commentId: data.id, body: data.body, isPublic: data.public, + author: data.author + ? { + accountId: data.author.accountId ?? null, + displayName: data.author.displayName ?? null, + emailAddress: data.author.emailAddress ?? null, + } + : null, + createdDate: data.created ?? 
null, success: true, }, }) diff --git a/apps/sim/app/api/tools/jsm/comments/route.ts b/apps/sim/app/api/tools/jsm/comments/route.ts index a2ca2c47d..d68c51b8b 100644 --- a/apps/sim/app/api/tools/jsm/comments/route.ts +++ b/apps/sim/app/api/tools/jsm/comments/route.ts @@ -23,6 +23,7 @@ export async function POST(request: NextRequest) { issueIdOrKey, isPublic, internal, + expand, start, limit, } = body @@ -57,8 +58,9 @@ export async function POST(request: NextRequest) { const baseUrl = getJsmApiBaseUrl(cloudId) const params = new URLSearchParams() - if (isPublic) params.append('public', isPublic) - if (internal) params.append('internal', internal) + if (isPublic !== undefined) params.append('public', String(isPublic)) + if (internal !== undefined) params.append('internal', String(internal)) + if (expand) params.append('expand', expand) if (start) params.append('start', start) if (limit) params.append('limit', limit) diff --git a/apps/sim/app/api/tools/jsm/customers/route.ts b/apps/sim/app/api/tools/jsm/customers/route.ts index f05d39187..cf9fcf7e6 100644 --- a/apps/sim/app/api/tools/jsm/customers/route.ts +++ b/apps/sim/app/api/tools/jsm/customers/route.ts @@ -24,6 +24,7 @@ export async function POST(request: NextRequest) { query, start, limit, + accountIds, emails, } = body @@ -56,24 +57,27 @@ export async function POST(request: NextRequest) { const baseUrl = getJsmApiBaseUrl(cloudId) - const parsedEmails = emails - ? typeof emails === 'string' - ? emails + const rawIds = accountIds || emails + const parsedAccountIds = rawIds + ? typeof rawIds === 'string' + ? rawIds .split(',') - .map((email: string) => email.trim()) - .filter((email: string) => email) - : emails + .map((id: string) => id.trim()) + .filter((id: string) => id) + : Array.isArray(rawIds) + ? 
rawIds + : [] : [] - const isAddOperation = parsedEmails.length > 0 + const isAddOperation = parsedAccountIds.length > 0 if (isAddOperation) { const url = `${baseUrl}/servicedesk/${serviceDeskId}/customer` - logger.info('Adding customers to:', url, { emails: parsedEmails }) + logger.info('Adding customers to:', url, { accountIds: parsedAccountIds }) const requestBody: Record = { - usernames: parsedEmails, + accountIds: parsedAccountIds, } const response = await fetch(url, { diff --git a/apps/sim/app/api/tools/jsm/request/route.ts b/apps/sim/app/api/tools/jsm/request/route.ts index 92e5e9f4c..ae5b150b5 100644 --- a/apps/sim/app/api/tools/jsm/request/route.ts +++ b/apps/sim/app/api/tools/jsm/request/route.ts @@ -31,6 +31,9 @@ export async function POST(request: NextRequest) { description, raiseOnBehalfOf, requestFieldValues, + requestParticipants, + channel, + expand, } = body if (!domain) { @@ -80,6 +83,19 @@ export async function POST(request: NextRequest) { if (raiseOnBehalfOf) { requestBody.raiseOnBehalfOf = raiseOnBehalfOf } + if (requestParticipants) { + requestBody.requestParticipants = Array.isArray(requestParticipants) + ? requestParticipants + : typeof requestParticipants === 'string' + ? requestParticipants + .split(',') + .map((id: string) => id.trim()) + .filter(Boolean) + : [] + } + if (channel) { + requestBody.channel = channel + } const response = await fetch(url, { method: 'POST', @@ -111,6 +127,21 @@ export async function POST(request: NextRequest) { issueKey: data.issueKey, requestTypeId: data.requestTypeId, serviceDeskId: data.serviceDeskId, + createdDate: data.createdDate ?? null, + currentStatus: data.currentStatus + ? { + status: data.currentStatus.status ?? null, + statusCategory: data.currentStatus.statusCategory ?? null, + statusDate: data.currentStatus.statusDate ?? null, + } + : null, + reporter: data.reporter + ? { + accountId: data.reporter.accountId ?? null, + displayName: data.reporter.displayName ?? 
null, + emailAddress: data.reporter.emailAddress ?? null, + } + : null, success: true, url: `https://${domain}/browse/${data.issueKey}`, }, @@ -126,7 +157,10 @@ export async function POST(request: NextRequest) { return NextResponse.json({ error: issueIdOrKeyValidation.error }, { status: 400 }) } - const url = `${baseUrl}/request/${issueIdOrKey}` + const params = new URLSearchParams() + if (expand) params.append('expand', expand) + + const url = `${baseUrl}/request/${issueIdOrKey}${params.toString() ? `?${params.toString()}` : ''}` logger.info('Fetching request from:', url) @@ -155,6 +189,32 @@ export async function POST(request: NextRequest) { success: true, output: { ts: new Date().toISOString(), + issueId: data.issueId ?? null, + issueKey: data.issueKey ?? null, + requestTypeId: data.requestTypeId ?? null, + serviceDeskId: data.serviceDeskId ?? null, + createdDate: data.createdDate ?? null, + currentStatus: data.currentStatus + ? { + status: data.currentStatus.status ?? null, + statusCategory: data.currentStatus.statusCategory ?? null, + statusDate: data.currentStatus.statusDate ?? null, + } + : null, + reporter: data.reporter + ? { + accountId: data.reporter.accountId ?? null, + displayName: data.reporter.displayName ?? null, + emailAddress: data.reporter.emailAddress ?? null, + active: data.reporter.active ?? true, + } + : null, + requestFieldValues: (data.requestFieldValues ?? []).map((fv: Record) => ({ + fieldId: fv.fieldId ?? null, + label: fv.label ?? null, + value: fv.value ?? 
null, + })), + url: `https://${domain}/browse/${data.issueKey}`, request: data, }, }) diff --git a/apps/sim/app/api/tools/jsm/requests/route.ts b/apps/sim/app/api/tools/jsm/requests/route.ts index f2f0dc0e7..70a4cc8ce 100644 --- a/apps/sim/app/api/tools/jsm/requests/route.ts +++ b/apps/sim/app/api/tools/jsm/requests/route.ts @@ -1,7 +1,11 @@ import { createLogger } from '@sim/logger' import { type NextRequest, NextResponse } from 'next/server' import { checkInternalAuth } from '@/lib/auth/hybrid' -import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation' +import { + validateAlphanumericId, + validateEnum, + validateJiraCloudId, +} from '@/lib/core/security/input-validation' import { getJiraCloudId, getJsmApiBaseUrl, getJsmHeaders } from '@/tools/jsm/utils' export const dynamic = 'force-dynamic' @@ -23,7 +27,9 @@ export async function POST(request: NextRequest) { serviceDeskId, requestOwnership, requestStatus, + requestTypeId, searchTerm, + expand, start, limit, } = body @@ -52,17 +58,45 @@ export async function POST(request: NextRequest) { } } + const VALID_REQUEST_OWNERSHIP = [ + 'OWNED_REQUESTS', + 'PARTICIPATED_REQUESTS', + 'APPROVER', + 'ALL_REQUESTS', + ] as const + const VALID_REQUEST_STATUS = ['OPEN_REQUESTS', 'CLOSED_REQUESTS', 'ALL_REQUESTS'] as const + + if (requestOwnership) { + const ownershipValidation = validateEnum( + requestOwnership, + VALID_REQUEST_OWNERSHIP, + 'requestOwnership' + ) + if (!ownershipValidation.isValid) { + return NextResponse.json({ error: ownershipValidation.error }, { status: 400 }) + } + } + + if (requestStatus) { + const statusValidation = validateEnum(requestStatus, VALID_REQUEST_STATUS, 'requestStatus') + if (!statusValidation.isValid) { + return NextResponse.json({ error: statusValidation.error }, { status: 400 }) + } + } + const baseUrl = getJsmApiBaseUrl(cloudId) const params = new URLSearchParams() if (serviceDeskId) params.append('serviceDeskId', serviceDeskId) - if 
(requestOwnership && requestOwnership !== 'ALL_REQUESTS') { + if (requestOwnership) { params.append('requestOwnership', requestOwnership) } - if (requestStatus && requestStatus !== 'ALL') { + if (requestStatus) { params.append('requestStatus', requestStatus) } + if (requestTypeId) params.append('requestTypeId', requestTypeId) if (searchTerm) params.append('searchTerm', searchTerm) + if (expand) params.append('expand', expand) if (start) params.append('start', start) if (limit) params.append('limit', limit) diff --git a/apps/sim/app/api/tools/jsm/requesttypefields/route.ts b/apps/sim/app/api/tools/jsm/requesttypefields/route.ts new file mode 100644 index 000000000..5e86337ae --- /dev/null +++ b/apps/sim/app/api/tools/jsm/requesttypefields/route.ts @@ -0,0 +1,119 @@ +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { checkInternalAuth } from '@/lib/auth/hybrid' +import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation' +import { getJiraCloudId, getJsmApiBaseUrl, getJsmHeaders } from '@/tools/jsm/utils' + +export const dynamic = 'force-dynamic' + +const logger = createLogger('JsmRequestTypeFieldsAPI') + +export async function POST(request: NextRequest) { + const auth = await checkInternalAuth(request) + if (!auth.success || !auth.userId) { + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + try { + const body = await request.json() + const { domain, accessToken, cloudId: cloudIdParam, serviceDeskId, requestTypeId } = body + + if (!domain) { + logger.error('Missing domain in request') + return NextResponse.json({ error: 'Domain is required' }, { status: 400 }) + } + + if (!accessToken) { + logger.error('Missing access token in request') + return NextResponse.json({ error: 'Access token is required' }, { status: 400 }) + } + + if (!serviceDeskId) { + logger.error('Missing serviceDeskId in request') + return NextResponse.json({ 
error: 'Service Desk ID is required' }, { status: 400 }) + } + + if (!requestTypeId) { + logger.error('Missing requestTypeId in request') + return NextResponse.json({ error: 'Request Type ID is required' }, { status: 400 }) + } + + const cloudId = cloudIdParam || (await getJiraCloudId(domain, accessToken)) + + const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId') + if (!cloudIdValidation.isValid) { + return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 }) + } + + const serviceDeskIdValidation = validateAlphanumericId(serviceDeskId, 'serviceDeskId') + if (!serviceDeskIdValidation.isValid) { + return NextResponse.json({ error: serviceDeskIdValidation.error }, { status: 400 }) + } + + const requestTypeIdValidation = validateAlphanumericId(requestTypeId, 'requestTypeId') + if (!requestTypeIdValidation.isValid) { + return NextResponse.json({ error: requestTypeIdValidation.error }, { status: 400 }) + } + + const baseUrl = getJsmApiBaseUrl(cloudId) + const url = `${baseUrl}/servicedesk/${serviceDeskId}/requesttype/${requestTypeId}/field` + + logger.info('Fetching request type fields from:', url) + + const response = await fetch(url, { + method: 'GET', + headers: getJsmHeaders(accessToken), + }) + + if (!response.ok) { + const errorText = await response.text() + logger.error('JSM API error:', { + status: response.status, + statusText: response.statusText, + error: errorText, + }) + + return NextResponse.json( + { error: `JSM API error: ${response.status} ${response.statusText}`, details: errorText }, + { status: response.status } + ) + } + + const data = await response.json() + + return NextResponse.json({ + success: true, + output: { + ts: new Date().toISOString(), + serviceDeskId, + requestTypeId, + canAddRequestParticipants: data.canAddRequestParticipants ?? false, + canRaiseOnBehalfOf: data.canRaiseOnBehalfOf ?? false, + requestTypeFields: (data.requestTypeFields ?? []).map((field: Record) => ({ + fieldId: field.fieldId ?? 
null, + name: field.name ?? null, + description: field.description ?? null, + required: field.required ?? false, + visible: field.visible ?? true, + validValues: field.validValues ?? [], + presetValues: field.presetValues ?? [], + defaultValues: field.defaultValues ?? [], + jiraSchema: field.jiraSchema ?? null, + })), + }, + }) + } catch (error) { + logger.error('Error fetching request type fields:', { + error: error instanceof Error ? error.message : String(error), + stack: error instanceof Error ? error.stack : undefined, + }) + + return NextResponse.json( + { + error: error instanceof Error ? error.message : 'Internal server error', + success: false, + }, + { status: 500 } + ) + } +} diff --git a/apps/sim/app/api/tools/jsm/requesttypes/route.ts b/apps/sim/app/api/tools/jsm/requesttypes/route.ts index 8591f116b..9426fe847 100644 --- a/apps/sim/app/api/tools/jsm/requesttypes/route.ts +++ b/apps/sim/app/api/tools/jsm/requesttypes/route.ts @@ -16,7 +16,17 @@ export async function POST(request: NextRequest) { try { const body = await request.json() - const { domain, accessToken, cloudId: cloudIdParam, serviceDeskId, start, limit } = body + const { + domain, + accessToken, + cloudId: cloudIdParam, + serviceDeskId, + searchQuery, + groupId, + expand, + start, + limit, + } = body if (!domain) { logger.error('Missing domain in request') @@ -48,6 +58,9 @@ export async function POST(request: NextRequest) { const baseUrl = getJsmApiBaseUrl(cloudId) const params = new URLSearchParams() + if (searchQuery) params.append('searchQuery', searchQuery) + if (groupId) params.append('groupId', groupId) + if (expand) params.append('expand', expand) if (start) params.append('start', start) if (limit) params.append('limit', limit) diff --git a/apps/sim/app/api/tools/jsm/servicedesks/route.ts b/apps/sim/app/api/tools/jsm/servicedesks/route.ts index 607508a61..e6721be52 100644 --- a/apps/sim/app/api/tools/jsm/servicedesks/route.ts +++ b/apps/sim/app/api/tools/jsm/servicedesks/route.ts @@ 
-16,7 +16,7 @@ export async function POST(request: NextRequest) { try { const body = await request.json() - const { domain, accessToken, cloudId: cloudIdParam, start, limit } = body + const { domain, accessToken, cloudId: cloudIdParam, expand, start, limit } = body if (!domain) { logger.error('Missing domain in request') @@ -38,6 +38,7 @@ export async function POST(request: NextRequest) { const baseUrl = getJsmApiBaseUrl(cloudId) const params = new URLSearchParams() + if (expand) params.append('expand', expand) if (start) params.append('start', start) if (limit) params.append('limit', limit) diff --git a/apps/sim/app/api/tools/jsm/transitions/route.ts b/apps/sim/app/api/tools/jsm/transitions/route.ts index 5d5f2e260..d1001452f 100644 --- a/apps/sim/app/api/tools/jsm/transitions/route.ts +++ b/apps/sim/app/api/tools/jsm/transitions/route.ts @@ -16,7 +16,7 @@ export async function POST(request: NextRequest) { try { const body = await request.json() - const { domain, accessToken, cloudId: cloudIdParam, issueIdOrKey } = body + const { domain, accessToken, cloudId: cloudIdParam, issueIdOrKey, start, limit } = body if (!domain) { logger.error('Missing domain in request') @@ -47,7 +47,11 @@ export async function POST(request: NextRequest) { const baseUrl = getJsmApiBaseUrl(cloudId) - const url = `${baseUrl}/request/${issueIdOrKey}/transition` + const params = new URLSearchParams() + if (start) params.append('start', start) + if (limit) params.append('limit', limit) + + const url = `${baseUrl}/request/${issueIdOrKey}/transition${params.toString() ? `?${params.toString()}` : ''}` logger.info('Fetching transitions from:', url) @@ -78,6 +82,8 @@ export async function POST(request: NextRequest) { ts: new Date().toISOString(), issueIdOrKey, transitions: data.values || [], + total: data.size || 0, + isLastPage: data.isLastPage ?? 
true, }, }) } catch (error) { diff --git a/apps/sim/app/api/tools/onepassword/create-item/route.ts b/apps/sim/app/api/tools/onepassword/create-item/route.ts new file mode 100644 index 000000000..dae8cbffa --- /dev/null +++ b/apps/sim/app/api/tools/onepassword/create-item/route.ts @@ -0,0 +1,113 @@ +import { randomUUID } from 'crypto' +import type { ItemCreateParams } from '@1password/sdk' +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { checkInternalAuth } from '@/lib/auth/hybrid' +import { + connectRequest, + createOnePasswordClient, + normalizeSdkItem, + resolveCredentials, + toSdkCategory, + toSdkFieldType, +} from '../utils' + +const logger = createLogger('OnePasswordCreateItemAPI') + +const CreateItemSchema = z.object({ + connectionMode: z.enum(['service_account', 'connect']).nullish(), + serviceAccountToken: z.string().nullish(), + serverUrl: z.string().nullish(), + apiKey: z.string().nullish(), + vaultId: z.string().min(1, 'Vault ID is required'), + category: z.string().min(1, 'Category is required'), + title: z.string().nullish(), + tags: z.string().nullish(), + fields: z.string().nullish(), +}) + +export async function POST(request: NextRequest) { + const requestId = randomUUID().slice(0, 8) + + const auth = await checkInternalAuth(request) + if (!auth.success || !auth.userId) { + logger.warn(`[${requestId}] Unauthorized 1Password create-item attempt`) + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + try { + const body = await request.json() + const params = CreateItemSchema.parse(body) + const creds = resolveCredentials(params) + + logger.info(`[${requestId}] Creating item in vault ${params.vaultId} (${creds.mode} mode)`) + + if (creds.mode === 'service_account') { + const client = await createOnePasswordClient(creds.serviceAccountToken!) + + const parsedTags = params.tags + ? 
params.tags + .split(',') + .map((t) => t.trim()) + .filter(Boolean) + : undefined + + const parsedFields = params.fields + ? (JSON.parse(params.fields) as Array>).map((f) => ({ + id: f.id || randomUUID().slice(0, 8), + title: f.label || f.title || '', + fieldType: toSdkFieldType(f.type || 'STRING'), + value: f.value || '', + sectionId: f.section?.id ?? f.sectionId, + })) + : undefined + + const item = await client.items.create({ + vaultId: params.vaultId, + category: toSdkCategory(params.category), + title: params.title || '', + tags: parsedTags, + fields: parsedFields, + } as ItemCreateParams) + + return NextResponse.json(normalizeSdkItem(item)) + } + + const connectBody: Record = { + vault: { id: params.vaultId }, + category: params.category, + } + if (params.title) connectBody.title = params.title + if (params.tags) connectBody.tags = params.tags.split(',').map((t) => t.trim()) + if (params.fields) connectBody.fields = JSON.parse(params.fields) + + const response = await connectRequest({ + serverUrl: creds.serverUrl!, + apiKey: creds.apiKey!, + path: `/v1/vaults/${params.vaultId}/items`, + method: 'POST', + body: connectBody, + }) + + const data = await response.json() + if (!response.ok) { + return NextResponse.json( + { error: data.message || 'Failed to create item' }, + { status: response.status } + ) + } + + return NextResponse.json(data) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Invalid request data', details: error.errors }, + { status: 400 } + ) + } + const message = error instanceof Error ? 
error.message : 'Unknown error' + logger.error(`[${requestId}] Create item failed:`, error) + return NextResponse.json({ error: `Failed to create item: ${message}` }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/tools/onepassword/delete-item/route.ts b/apps/sim/app/api/tools/onepassword/delete-item/route.ts new file mode 100644 index 000000000..8909adf88 --- /dev/null +++ b/apps/sim/app/api/tools/onepassword/delete-item/route.ts @@ -0,0 +1,70 @@ +import { randomUUID } from 'crypto' +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { checkInternalAuth } from '@/lib/auth/hybrid' +import { connectRequest, createOnePasswordClient, resolveCredentials } from '../utils' + +const logger = createLogger('OnePasswordDeleteItemAPI') + +const DeleteItemSchema = z.object({ + connectionMode: z.enum(['service_account', 'connect']).nullish(), + serviceAccountToken: z.string().nullish(), + serverUrl: z.string().nullish(), + apiKey: z.string().nullish(), + vaultId: z.string().min(1, 'Vault ID is required'), + itemId: z.string().min(1, 'Item ID is required'), +}) + +export async function POST(request: NextRequest) { + const requestId = randomUUID().slice(0, 8) + + const auth = await checkInternalAuth(request) + if (!auth.success || !auth.userId) { + logger.warn(`[${requestId}] Unauthorized 1Password delete-item attempt`) + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + try { + const body = await request.json() + const params = DeleteItemSchema.parse(body) + const creds = resolveCredentials(params) + + logger.info( + `[${requestId}] Deleting item ${params.itemId} from vault ${params.vaultId} (${creds.mode} mode)` + ) + + if (creds.mode === 'service_account') { + const client = await createOnePasswordClient(creds.serviceAccountToken!) 
+ await client.items.delete(params.vaultId, params.itemId) + return NextResponse.json({ success: true }) + } + + const response = await connectRequest({ + serverUrl: creds.serverUrl!, + apiKey: creds.apiKey!, + path: `/v1/vaults/${params.vaultId}/items/${params.itemId}`, + method: 'DELETE', + }) + + if (!response.ok) { + const data = await response.json().catch(() => ({})) + return NextResponse.json( + { error: (data as Record).message || 'Failed to delete item' }, + { status: response.status } + ) + } + + return NextResponse.json({ success: true }) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Invalid request data', details: error.errors }, + { status: 400 } + ) + } + const message = error instanceof Error ? error.message : 'Unknown error' + logger.error(`[${requestId}] Delete item failed:`, error) + return NextResponse.json({ error: `Failed to delete item: ${message}` }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/tools/onepassword/get-item/route.ts b/apps/sim/app/api/tools/onepassword/get-item/route.ts new file mode 100644 index 000000000..63ac2906b --- /dev/null +++ b/apps/sim/app/api/tools/onepassword/get-item/route.ts @@ -0,0 +1,75 @@ +import { randomUUID } from 'crypto' +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { checkInternalAuth } from '@/lib/auth/hybrid' +import { + connectRequest, + createOnePasswordClient, + normalizeSdkItem, + resolveCredentials, +} from '../utils' + +const logger = createLogger('OnePasswordGetItemAPI') + +const GetItemSchema = z.object({ + connectionMode: z.enum(['service_account', 'connect']).nullish(), + serviceAccountToken: z.string().nullish(), + serverUrl: z.string().nullish(), + apiKey: z.string().nullish(), + vaultId: z.string().min(1, 'Vault ID is required'), + itemId: z.string().min(1, 'Item ID is required'), +}) + +export async function POST(request: NextRequest) { + 
const requestId = randomUUID().slice(0, 8) + + const auth = await checkInternalAuth(request) + if (!auth.success || !auth.userId) { + logger.warn(`[${requestId}] Unauthorized 1Password get-item attempt`) + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + try { + const body = await request.json() + const params = GetItemSchema.parse(body) + const creds = resolveCredentials(params) + + logger.info( + `[${requestId}] Getting item ${params.itemId} from vault ${params.vaultId} (${creds.mode} mode)` + ) + + if (creds.mode === 'service_account') { + const client = await createOnePasswordClient(creds.serviceAccountToken!) + const item = await client.items.get(params.vaultId, params.itemId) + return NextResponse.json(normalizeSdkItem(item)) + } + + const response = await connectRequest({ + serverUrl: creds.serverUrl!, + apiKey: creds.apiKey!, + path: `/v1/vaults/${params.vaultId}/items/${params.itemId}`, + method: 'GET', + }) + + const data = await response.json() + if (!response.ok) { + return NextResponse.json( + { error: data.message || 'Failed to get item' }, + { status: response.status } + ) + } + + return NextResponse.json(data) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Invalid request data', details: error.errors }, + { status: 400 } + ) + } + const message = error instanceof Error ? 
error.message : 'Unknown error' + logger.error(`[${requestId}] Get item failed:`, error) + return NextResponse.json({ error: `Failed to get item: ${message}` }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/tools/onepassword/get-vault/route.ts b/apps/sim/app/api/tools/onepassword/get-vault/route.ts new file mode 100644 index 000000000..16343134a --- /dev/null +++ b/apps/sim/app/api/tools/onepassword/get-vault/route.ts @@ -0,0 +1,78 @@ +import { randomUUID } from 'crypto' +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { checkInternalAuth } from '@/lib/auth/hybrid' +import { + connectRequest, + createOnePasswordClient, + normalizeSdkVault, + resolveCredentials, +} from '../utils' + +const logger = createLogger('OnePasswordGetVaultAPI') + +const GetVaultSchema = z.object({ + connectionMode: z.enum(['service_account', 'connect']).nullish(), + serviceAccountToken: z.string().nullish(), + serverUrl: z.string().nullish(), + apiKey: z.string().nullish(), + vaultId: z.string().min(1, 'Vault ID is required'), +}) + +export async function POST(request: NextRequest) { + const requestId = randomUUID().slice(0, 8) + + const auth = await checkInternalAuth(request) + if (!auth.success || !auth.userId) { + logger.warn(`[${requestId}] Unauthorized 1Password get-vault attempt`) + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + try { + const body = await request.json() + const params = GetVaultSchema.parse(body) + const creds = resolveCredentials(params) + + logger.info(`[${requestId}] Getting 1Password vault ${params.vaultId} (${creds.mode} mode)`) + + if (creds.mode === 'service_account') { + const client = await createOnePasswordClient(creds.serviceAccountToken!) 
+ const vaults = await client.vaults.list() + const vault = vaults.find((v) => v.id === params.vaultId) + + if (!vault) { + return NextResponse.json({ error: 'Vault not found' }, { status: 404 }) + } + + return NextResponse.json(normalizeSdkVault(vault)) + } + + const response = await connectRequest({ + serverUrl: creds.serverUrl!, + apiKey: creds.apiKey!, + path: `/v1/vaults/${params.vaultId}`, + method: 'GET', + }) + + const data = await response.json() + if (!response.ok) { + return NextResponse.json( + { error: data.message || 'Failed to get vault' }, + { status: response.status } + ) + } + + return NextResponse.json(data) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Invalid request data', details: error.errors }, + { status: 400 } + ) + } + const message = error instanceof Error ? error.message : 'Unknown error' + logger.error(`[${requestId}] Get vault failed:`, error) + return NextResponse.json({ error: `Failed to get vault: ${message}` }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/tools/onepassword/list-items/route.ts b/apps/sim/app/api/tools/onepassword/list-items/route.ts new file mode 100644 index 000000000..0e9afabdc --- /dev/null +++ b/apps/sim/app/api/tools/onepassword/list-items/route.ts @@ -0,0 +1,87 @@ +import { randomUUID } from 'crypto' +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { checkInternalAuth } from '@/lib/auth/hybrid' +import { + connectRequest, + createOnePasswordClient, + normalizeSdkItemOverview, + resolveCredentials, +} from '../utils' + +const logger = createLogger('OnePasswordListItemsAPI') + +const ListItemsSchema = z.object({ + connectionMode: z.enum(['service_account', 'connect']).nullish(), + serviceAccountToken: z.string().nullish(), + serverUrl: z.string().nullish(), + apiKey: z.string().nullish(), + vaultId: z.string().min(1, 'Vault ID is required'), + filter: 
z.string().nullish(), +}) + +export async function POST(request: NextRequest) { + const requestId = randomUUID().slice(0, 8) + + const auth = await checkInternalAuth(request) + if (!auth.success || !auth.userId) { + logger.warn(`[${requestId}] Unauthorized 1Password list-items attempt`) + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + try { + const body = await request.json() + const params = ListItemsSchema.parse(body) + const creds = resolveCredentials(params) + + logger.info(`[${requestId}] Listing items in vault ${params.vaultId} (${creds.mode} mode)`) + + if (creds.mode === 'service_account') { + const client = await createOnePasswordClient(creds.serviceAccountToken!) + const items = await client.items.list(params.vaultId) + const normalized = items.map(normalizeSdkItemOverview) + + if (params.filter) { + const filterLower = params.filter.toLowerCase() + const filtered = normalized.filter( + (item) => + item.title?.toLowerCase().includes(filterLower) || + item.id?.toLowerCase().includes(filterLower) + ) + return NextResponse.json(filtered) + } + + return NextResponse.json(normalized) + } + + const query = params.filter ? `filter=${encodeURIComponent(params.filter)}` : undefined + const response = await connectRequest({ + serverUrl: creds.serverUrl!, + apiKey: creds.apiKey!, + path: `/v1/vaults/${params.vaultId}/items`, + method: 'GET', + query, + }) + + const data = await response.json() + if (!response.ok) { + return NextResponse.json( + { error: data.message || 'Failed to list items' }, + { status: response.status } + ) + } + + return NextResponse.json(data) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Invalid request data', details: error.errors }, + { status: 400 } + ) + } + const message = error instanceof Error ? 
error.message : 'Unknown error' + logger.error(`[${requestId}] List items failed:`, error) + return NextResponse.json({ error: `Failed to list items: ${message}` }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/tools/onepassword/list-vaults/route.ts b/apps/sim/app/api/tools/onepassword/list-vaults/route.ts new file mode 100644 index 000000000..d1b08e781 --- /dev/null +++ b/apps/sim/app/api/tools/onepassword/list-vaults/route.ts @@ -0,0 +1,85 @@ +import { randomUUID } from 'crypto' +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { checkInternalAuth } from '@/lib/auth/hybrid' +import { + connectRequest, + createOnePasswordClient, + normalizeSdkVault, + resolveCredentials, +} from '../utils' + +const logger = createLogger('OnePasswordListVaultsAPI') + +const ListVaultsSchema = z.object({ + connectionMode: z.enum(['service_account', 'connect']).nullish(), + serviceAccountToken: z.string().nullish(), + serverUrl: z.string().nullish(), + apiKey: z.string().nullish(), + filter: z.string().nullish(), +}) + +export async function POST(request: NextRequest) { + const requestId = randomUUID().slice(0, 8) + + const auth = await checkInternalAuth(request) + if (!auth.success || !auth.userId) { + logger.warn(`[${requestId}] Unauthorized 1Password list-vaults attempt`) + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + try { + const body = await request.json() + const params = ListVaultsSchema.parse(body) + const creds = resolveCredentials(params) + + logger.info(`[${requestId}] Listing 1Password vaults (${creds.mode} mode)`) + + if (creds.mode === 'service_account') { + const client = await createOnePasswordClient(creds.serviceAccountToken!) 
+ const vaults = await client.vaults.list() + const normalized = vaults.map(normalizeSdkVault) + + if (params.filter) { + const filterLower = params.filter.toLowerCase() + const filtered = normalized.filter( + (v) => + v.name?.toLowerCase().includes(filterLower) || v.id?.toLowerCase().includes(filterLower) + ) + return NextResponse.json(filtered) + } + + return NextResponse.json(normalized) + } + + const query = params.filter ? `filter=${encodeURIComponent(params.filter)}` : undefined + const response = await connectRequest({ + serverUrl: creds.serverUrl!, + apiKey: creds.apiKey!, + path: '/v1/vaults', + method: 'GET', + query, + }) + + const data = await response.json() + if (!response.ok) { + return NextResponse.json( + { error: data.message || 'Failed to list vaults' }, + { status: response.status } + ) + } + + return NextResponse.json(data) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Invalid request data', details: error.errors }, + { status: 400 } + ) + } + const message = error instanceof Error ? 
error.message : 'Unknown error' + logger.error(`[${requestId}] List vaults failed:`, error) + return NextResponse.json({ error: `Failed to list vaults: ${message}` }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/tools/onepassword/replace-item/route.ts b/apps/sim/app/api/tools/onepassword/replace-item/route.ts new file mode 100644 index 000000000..3fc198d62 --- /dev/null +++ b/apps/sim/app/api/tools/onepassword/replace-item/route.ts @@ -0,0 +1,117 @@ +import { randomUUID } from 'crypto' +import type { Item } from '@1password/sdk' +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { checkInternalAuth } from '@/lib/auth/hybrid' +import { + connectRequest, + createOnePasswordClient, + normalizeSdkItem, + resolveCredentials, + toSdkCategory, + toSdkFieldType, +} from '../utils' + +const logger = createLogger('OnePasswordReplaceItemAPI') + +const ReplaceItemSchema = z.object({ + connectionMode: z.enum(['service_account', 'connect']).nullish(), + serviceAccountToken: z.string().nullish(), + serverUrl: z.string().nullish(), + apiKey: z.string().nullish(), + vaultId: z.string().min(1, 'Vault ID is required'), + itemId: z.string().min(1, 'Item ID is required'), + item: z.string().min(1, 'Item JSON is required'), +}) + +export async function POST(request: NextRequest) { + const requestId = randomUUID().slice(0, 8) + + const auth = await checkInternalAuth(request) + if (!auth.success || !auth.userId) { + logger.warn(`[${requestId}] Unauthorized 1Password replace-item attempt`) + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + try { + const body = await request.json() + const params = ReplaceItemSchema.parse(body) + const creds = resolveCredentials(params) + const itemData = JSON.parse(params.item) + + logger.info( + `[${requestId}] Replacing item ${params.itemId} in vault ${params.vaultId} (${creds.mode} mode)` + ) + + if (creds.mode === 
'service_account') { + const client = await createOnePasswordClient(creds.serviceAccountToken!) + + const existing = await client.items.get(params.vaultId, params.itemId) + + const sdkItem = { + ...existing, + id: params.itemId, + title: itemData.title || existing.title, + category: itemData.category ? toSdkCategory(itemData.category) : existing.category, + vaultId: params.vaultId, + fields: itemData.fields + ? (itemData.fields as Array>).map((f) => ({ + id: f.id || randomUUID().slice(0, 8), + title: f.label || f.title || '', + fieldType: toSdkFieldType(f.type || 'STRING'), + value: f.value || '', + sectionId: f.section?.id ?? f.sectionId, + })) + : existing.fields, + sections: itemData.sections + ? (itemData.sections as Array>).map((s) => ({ + id: s.id || '', + title: s.label || s.title || '', + })) + : existing.sections, + notes: itemData.notes ?? existing.notes, + tags: itemData.tags ?? existing.tags, + websites: + itemData.urls || itemData.websites + ? (itemData.urls ?? itemData.websites ?? []).map((u: Record) => ({ + url: u.href || u.url || '', + label: u.label || '', + autofillBehavior: 'AnywhereOnWebsite' as const, + })) + : existing.websites, + } as Item + + const result = await client.items.put(sdkItem) + return NextResponse.json(normalizeSdkItem(result)) + } + + const response = await connectRequest({ + serverUrl: creds.serverUrl!, + apiKey: creds.apiKey!, + path: `/v1/vaults/${params.vaultId}/items/${params.itemId}`, + method: 'PUT', + body: itemData, + }) + + const data = await response.json() + if (!response.ok) { + return NextResponse.json( + { error: data.message || 'Failed to replace item' }, + { status: response.status } + ) + } + + return NextResponse.json(data) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Invalid request data', details: error.errors }, + { status: 400 } + ) + } + const message = error instanceof Error ? 
error.message : 'Unknown error' + logger.error(`[${requestId}] Replace item failed:`, error) + return NextResponse.json({ error: `Failed to replace item: ${message}` }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/tools/onepassword/resolve-secret/route.ts b/apps/sim/app/api/tools/onepassword/resolve-secret/route.ts new file mode 100644 index 000000000..408ac48c5 --- /dev/null +++ b/apps/sim/app/api/tools/onepassword/resolve-secret/route.ts @@ -0,0 +1,59 @@ +import { randomUUID } from 'crypto' +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { checkInternalAuth } from '@/lib/auth/hybrid' +import { createOnePasswordClient, resolveCredentials } from '../utils' + +const logger = createLogger('OnePasswordResolveSecretAPI') + +const ResolveSecretSchema = z.object({ + connectionMode: z.enum(['service_account', 'connect']).nullish(), + serviceAccountToken: z.string().nullish(), + serverUrl: z.string().nullish(), + apiKey: z.string().nullish(), + secretReference: z.string().min(1, 'Secret reference is required'), +}) + +export async function POST(request: NextRequest) { + const requestId = randomUUID().slice(0, 8) + + const auth = await checkInternalAuth(request) + if (!auth.success || !auth.userId) { + logger.warn(`[${requestId}] Unauthorized 1Password resolve-secret attempt`) + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + try { + const body = await request.json() + const params = ResolveSecretSchema.parse(body) + const creds = resolveCredentials(params) + + if (creds.mode !== 'service_account') { + return NextResponse.json( + { error: 'Resolve Secret is only available in Service Account mode' }, + { status: 400 } + ) + } + + logger.info(`[${requestId}] Resolving secret reference (service_account mode)`) + + const client = await createOnePasswordClient(creds.serviceAccountToken!) 
+ const secret = await client.secrets.resolve(params.secretReference) + + return NextResponse.json({ + value: secret, + reference: params.secretReference, + }) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Invalid request data', details: error.errors }, + { status: 400 } + ) + } + const message = error instanceof Error ? error.message : 'Unknown error' + logger.error(`[${requestId}] Resolve secret failed:`, error) + return NextResponse.json({ error: `Failed to resolve secret: ${message}` }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/tools/onepassword/update-item/route.ts b/apps/sim/app/api/tools/onepassword/update-item/route.ts new file mode 100644 index 000000000..543b5f052 --- /dev/null +++ b/apps/sim/app/api/tools/onepassword/update-item/route.ts @@ -0,0 +1,136 @@ +import { randomUUID } from 'crypto' +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { checkInternalAuth } from '@/lib/auth/hybrid' +import { + connectRequest, + createOnePasswordClient, + normalizeSdkItem, + resolveCredentials, +} from '../utils' + +const logger = createLogger('OnePasswordUpdateItemAPI') + +const UpdateItemSchema = z.object({ + connectionMode: z.enum(['service_account', 'connect']).nullish(), + serviceAccountToken: z.string().nullish(), + serverUrl: z.string().nullish(), + apiKey: z.string().nullish(), + vaultId: z.string().min(1, 'Vault ID is required'), + itemId: z.string().min(1, 'Item ID is required'), + operations: z.string().min(1, 'Patch operations are required'), +}) + +export async function POST(request: NextRequest) { + const requestId = randomUUID().slice(0, 8) + + const auth = await checkInternalAuth(request) + if (!auth.success || !auth.userId) { + logger.warn(`[${requestId}] Unauthorized 1Password update-item attempt`) + return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) + } + + try { + 
const body = await request.json() + const params = UpdateItemSchema.parse(body) + const creds = resolveCredentials(params) + const ops = JSON.parse(params.operations) as JsonPatchOperation[] + + logger.info( + `[${requestId}] Updating item ${params.itemId} in vault ${params.vaultId} (${creds.mode} mode)` + ) + + if (creds.mode === 'service_account') { + const client = await createOnePasswordClient(creds.serviceAccountToken!) + + const item = await client.items.get(params.vaultId, params.itemId) + + for (const op of ops) { + applyPatch(item, op) + } + + const result = await client.items.put(item) + return NextResponse.json(normalizeSdkItem(result)) + } + + const response = await connectRequest({ + serverUrl: creds.serverUrl!, + apiKey: creds.apiKey!, + path: `/v1/vaults/${params.vaultId}/items/${params.itemId}`, + method: 'PATCH', + body: ops, + }) + + const data = await response.json() + if (!response.ok) { + return NextResponse.json( + { error: data.message || 'Failed to update item' }, + { status: response.status } + ) + } + + return NextResponse.json(data) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { error: 'Invalid request data', details: error.errors }, + { status: 400 } + ) + } + const message = error instanceof Error ? error.message : 'Unknown error' + logger.error(`[${requestId}] Update item failed:`, error) + return NextResponse.json({ error: `Failed to update item: ${message}` }, { status: 500 }) + } +} + +interface JsonPatchOperation { + op: 'add' | 'remove' | 'replace' + path: string + value?: unknown +} + +/** Apply a single RFC6902 JSON Patch operation to a mutable object. 
*/ +// eslint-disable-next-line @typescript-eslint/no-explicit-any +function applyPatch(item: Record, op: JsonPatchOperation) { + const segments = op.path.split('/').filter(Boolean) + + if (segments.length === 1) { + const key = segments[0] + if (op.op === 'replace' || op.op === 'add') { + item[key] = op.value + } else if (op.op === 'remove') { + delete item[key] + } + return + } + + let target = item + for (let i = 0; i < segments.length - 1; i++) { + const seg = segments[i] + if (Array.isArray(target)) { + target = target[Number(seg)] + } else { + target = target[seg] + } + if (target === undefined || target === null) return + } + + const lastSeg = segments[segments.length - 1] + + if (op.op === 'replace' || op.op === 'add') { + if (Array.isArray(target) && lastSeg === '-') { + target.push(op.value) + } else if (Array.isArray(target)) { + target[Number(lastSeg)] = op.value + } else { + target[lastSeg] = op.value + } + } else if (op.op === 'remove') { + if (Array.isArray(target)) { + target.splice(Number(lastSeg), 1) + } else { + delete target[lastSeg] + } + } +} diff --git a/apps/sim/app/api/tools/onepassword/utils.ts b/apps/sim/app/api/tools/onepassword/utils.ts new file mode 100644 index 000000000..703b7e5ac --- /dev/null +++ b/apps/sim/app/api/tools/onepassword/utils.ts @@ -0,0 +1,357 @@ +import type { + Item, + ItemCategory, + ItemField, + ItemFieldType, + ItemOverview, + ItemSection, + VaultOverview, + Website, +} from '@1password/sdk' + +/** Connect-format field type strings returned by normalization. */ +type ConnectFieldType = + | 'STRING' + | 'CONCEALED' + | 'EMAIL' + | 'URL' + | 'OTP' + | 'PHONE' + | 'DATE' + | 'MONTH_YEAR' + | 'MENU' + | 'ADDRESS' + | 'REFERENCE' + | 'SSHKEY' + | 'CREDIT_CARD_NUMBER' + | 'CREDIT_CARD_TYPE' + +/** Connect-format category strings returned by normalization. 
*/ +type ConnectCategory = + | 'LOGIN' + | 'PASSWORD' + | 'API_CREDENTIAL' + | 'SECURE_NOTE' + | 'SERVER' + | 'DATABASE' + | 'CREDIT_CARD' + | 'IDENTITY' + | 'SSH_KEY' + | 'DOCUMENT' + | 'SOFTWARE_LICENSE' + | 'EMAIL_ACCOUNT' + | 'MEMBERSHIP' + | 'PASSPORT' + | 'REWARD_PROGRAM' + | 'DRIVER_LICENSE' + | 'BANK_ACCOUNT' + | 'MEDICAL_RECORD' + | 'OUTDOOR_LICENSE' + | 'WIRELESS_ROUTER' + | 'SOCIAL_SECURITY_NUMBER' + | 'CUSTOM' + +/** Normalized vault shape matching the Connect API response. */ +export interface NormalizedVault { + id: string + name: string + description: null + attributeVersion: number + contentVersion: number + items: number + type: string + createdAt: string | null + updatedAt: string | null +} + +/** Normalized item overview shape matching the Connect API response. */ +export interface NormalizedItemOverview { + id: string + title: string + vault: { id: string } + category: ConnectCategory + urls: Array<{ href: string; label: string | null; primary: boolean }> + favorite: boolean + tags: string[] + version: number + state: string | null + createdAt: string | null + updatedAt: string | null + lastEditedBy: null +} + +/** Normalized field shape matching the Connect API response. */ +export interface NormalizedField { + id: string + label: string + type: ConnectFieldType + purpose: string + value: string | null + section: { id: string } | null + generate: boolean + recipe: null + entropy: null +} + +/** Normalized full item shape matching the Connect API response. */ +export interface NormalizedItem extends NormalizedItemOverview { + fields: NormalizedField[] + sections: Array<{ id: string; label: string }> +} + +/** + * SDK field type string values → Connect field type mapping. + * Uses string literals instead of enum imports to avoid loading the WASM module at build time. 
+ */ +const SDK_TO_CONNECT_FIELD_TYPE: Record = { + Text: 'STRING', + Concealed: 'CONCEALED', + Email: 'EMAIL', + Url: 'URL', + Totp: 'OTP', + Phone: 'PHONE', + Date: 'DATE', + MonthYear: 'MONTH_YEAR', + Menu: 'MENU', + Address: 'ADDRESS', + Reference: 'REFERENCE', + SshKey: 'SSHKEY', + CreditCardNumber: 'CREDIT_CARD_NUMBER', + CreditCardType: 'CREDIT_CARD_TYPE', +} + +/** SDK category string values → Connect category mapping. */ +const SDK_TO_CONNECT_CATEGORY: Record = { + Login: 'LOGIN', + Password: 'PASSWORD', + ApiCredentials: 'API_CREDENTIAL', + SecureNote: 'SECURE_NOTE', + Server: 'SERVER', + Database: 'DATABASE', + CreditCard: 'CREDIT_CARD', + Identity: 'IDENTITY', + SshKey: 'SSH_KEY', + Document: 'DOCUMENT', + SoftwareLicense: 'SOFTWARE_LICENSE', + Email: 'EMAIL_ACCOUNT', + Membership: 'MEMBERSHIP', + Passport: 'PASSPORT', + Rewards: 'REWARD_PROGRAM', + DriverLicense: 'DRIVER_LICENSE', + BankAccount: 'BANK_ACCOUNT', + MedicalRecord: 'MEDICAL_RECORD', + OutdoorLicense: 'OUTDOOR_LICENSE', + Router: 'WIRELESS_ROUTER', + SocialSecurityNumber: 'SOCIAL_SECURITY_NUMBER', + CryptoWallet: 'CUSTOM', + Person: 'CUSTOM', + Unsupported: 'CUSTOM', +} + +/** Connect category → SDK category string mapping. */ +const CONNECT_TO_SDK_CATEGORY: Record = { + LOGIN: 'Login', + PASSWORD: 'Password', + API_CREDENTIAL: 'ApiCredentials', + SECURE_NOTE: 'SecureNote', + SERVER: 'Server', + DATABASE: 'Database', + CREDIT_CARD: 'CreditCard', + IDENTITY: 'Identity', + SSH_KEY: 'SshKey', + DOCUMENT: 'Document', + SOFTWARE_LICENSE: 'SoftwareLicense', + EMAIL_ACCOUNT: 'Email', + MEMBERSHIP: 'Membership', + PASSPORT: 'Passport', + REWARD_PROGRAM: 'Rewards', + DRIVER_LICENSE: 'DriverLicense', + BANK_ACCOUNT: 'BankAccount', + MEDICAL_RECORD: 'MedicalRecord', + OUTDOOR_LICENSE: 'OutdoorLicense', + WIRELESS_ROUTER: 'Router', + SOCIAL_SECURITY_NUMBER: 'SocialSecurityNumber', +} + +/** Connect field type → SDK field type string mapping. 
*/ +const CONNECT_TO_SDK_FIELD_TYPE: Record = { + STRING: 'Text', + CONCEALED: 'Concealed', + EMAIL: 'Email', + URL: 'Url', + OTP: 'Totp', + TOTP: 'Totp', + PHONE: 'Phone', + DATE: 'Date', + MONTH_YEAR: 'MonthYear', + MENU: 'Menu', + ADDRESS: 'Address', + REFERENCE: 'Reference', + SSHKEY: 'SshKey', + CREDIT_CARD_NUMBER: 'CreditCardNumber', + CREDIT_CARD_TYPE: 'CreditCardType', +} + +export type ConnectionMode = 'service_account' | 'connect' + +export interface CredentialParams { + connectionMode?: ConnectionMode | null + serviceAccountToken?: string | null + serverUrl?: string | null + apiKey?: string | null +} + +export interface ResolvedCredentials { + mode: ConnectionMode + serviceAccountToken?: string + serverUrl?: string + apiKey?: string +} + +/** Determine which backend to use based on provided credentials. */ +export function resolveCredentials(params: CredentialParams): ResolvedCredentials { + const mode = params.connectionMode ?? (params.serviceAccountToken ? 'service_account' : 'connect') + + if (mode === 'service_account') { + if (!params.serviceAccountToken) { + throw new Error('Service Account token is required for Service Account mode') + } + return { mode, serviceAccountToken: params.serviceAccountToken } + } + + if (!params.serverUrl || !params.apiKey) { + throw new Error('Server URL and Connect token are required for Connect Server mode') + } + return { mode, serverUrl: params.serverUrl, apiKey: params.apiKey } +} + +/** + * Create a 1Password SDK client from a service account token. + * Uses dynamic import to avoid loading the WASM module at build time. + */ +export async function createOnePasswordClient(serviceAccountToken: string) { + const { createClient } = await import('@1password/sdk') + return createClient({ + auth: serviceAccountToken, + integrationName: 'Sim Studio', + integrationVersion: '1.0.0', + }) +} + +/** Proxy a request to the 1Password Connect Server. 
*/ +export async function connectRequest(options: { + serverUrl: string + apiKey: string + path: string + method: string + body?: unknown + query?: string +}): Promise { + const base = options.serverUrl.replace(/\/$/, '') + const queryStr = options.query ? `?${options.query}` : '' + const url = `${base}${options.path}${queryStr}` + + const headers: Record = { + Authorization: `Bearer ${options.apiKey}`, + } + + if (options.body) { + headers['Content-Type'] = 'application/json' + } + + return fetch(url, { + method: options.method, + headers, + body: options.body ? JSON.stringify(options.body) : undefined, + }) +} + +/** Normalize an SDK VaultOverview to match Connect API vault shape. */ +export function normalizeSdkVault(vault: VaultOverview): NormalizedVault { + return { + id: vault.id, + name: vault.title, + description: null, + attributeVersion: 0, + contentVersion: 0, + items: 0, + type: 'USER_CREATED', + createdAt: + vault.createdAt instanceof Date ? vault.createdAt.toISOString() : (vault.createdAt ?? null), + updatedAt: + vault.updatedAt instanceof Date ? vault.updatedAt.toISOString() : (vault.updatedAt ?? null), + } +} + +/** Normalize an SDK ItemOverview to match Connect API item summary shape. */ +export function normalizeSdkItemOverview(item: ItemOverview): NormalizedItemOverview { + return { + id: item.id, + title: item.title, + vault: { id: item.vaultId }, + category: SDK_TO_CONNECT_CATEGORY[item.category] ?? 'CUSTOM', + urls: (item.websites ?? []).map((w: Website) => ({ + href: w.url, + label: w.label ?? null, + primary: false, + })), + favorite: false, + tags: item.tags ?? [], + version: 0, + state: item.state === 'archived' ? 'ARCHIVED' : null, + createdAt: + item.createdAt instanceof Date ? item.createdAt.toISOString() : (item.createdAt ?? null), + updatedAt: + item.updatedAt instanceof Date ? item.updatedAt.toISOString() : (item.updatedAt ?? null), + lastEditedBy: null, + } +} + +/** Normalize a full SDK Item to match Connect API FullItem shape. 
*/ +export function normalizeSdkItem(item: Item): NormalizedItem { + return { + id: item.id, + title: item.title, + vault: { id: item.vaultId }, + category: SDK_TO_CONNECT_CATEGORY[item.category] ?? 'CUSTOM', + urls: (item.websites ?? []).map((w: Website) => ({ + href: w.url, + label: w.label ?? null, + primary: false, + })), + favorite: false, + tags: item.tags ?? [], + version: item.version ?? 0, + state: null, + fields: (item.fields ?? []).map((field: ItemField) => ({ + id: field.id, + label: field.title, + type: SDK_TO_CONNECT_FIELD_TYPE[field.fieldType] ?? 'STRING', + purpose: '', + value: field.value ?? null, + section: field.sectionId ? { id: field.sectionId } : null, + generate: false, + recipe: null, + entropy: null, + })), + sections: (item.sections ?? []).map((section: ItemSection) => ({ + id: section.id, + label: section.title, + })), + createdAt: + item.createdAt instanceof Date ? item.createdAt.toISOString() : (item.createdAt ?? null), + updatedAt: + item.updatedAt instanceof Date ? item.updatedAt.toISOString() : (item.updatedAt ?? null), + lastEditedBy: null, + } +} + +/** Convert a Connect-style category string to the SDK category string. */ +export function toSdkCategory(category: string): `${ItemCategory}` { + return CONNECT_TO_SDK_CATEGORY[category] ?? 'Login' +} + +/** Convert a Connect-style field type string to the SDK field type string. */ +export function toSdkFieldType(type: string): `${ItemFieldType}` { + return CONNECT_TO_SDK_FIELD_TYPE[type] ?? 
'Text' +} diff --git a/apps/sim/app/api/v1/copilot/chat/route.ts b/apps/sim/app/api/v1/copilot/chat/route.ts new file mode 100644 index 000000000..d08234cff --- /dev/null +++ b/apps/sim/app/api/v1/copilot/chat/route.ts @@ -0,0 +1,114 @@ +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { getCopilotModel } from '@/lib/copilot/config' +import { SIM_AGENT_VERSION } from '@/lib/copilot/constants' +import { COPILOT_REQUEST_MODES } from '@/lib/copilot/models' +import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator' +import { resolveWorkflowIdForUser } from '@/lib/workflows/utils' +import { authenticateV1Request } from '@/app/api/v1/auth' + +const logger = createLogger('CopilotHeadlessAPI') + +const RequestSchema = z.object({ + message: z.string().min(1, 'message is required'), + workflowId: z.string().optional(), + workflowName: z.string().optional(), + chatId: z.string().optional(), + mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'), + model: z.string().optional(), + autoExecuteTools: z.boolean().optional().default(true), + timeout: z.number().optional().default(300000), +}) + +/** + * POST /api/v1/copilot/chat + * Headless copilot endpoint for server-side orchestration. 
+ * + * workflowId is optional - if not provided: + * - If workflowName is provided, finds that workflow + * - Otherwise uses the user's first workflow as context + * - The copilot can still operate on any workflow using list_user_workflows + */ +export async function POST(req: NextRequest) { + const auth = await authenticateV1Request(req) + if (!auth.authenticated || !auth.userId) { + return NextResponse.json( + { success: false, error: auth.error || 'Unauthorized' }, + { status: 401 } + ) + } + + try { + const body = await req.json() + const parsed = RequestSchema.parse(body) + const defaults = getCopilotModel('chat') + const selectedModel = parsed.model || defaults.model + + // Resolve workflow ID + const resolved = await resolveWorkflowIdForUser( + auth.userId, + parsed.workflowId, + parsed.workflowName + ) + if (!resolved) { + return NextResponse.json( + { + success: false, + error: 'No workflows found. Create a workflow first or provide a valid workflowId.', + }, + { status: 400 } + ) + } + + // Transform mode to transport mode (same as client API) + // build and agent both map to 'agent' on the backend + const effectiveMode = parsed.mode === 'agent' ? 'build' : parsed.mode + const transportMode = effectiveMode === 'build' ? 
'agent' : effectiveMode + + // Always generate a chatId - required for artifacts system to work with subagents + const chatId = parsed.chatId || crypto.randomUUID() + + const requestPayload = { + message: parsed.message, + workflowId: resolved.workflowId, + userId: auth.userId, + model: selectedModel, + mode: transportMode, + messageId: crypto.randomUUID(), + version: SIM_AGENT_VERSION, + headless: true, + chatId, + } + + const result = await orchestrateCopilotStream(requestPayload, { + userId: auth.userId, + workflowId: resolved.workflowId, + chatId, + autoExecuteTools: parsed.autoExecuteTools, + timeout: parsed.timeout, + interactive: false, + }) + + return NextResponse.json({ + success: result.success, + content: result.content, + toolCalls: result.toolCalls, + chatId: result.chatId || chatId, // Return the chatId for conversation continuity + conversationId: result.conversationId, + error: result.error, + }) + } catch (error) { + if (error instanceof z.ZodError) { + return NextResponse.json( + { success: false, error: 'Invalid request', details: error.errors }, + { status: 400 } + ) + } + + logger.error('Headless copilot request failed', { + error: error instanceof Error ? 
error.message : String(error), + }) + return NextResponse.json({ success: false, error: 'Internal server error' }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/workflows/[id]/execute/route.ts b/apps/sim/app/api/workflows/[id]/execute/route.ts index 06984a3e2..a343fb3e9 100644 --- a/apps/sim/app/api/workflows/[id]/execute/route.ts +++ b/apps/sim/app/api/workflows/[id]/execute/route.ts @@ -33,7 +33,11 @@ import { createHttpResponseFromBlock, workflowHasResponseBlock } from '@/lib/wor import { executeWorkflowJob, type WorkflowExecutionPayload } from '@/background/workflow-execution' import { normalizeName } from '@/executor/constants' import { ExecutionSnapshot } from '@/executor/execution/snapshot' -import type { ExecutionMetadata, IterationContext } from '@/executor/execution/types' +import type { + ExecutionMetadata, + IterationContext, + SerializableExecutionState, +} from '@/executor/execution/types' import type { NormalizedBlockOutput, StreamingExecution } from '@/executor/types' import { hasExecutionResult } from '@/executor/utils/errors' import { Serializer } from '@/serializer' @@ -62,20 +66,23 @@ const ExecuteWorkflowSchema = z.object({ runFromBlock: z .object({ startBlockId: z.string().min(1, 'Start block ID is required'), - sourceSnapshot: z.object({ - blockStates: z.record(z.any()), - executedBlocks: z.array(z.string()), - blockLogs: z.array(z.any()), - decisions: z.object({ - router: z.record(z.string()), - condition: z.record(z.string()), - }), - completedLoops: z.array(z.string()), - loopExecutions: z.record(z.any()).optional(), - parallelExecutions: z.record(z.any()).optional(), - parallelBlockMapping: z.record(z.any()).optional(), - activeExecutionPath: z.array(z.string()), - }), + sourceSnapshot: z + .object({ + blockStates: z.record(z.any()), + executedBlocks: z.array(z.string()), + blockLogs: z.array(z.any()), + decisions: z.object({ + router: z.record(z.string()), + condition: z.record(z.string()), + }), + completedLoops: 
z.array(z.string()), + loopExecutions: z.record(z.any()).optional(), + parallelExecutions: z.record(z.any()).optional(), + parallelBlockMapping: z.record(z.any()).optional(), + activeExecutionPath: z.array(z.string()), + }) + .optional(), + executionId: z.string().optional(), }) .optional(), }) @@ -269,9 +276,47 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id: base64MaxBytes, workflowStateOverride, stopAfterBlockId, - runFromBlock, + runFromBlock: rawRunFromBlock, } = validation.data + // Resolve runFromBlock snapshot from executionId if needed + let resolvedRunFromBlock: + | { startBlockId: string; sourceSnapshot: SerializableExecutionState } + | undefined + if (rawRunFromBlock) { + if (rawRunFromBlock.sourceSnapshot) { + resolvedRunFromBlock = { + startBlockId: rawRunFromBlock.startBlockId, + sourceSnapshot: rawRunFromBlock.sourceSnapshot as SerializableExecutionState, + } + } else if (rawRunFromBlock.executionId) { + const { getExecutionState, getLatestExecutionState } = await import( + '@/lib/workflows/executor/execution-state' + ) + const snapshot = + rawRunFromBlock.executionId === 'latest' + ? await getLatestExecutionState(workflowId) + : await getExecutionState(rawRunFromBlock.executionId) + if (!snapshot) { + return NextResponse.json( + { + error: `No execution state found for ${rawRunFromBlock.executionId === 'latest' ? 'workflow' : `execution ${rawRunFromBlock.executionId}`}. 
Run the full workflow first.`, + }, + { status: 400 } + ) + } + resolvedRunFromBlock = { + startBlockId: rawRunFromBlock.startBlockId, + sourceSnapshot: snapshot, + } + } else { + return NextResponse.json( + { error: 'runFromBlock requires either sourceSnapshot or executionId' }, + { status: 400 } + ) + } + } + // For API key and internal JWT auth, the entire body is the input (except for our control fields) // For session auth, the input is explicitly provided in the input field const input = @@ -496,7 +541,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id: includeFileBase64, base64MaxBytes, stopAfterBlockId, - runFromBlock, + runFromBlock: resolvedRunFromBlock, abortSignal: timeoutController.signal, }) @@ -837,7 +882,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id: includeFileBase64, base64MaxBytes, stopAfterBlockId, - runFromBlock, + runFromBlock: resolvedRunFromBlock, }) if (result.status === 'paused') { diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/copilot-message.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/copilot-message.tsx index 1e745f3f2..187ff1594 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/copilot-message.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/copilot-message.tsx @@ -211,7 +211,7 @@ const CopilotMessage: FC = memo( if (block.type === 'text') { const isLastTextBlock = index === message.contentBlocks!.length - 1 && block.type === 'text' - const parsed = parseSpecialTags(block.content) + const parsed = parseSpecialTags(block.content ?? 
'') // Mask credential IDs in the displayed content const cleanBlockContent = maskCredentialValue( parsed.cleanContent.replace(/\n{3,}/g, '\n\n') @@ -243,7 +243,7 @@ const CopilotMessage: FC = memo( return (
= memo(
) } - if (block.type === 'tool_call') { + if (block.type === 'tool_call' && block.toolCall) { const blockKey = `tool-${block.toolCall.id}` return ( diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx index f6ee0679a..c7f103209 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx @@ -1,20 +1,19 @@ 'use client' import { memo, useEffect, useMemo, useRef, useState } from 'react' +import { createLogger } from '@sim/logger' import clsx from 'clsx' import { ChevronUp, LayoutList } from 'lucide-react' import Editor from 'react-simple-code-editor' import { Button, Code, getCodeEditorProps, highlight, languages } from '@/components/emcn' -import { ClientToolCallState } from '@/lib/copilot/tools/client/base-tool' -import { getClientTool } from '@/lib/copilot/tools/client/manager' -import { getRegisteredTools } from '@/lib/copilot/tools/client/registry' -import '@/lib/copilot/tools/client/init-tool-configs' import { - getSubagentLabels as getSubagentLabelsFromConfig, - getToolUIConfig, - hasInterrupt as hasInterruptFromConfig, - isSpecialTool as isSpecialToolFromConfig, -} from '@/lib/copilot/tools/client/ui-config' + CLIENT_EXECUTABLE_RUN_TOOLS, + executeRunToolOnClient, +} from '@/lib/copilot/client-sse/run-tool-execution' +import { + ClientToolCallState, + TOOL_DISPLAY_REGISTRY, +} from '@/lib/copilot/tools/client/tool-display-registry' import { formatDuration } from '@/lib/core/utils/formatting' import { CopilotMarkdownRenderer } from 
'@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/markdown-renderer' import { SmoothStreamingText } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/smooth-streaming' @@ -25,7 +24,6 @@ import { getDisplayValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/co import { getBlock } from '@/blocks/registry' import type { CopilotToolCall } from '@/stores/panel' import { useCopilotStore } from '@/stores/panel' -import { CLASS_TOOL_METADATA } from '@/stores/panel/copilot/store' import type { SubAgentContentBlock } from '@/stores/panel/copilot/types' import { useWorkflowStore } from '@/stores/workflows/workflow/store' @@ -710,8 +708,8 @@ const ShimmerOverlayText = memo(function ShimmerOverlayText({ * @returns The completion label from UI config, defaults to 'Thought' */ function getSubagentCompletionLabel(toolName: string): string { - const labels = getSubagentLabelsFromConfig(toolName, false) - return labels?.completed ?? 'Thought' + const labels = TOOL_DISPLAY_REGISTRY[toolName]?.uiConfig?.subagentLabels + return labels?.completed || 'Thought' } /** @@ -943,7 +941,7 @@ const SubagentContentRenderer = memo(function SubagentContentRenderer({ * Determines if a tool call should display with special gradient styling. 
*/ function isSpecialToolCall(toolCall: CopilotToolCall): boolean { - return isSpecialToolFromConfig(toolCall.name) + return TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.isSpecial === true } /** @@ -1223,143 +1221,88 @@ const WorkflowEditSummary = memo(function WorkflowEditSummary({ /** Checks if a tool is server-side executed (not a client tool) */ function isIntegrationTool(toolName: string): boolean { - return !CLASS_TOOL_METADATA[toolName] + return !TOOL_DISPLAY_REGISTRY[toolName] } function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean { - if (hasInterruptFromConfig(toolCall.name) && toolCall.state === 'pending') { + if (!toolCall.name || toolCall.name === 'unknown_tool') { + return false + } + + if (toolCall.state !== ClientToolCallState.pending) { + return false + } + + // Never show buttons for tools the user has marked as always-allowed + if (useCopilotStore.getState().isToolAutoAllowed(toolCall.name)) { + return false + } + + const hasInterrupt = !!TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.interrupt + if (hasInterrupt) { return true } - const instance = getClientTool(toolCall.id) - let hasInterrupt = !!instance?.getInterruptDisplays?.() - if (!hasInterrupt) { - try { - const def = getRegisteredTools()[toolCall.name] - if (def) { - hasInterrupt = - typeof def.hasInterrupt === 'function' - ? 
!!def.hasInterrupt(toolCall.params || {}) - : !!def.hasInterrupt - } - } catch {} - } - - if (hasInterrupt && toolCall.state === 'pending') { - return true - } - - const mode = useCopilotStore.getState().mode - if (mode === 'build' && isIntegrationTool(toolCall.name) && toolCall.state === 'pending') { + // Integration tools (user-installed) always require approval + if (isIntegrationTool(toolCall.name)) { return true } return false } +const toolCallLogger = createLogger('CopilotToolCall') + +async function sendToolDecision( + toolCallId: string, + status: 'accepted' | 'rejected' | 'background' +) { + try { + await fetch('/api/copilot/confirm', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ toolCallId, status }), + }) + } catch (error) { + toolCallLogger.warn('Failed to send tool decision', { + toolCallId, + status, + error: error instanceof Error ? error.message : String(error), + }) + } +} + async function handleRun( toolCall: CopilotToolCall, setToolCallState: any, onStateChange?: any, editedParams?: any ) { - const instance = getClientTool(toolCall.id) + setToolCallState(toolCall, 'executing', editedParams ? { params: editedParams } : undefined) + onStateChange?.('executing') + await sendToolDecision(toolCall.id, 'accepted') - if (!instance && isIntegrationTool(toolCall.name)) { - onStateChange?.('executing') - try { - await useCopilotStore.getState().executeIntegrationTool(toolCall.id) - } catch (e) { - setToolCallState(toolCall, 'error', { error: e instanceof Error ? e.message : String(e) }) - onStateChange?.('error') - try { - await fetch('/api/copilot/tools/mark-complete', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - id: toolCall.id, - name: toolCall.name, - status: 500, - message: e instanceof Error ? e.message : 'Tool execution failed', - data: { error: e instanceof Error ? 
e.message : String(e) }, - }), - }) - } catch { - console.error('[handleRun] Failed to notify backend of tool error:', toolCall.id) - } - } - return - } - - if (!instance) return - try { - const mergedParams = - editedParams || - (toolCall as any).params || - (toolCall as any).parameters || - (toolCall as any).input || - {} - await instance.handleAccept?.(mergedParams) - onStateChange?.('executing') - } catch (e) { - setToolCallState(toolCall, 'error', { error: e instanceof Error ? e.message : String(e) }) + // Client-executable run tools: execute on the client for real-time feedback + // (block pulsing, console logs, stop button). The server defers execution + // for these tools; the client reports back via mark-complete. + if (CLIENT_EXECUTABLE_RUN_TOOLS.has(toolCall.name)) { + const params = editedParams || toolCall.params || {} + executeRunToolOnClient(toolCall.id, toolCall.name, params) } } async function handleSkip(toolCall: CopilotToolCall, setToolCallState: any, onStateChange?: any) { - const instance = getClientTool(toolCall.id) - - if (!instance && isIntegrationTool(toolCall.name)) { - setToolCallState(toolCall, 'rejected') - onStateChange?.('rejected') - - let notified = false - for (let attempt = 0; attempt < 3 && !notified; attempt++) { - try { - const res = await fetch('/api/copilot/tools/mark-complete', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - id: toolCall.id, - name: toolCall.name, - status: 400, - message: 'Tool execution skipped by user', - data: { skipped: true, reason: 'user_skipped' }, - }), - }) - if (res.ok) { - notified = true - } - } catch (e) { - if (attempt < 2) { - await new Promise((resolve) => setTimeout(resolve, 500)) - } - } - } - - if (!notified) { - console.error('[handleSkip] Failed to notify backend after 3 attempts:', toolCall.id) - } - return - } - - if (instance) { - try { - await instance.handleReject?.() - } catch {} - } setToolCallState(toolCall, 'rejected') 
onStateChange?.('rejected') + await sendToolDecision(toolCall.id, 'rejected') } function getDisplayName(toolCall: CopilotToolCall): string { const fromStore = (toolCall as any).display?.text if (fromStore) return fromStore - try { - const def = getRegisteredTools()[toolCall.name] as any - const byState = def?.metadata?.displayNames?.[toolCall.state] - if (byState?.text) return byState.text - } catch {} + const registryEntry = TOOL_DISPLAY_REGISTRY[toolCall.name] + const byState = registryEntry?.displayNames?.[toolCall.state as ClientToolCallState] + if (byState?.text) return byState.text const stateVerb = getStateVerb(toolCall.state) const formattedName = formatToolName(toolCall.name) @@ -1431,9 +1374,7 @@ function RunSkipButtons({ setButtonsHidden(true) try { await addAutoAllowedTool(toolCall.name) - if (!isIntegrationTool(toolCall.name)) { - await handleRun(toolCall, setToolCallState, onStateChange, editedParams) - } + await handleRun(toolCall, setToolCallState, onStateChange, editedParams) } finally { setIsProcessing(false) actionInProgressRef.current = false @@ -1507,10 +1448,10 @@ export function ToolCall({ const paramsRef = useRef(params) // Check if this integration tool is auto-allowed - // Subscribe to autoAllowedTools so we re-render when it changes - const autoAllowedTools = useCopilotStore((s) => s.autoAllowedTools) - const { removeAutoAllowedTool } = useCopilotStore() - const isAutoAllowed = isIntegrationTool(toolCall.name) && autoAllowedTools.includes(toolCall.name) + const { removeAutoAllowedTool, setToolCallState } = useCopilotStore() + const isAutoAllowed = useCopilotStore( + (s) => isIntegrationTool(toolCall.name) && s.isToolAutoAllowed(toolCall.name) + ) // Update edited params when toolCall params change (deep comparison to avoid resetting user edits on ref change) useEffect(() => { @@ -1526,34 +1467,12 @@ export function ToolCall({ toolCall.name === 'mark_todo_in_progress' || toolCall.name === 'tool_search_tool_regex' || toolCall.name === 
'user_memory' || - toolCall.name === 'edit_respond' || - toolCall.name === 'debug_respond' || - toolCall.name === 'plan_respond' || - toolCall.name === 'research_respond' || - toolCall.name === 'info_respond' || - toolCall.name === 'deploy_respond' || - toolCall.name === 'superagent_respond' + toolCall.name.endsWith('_respond') ) return null // Special rendering for subagent tools - show as thinking text with tool calls at top level - const SUBAGENT_TOOLS = [ - 'plan', - 'edit', - 'debug', - 'test', - 'deploy', - 'evaluate', - 'auth', - 'research', - 'knowledge', - 'custom_tool', - 'tour', - 'info', - 'workflow', - 'superagent', - ] - const isSubagentTool = SUBAGENT_TOOLS.includes(toolCall.name) + const isSubagentTool = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.subagent === true // For ALL subagent tools, don't show anything until we have blocks with content if (isSubagentTool) { @@ -1593,17 +1512,18 @@ export function ToolCall({ stateStr === 'aborted' // Allow rendering if: - // 1. Tool is in CLASS_TOOL_METADATA (client tools), OR + // 1. Tool is in TOOL_DISPLAY_REGISTRY (client tools), OR // 2. We're in build mode (integration tools are executed server-side), OR // 3. 
Tool call is already completed (historical - should always render) - const isClientTool = !!CLASS_TOOL_METADATA[toolCall.name] + const isClientTool = !!TOOL_DISPLAY_REGISTRY[toolCall.name] const isIntegrationToolInBuildMode = mode === 'build' && !isClientTool if (!isClientTool && !isIntegrationToolInBuildMode && !isCompletedToolCall) { return null } + const toolUIConfig = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig // Check if tool has params table config (meaning it's expandable) - const hasParamsTable = !!getToolUIConfig(toolCall.name)?.paramsTable + const hasParamsTable = !!toolUIConfig?.paramsTable const isRunWorkflow = toolCall.name === 'run_workflow' const isExpandableTool = hasParamsTable || @@ -1613,7 +1533,6 @@ export function ToolCall({ const showButtons = isCurrentMessage && shouldShowRunSkipButtons(toolCall) // Check UI config for secondary action - only show for current message tool calls - const toolUIConfig = getToolUIConfig(toolCall.name) const secondaryAction = toolUIConfig?.secondaryAction const showSecondaryAction = secondaryAction?.showInStates.includes( toolCall.state as ClientToolCallState @@ -2211,16 +2130,9 @@ export function ToolCall({
- {/* Show loading state until fully initialized */} - {!isInitialized ? ( + {/* Show loading state until fully initialized, but skip if actively streaming (resume case) */} + {!isInitialized && !isSendingMessage ? (

Loading copilot

diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-chat-history.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-chat-history.ts index 04f1cb033..0978c8335 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-chat-history.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-chat-history.ts @@ -10,7 +10,6 @@ interface UseChatHistoryProps { activeWorkflowId: string | null copilotWorkflowId: string | null loadChats: (forceRefresh: boolean) => Promise - areChatsFresh: (workflowId: string) => boolean isSendingMessage: boolean } @@ -21,8 +20,7 @@ interface UseChatHistoryProps { * @returns Chat history utilities */ export function useChatHistory(props: UseChatHistoryProps) { - const { chats, activeWorkflowId, copilotWorkflowId, loadChats, areChatsFresh, isSendingMessage } = - props + const { chats, activeWorkflowId, copilotWorkflowId, loadChats, isSendingMessage } = props /** Groups chats by time period (Today, Yesterday, This Week, etc.) 
*/ const groupedChats = useMemo(() => { @@ -80,7 +78,7 @@ export function useChatHistory(props: UseChatHistoryProps) { /** Handles history dropdown opening and loads chats if needed (non-blocking) */ const handleHistoryDropdownOpen = useCallback( (open: boolean) => { - if (open && activeWorkflowId && !isSendingMessage && !areChatsFresh(activeWorkflowId)) { + if (open && activeWorkflowId && !isSendingMessage) { loadChats(false).catch((error) => { logger.error('Failed to load chat history:', error) }) @@ -90,7 +88,7 @@ export function useChatHistory(props: UseChatHistoryProps) { logger.info('Chat history opened during stream - showing cached data only') } }, - [activeWorkflowId, areChatsFresh, isSendingMessage, loadChats] + [activeWorkflowId, isSendingMessage, loadChats] ) return { diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-copilot-initialization.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-copilot-initialization.ts index 48a3ead80..1ffe80216 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-copilot-initialization.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/hooks/use-copilot-initialization.ts @@ -14,6 +14,7 @@ interface UseCopilotInitializationProps { loadAutoAllowedTools: () => Promise currentChat: any isSendingMessage: boolean + resumeActiveStream: () => Promise } /** @@ -32,11 +33,13 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) { loadAutoAllowedTools, currentChat, isSendingMessage, + resumeActiveStream, } = props const [isInitialized, setIsInitialized] = useState(false) const lastWorkflowIdRef = useRef(null) const hasMountedRef = useRef(false) + const hasResumedRef = useRef(false) /** Initialize on mount - loads chats if needed. 
Never loads during streaming */ useEffect(() => { @@ -105,6 +108,16 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) { isSendingMessage, ]) + /** Try to resume active stream on mount - runs early, before waiting for chats */ + useEffect(() => { + if (hasResumedRef.current || isSendingMessage) return + hasResumedRef.current = true + // Resume immediately on mount - don't wait for isInitialized + resumeActiveStream().catch((err) => { + logger.warn('[Copilot] Failed to resume active stream', err) + }) + }, [isSendingMessage, resumeActiveStream]) + /** Load auto-allowed tools once on mount - runs immediately, independent of workflow */ const hasLoadedAutoAllowedToolsRef = useRef(false) useEffect(() => { diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tool-input/tool-input.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tool-input/tool-input.tsx index 8f03f4b2e..9990c3eeb 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tool-input/tool-input.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tool-input/tool-input.tsx @@ -62,7 +62,12 @@ import { type CustomTool as CustomToolDefinition, useCustomTools, } from '@/hooks/queries/custom-tools' -import { useForceRefreshMcpTools, useMcpServers, useStoredMcpTools } from '@/hooks/queries/mcp' +import { + useForceRefreshMcpTools, + useMcpServers, + useMcpToolsEvents, + useStoredMcpTools, +} from '@/hooks/queries/mcp' import { useChildDeploymentStatus, useDeployChildWorkflow, @@ -1035,6 +1040,7 @@ export const ToolInput = memo(function ToolInput({ const { data: mcpServers = [], isLoading: mcpServersLoading } = useMcpServers(workspaceId) const { data: storedMcpTools = [] } = 
useStoredMcpTools(workspaceId) const forceRefreshMcpTools = useForceRefreshMcpTools() + useMcpToolsEvents(workspaceId) const openSettingsModal = useSettingsModalStore((state) => state.openModal) const mcpDataLoading = mcpLoading || mcpServersLoading diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts index 0d0597f9a..03eb068b2 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts @@ -11,6 +11,12 @@ export interface WorkflowExecutionOptions { executionId?: string onBlockComplete?: (blockId: string, output: any) => Promise overrideTriggerType?: 'chat' | 'manual' | 'api' + stopAfterBlockId?: string + /** For run_from_block / run_block: start from a specific block using cached state */ + runFromBlock?: { + startBlockId: string + executionId?: string + } } /** @@ -39,6 +45,15 @@ export async function executeWorkflowWithFullLogging( triggerType: options.overrideTriggerType || 'manual', useDraftState: true, isClientSession: true, + ...(options.stopAfterBlockId ? { stopAfterBlockId: options.stopAfterBlockId } : {}), + ...(options.runFromBlock + ? 
{ + runFromBlock: { + startBlockId: options.runFromBlock.startBlockId, + executionId: options.runFromBlock.executionId || 'latest', + }, + } + : {}), } const response = await fetch(`/api/workflows/${activeWorkflowId}/execute`, { diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx index 82d05a587..11e3942e7 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx @@ -18,7 +18,7 @@ import 'reactflow/dist/style.css' import { createLogger } from '@sim/logger' import { useShallow } from 'zustand/react/shallow' import { useSession } from '@/lib/auth/auth-client' -import type { OAuthConnectEventDetail } from '@/lib/copilot/tools/client/other/oauth-request-access' +import type { OAuthConnectEventDetail } from '@/lib/copilot/tools/client/base-tool' import type { OAuthProvider } from '@/lib/oauth' import { BLOCK_DIMENSIONS, CONTAINER_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions' import { TriggerUtils } from '@/lib/workflows/triggers/triggers' diff --git a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/mcp/mcp.tsx b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/mcp/mcp.tsx index d25865a74..4295dd59d 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/mcp/mcp.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/mcp/mcp.tsx @@ -894,14 +894,14 @@ export function MCP({ initialServerId }: MCPProps) { disabled={!hasParams} >
-
-

+

+

{tool.name}

{issues.length > 0 && ( -
+
'Task', + }, + { + id: 'parentIssue', + title: 'Parent Issue Key', + type: 'short-input', + placeholder: 'Parent issue key for subtasks (e.g., PROJ-123)', + dependsOn: ['projectId'], + condition: { field: 'operation', value: 'write' }, + }, + // Write/Update Issue additional fields { id: 'assignee', title: 'Assignee Account ID', type: 'short-input', placeholder: 'Assignee account ID (e.g., 5b109f2e9729b51b54dc274d)', dependsOn: ['projectId'], - condition: { field: 'operation', value: 'write' }, + condition: { field: 'operation', value: ['write', 'update'] }, }, { id: 'priority', @@ -284,7 +302,7 @@ Return ONLY the description text - no explanations.`, type: 'short-input', placeholder: 'Priority ID or name (e.g., "10000" or "High")', dependsOn: ['projectId'], - condition: { field: 'operation', value: 'write' }, + condition: { field: 'operation', value: ['write', 'update'] }, }, { id: 'labels', @@ -292,7 +310,7 @@ Return ONLY the description text - no explanations.`, type: 'short-input', placeholder: 'Comma-separated labels (e.g., bug, urgent)', dependsOn: ['projectId'], - condition: { field: 'operation', value: 'write' }, + condition: { field: 'operation', value: ['write', 'update'] }, }, { id: 'duedate', @@ -300,7 +318,7 @@ Return ONLY the description text - no explanations.`, type: 'short-input', placeholder: 'YYYY-MM-DD (e.g., 2024-12-31)', dependsOn: ['projectId'], - condition: { field: 'operation', value: 'write' }, + condition: { field: 'operation', value: ['write', 'update'] }, wandConfig: { enabled: true, prompt: `Generate a date in YYYY-MM-DD format based on the user's description. 
@@ -329,7 +347,7 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n type: 'long-input', placeholder: 'Environment information (e.g., Production, Staging)', dependsOn: ['projectId'], - condition: { field: 'operation', value: 'write' }, + condition: { field: 'operation', value: ['write', 'update'] }, }, { id: 'customFieldId', @@ -337,7 +355,7 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n type: 'short-input', placeholder: 'e.g., customfield_10001 or 10001', dependsOn: ['projectId'], - condition: { field: 'operation', value: 'write' }, + condition: { field: 'operation', value: ['write', 'update'] }, }, { id: 'customFieldValue', @@ -345,7 +363,34 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n type: 'short-input', placeholder: 'Value for the custom field', dependsOn: ['projectId'], - condition: { field: 'operation', value: 'write' }, + condition: { field: 'operation', value: ['write', 'update'] }, + }, + { + id: 'components', + title: 'Components', + type: 'short-input', + placeholder: 'Comma-separated component names', + dependsOn: ['projectId'], + condition: { field: 'operation', value: ['write', 'update'] }, + }, + { + id: 'fixVersions', + title: 'Fix Versions', + type: 'short-input', + placeholder: 'Comma-separated fix version names', + dependsOn: ['projectId'], + condition: { field: 'operation', value: ['write', 'update'] }, + }, + { + id: 'notifyUsers', + title: 'Notify Users', + type: 'dropdown', + options: [ + { label: 'Yes', id: 'true' }, + { label: 'No', id: 'false' }, + ], + value: () => 'true', + condition: { field: 'operation', value: 'update' }, }, // Delete Issue fields { @@ -395,6 +440,13 @@ Return ONLY the comment text - no explanations.`, placeholder: 'Describe the transition reason (e.g., "fixed bug", "ready for QA review")...', }, }, + { + id: 'resolution', + title: 'Resolution', + type: 'short-input', + placeholder: 'Resolution name (e.g., 
"Fixed", "Won\'t Fix")', + condition: { field: 'operation', value: 'transition' }, + }, // Search Issues fields { id: 'jql', @@ -420,6 +472,20 @@ Return ONLY the JQL query - no explanations or markdown formatting.`, generationType: 'sql-query', }, }, + { + id: 'nextPageToken', + title: 'Next Page Token', + type: 'short-input', + placeholder: 'Cursor token for next page (omit for first page)', + condition: { field: 'operation', value: 'search' }, + }, + { + id: 'startAt', + title: 'Start At', + type: 'short-input', + placeholder: 'Pagination start index (default: 0)', + condition: { field: 'operation', value: ['get_comments', 'get_worklogs'] }, + }, { id: 'maxResults', title: 'Max Results', @@ -756,7 +822,9 @@ Return ONLY the comment text - no explanations.`, assignee: params.assignee || undefined, priority: params.priority || undefined, labels: parseCommaSeparated(params.labels), + components: parseCommaSeparated(params.components), duedate: params.duedate || undefined, + fixVersions: parseCommaSeparated(params.fixVersions), reporter: params.reporter || undefined, environment: params.environment || undefined, customFieldId: params.customFieldId || undefined, @@ -768,11 +836,29 @@ Return ONLY the comment text - no explanations.`, } } case 'update': { + const parseCommaSeparated = (value: string | undefined): string[] | undefined => { + if (!value || value.trim() === '') return undefined + return value + .split(',') + .map((item) => item.trim()) + .filter((item) => item !== '') + } + const updateParams = { projectId: effectiveProjectId, issueKey: effectiveIssueKey, - summary: params.summary || '', - description: params.description || '', + summary: params.summary || undefined, + description: params.description || undefined, + assignee: params.assignee || undefined, + priority: params.priority || undefined, + labels: parseCommaSeparated(params.labels), + components: parseCommaSeparated(params.components), + duedate: params.duedate || undefined, + fixVersions: 
parseCommaSeparated(params.fixVersions), + environment: params.environment || undefined, + customFieldId: params.customFieldId || undefined, + customFieldValue: params.customFieldValue || undefined, + notifyUsers: params.notifyUsers === 'false' ? false : undefined, } return { ...baseParams, @@ -813,12 +899,14 @@ Return ONLY the comment text - no explanations.`, issueKey: effectiveIssueKey, transitionId: params.transitionId, comment: params.transitionComment, + resolution: params.resolution || undefined, } } case 'search': { return { ...baseParams, jql: params.jql, + nextPageToken: params.nextPageToken || undefined, maxResults: params.maxResults ? Number.parseInt(params.maxResults) : undefined, } } @@ -833,6 +921,7 @@ Return ONLY the comment text - no explanations.`, return { ...baseParams, issueKey: effectiveIssueKey, + startAt: params.startAt ? Number.parseInt(params.startAt) : undefined, maxResults: params.maxResults ? Number.parseInt(params.maxResults) : undefined, } } @@ -889,6 +978,7 @@ Return ONLY the comment text - no explanations.`, return { ...baseParams, issueKey: effectiveIssueKey, + startAt: params.startAt ? Number.parseInt(params.startAt) : undefined, maxResults: params.maxResults ? 
Number.parseInt(params.maxResults) : undefined, } } @@ -966,15 +1056,19 @@ Return ONLY the comment text - no explanations.`, summary: { type: 'string', description: 'Issue summary' }, description: { type: 'string', description: 'Issue description' }, issueType: { type: 'string', description: 'Issue type' }, - // Write operation additional inputs + // Write/Update operation additional inputs + parentIssue: { type: 'string', description: 'Parent issue key for subtasks' }, assignee: { type: 'string', description: 'Assignee account ID' }, priority: { type: 'string', description: 'Priority ID or name' }, labels: { type: 'string', description: 'Comma-separated labels for the issue' }, + components: { type: 'string', description: 'Comma-separated component names' }, duedate: { type: 'string', description: 'Due date in YYYY-MM-DD format' }, + fixVersions: { type: 'string', description: 'Comma-separated fix version names' }, reporter: { type: 'string', description: 'Reporter account ID' }, environment: { type: 'string', description: 'Environment information' }, customFieldId: { type: 'string', description: 'Custom field ID (e.g., customfield_10001)' }, customFieldValue: { type: 'string', description: 'Value for the custom field' }, + notifyUsers: { type: 'string', description: 'Whether to send notifications on update' }, // Delete operation inputs deleteSubtasks: { type: 'string', description: 'Whether to delete subtasks (true/false)' }, // Assign/Watcher operation inputs @@ -985,7 +1079,13 @@ Return ONLY the comment text - no explanations.`, // Transition operation inputs transitionId: { type: 'string', description: 'Transition ID for workflow status changes' }, transitionComment: { type: 'string', description: 'Optional comment for transition' }, + resolution: { type: 'string', description: 'Resolution name for transition (e.g., "Fixed")' }, // Search operation inputs + nextPageToken: { + type: 'string', + description: 'Cursor token for the next page of search results', + 
}, + startAt: { type: 'string', description: 'Pagination start index' }, jql: { type: 'string', description: 'JQL (Jira Query Language) search query' }, maxResults: { type: 'string', description: 'Maximum number of results to return' }, // Comment operation inputs @@ -1038,8 +1138,11 @@ Return ONLY the comment text - no explanations.`, id: { type: 'string', description: 'Jira issue ID' }, key: { type: 'string', description: 'Jira issue key' }, - // jira_search_issues outputs + // jira_search_issues / jira_bulk_read outputs total: { type: 'number', description: 'Total number of matching issues' }, + nextPageToken: { type: 'string', description: 'Cursor token for the next page of results' }, + isLast: { type: 'boolean', description: 'Whether this is the last page of results' }, + // Shared pagination outputs (get_comments, get_worklogs, get_users) startAt: { type: 'number', description: 'Pagination start index' }, maxResults: { type: 'number', description: 'Maximum results per page' }, issues: { diff --git a/apps/sim/blocks/blocks/jira_service_management.ts b/apps/sim/blocks/blocks/jira_service_management.ts index 95679eac6..86ac86e75 100644 --- a/apps/sim/blocks/blocks/jira_service_management.ts +++ b/apps/sim/blocks/blocks/jira_service_management.ts @@ -40,6 +40,7 @@ export const JiraServiceManagementBlock: BlockConfig = { { label: 'Add Participants', id: 'add_participants' }, { label: 'Get Approvals', id: 'get_approvals' }, { label: 'Answer Approval', id: 'answer_approval' }, + { label: 'Get Request Type Fields', id: 'get_request_type_fields' }, ], value: () => 'get_service_desks', }, @@ -109,6 +110,8 @@ export const JiraServiceManagementBlock: BlockConfig = { 'get_organizations', 'add_organization', 'get_queues', + 'get_requests', + 'get_request_type_fields', ], }, }, @@ -118,7 +121,7 @@ export const JiraServiceManagementBlock: BlockConfig = { type: 'short-input', required: true, placeholder: 'Enter request type ID', - condition: { field: 'operation', value: 
'create_request' }, + condition: { field: 'operation', value: ['create_request', 'get_request_type_fields'] }, }, { id: 'issueIdOrKey', @@ -188,6 +191,51 @@ Return ONLY the description text - no explanations.`, placeholder: 'Account ID to raise request on behalf of', condition: { field: 'operation', value: 'create_request' }, }, + { + id: 'requestParticipants', + title: 'Request Participants', + type: 'short-input', + placeholder: 'Comma-separated account IDs to add as participants', + condition: { field: 'operation', value: 'create_request' }, + }, + { + id: 'channel', + title: 'Channel', + type: 'short-input', + placeholder: 'Channel (e.g., portal, email)', + condition: { field: 'operation', value: 'create_request' }, + }, + { + id: 'requestFieldValues', + title: 'Custom Field Values', + type: 'long-input', + placeholder: 'JSON object of custom field values (e.g., {"customfield_10010": "value"})', + condition: { field: 'operation', value: 'create_request' }, + }, + { + id: 'searchQuery', + title: 'Search Query', + type: 'short-input', + placeholder: 'Filter request types by name', + condition: { field: 'operation', value: 'get_request_types' }, + }, + { + id: 'groupId', + title: 'Group ID', + type: 'short-input', + placeholder: 'Filter by request type group', + condition: { field: 'operation', value: 'get_request_types' }, + }, + { + id: 'expand', + title: 'Expand', + type: 'short-input', + placeholder: 'Comma-separated fields to expand', + condition: { + field: 'operation', + value: ['get_request', 'get_requests', 'get_comments'], + }, + }, { id: 'commentBody', title: 'Comment', @@ -220,11 +268,11 @@ Return ONLY the comment text - no explanations.`, condition: { field: 'operation', value: 'add_comment' }, }, { - id: 'emails', - title: 'Email Addresses', + id: 'accountIds', + title: 'Account IDs', type: 'short-input', required: true, - placeholder: 'Comma-separated email addresses', + placeholder: 'Comma-separated Atlassian account IDs', condition: { field: 
'operation', value: 'add_customer' }, }, { @@ -269,7 +317,7 @@ Return ONLY the comment text - no explanations.`, { label: 'All Requests', id: 'ALL_REQUESTS' }, { label: 'My Requests', id: 'OWNED_REQUESTS' }, { label: 'Participated', id: 'PARTICIPATED_REQUESTS' }, - { label: 'Organization', id: 'ORGANIZATION' }, + { label: 'Approver', id: 'APPROVER' }, ], value: () => 'ALL_REQUESTS', condition: { field: 'operation', value: 'get_requests' }, @@ -279,11 +327,11 @@ Return ONLY the comment text - no explanations.`, title: 'Request Status', type: 'dropdown', options: [ - { label: 'All', id: 'ALL' }, - { label: 'Open', id: 'OPEN' }, - { label: 'Closed', id: 'CLOSED' }, + { label: 'All', id: 'ALL_REQUESTS' }, + { label: 'Open', id: 'OPEN_REQUESTS' }, + { label: 'Closed', id: 'CLOSED_REQUESTS' }, ], - value: () => 'ALL', + value: () => 'ALL_REQUESTS', condition: { field: 'operation', value: 'get_requests' }, }, { @@ -363,6 +411,9 @@ Return ONLY the comment text - no explanations.`, 'get_organizations', 'get_queues', 'get_sla', + 'get_transitions', + 'get_participants', + 'get_approvals', ], }, }, @@ -389,6 +440,7 @@ Return ONLY the comment text - no explanations.`, 'jsm_add_participants', 'jsm_get_approvals', 'jsm_answer_approval', + 'jsm_get_request_type_fields', ], config: { tool: (params) => { @@ -433,6 +485,8 @@ Return ONLY the comment text - no explanations.`, return 'jsm_get_approvals' case 'answer_approval': return 'jsm_answer_approval' + case 'get_request_type_fields': + return 'jsm_get_request_type_fields' default: return 'jsm_get_service_desks' } @@ -456,6 +510,8 @@ Return ONLY the comment text - no explanations.`, return { ...baseParams, serviceDeskId: params.serviceDeskId, + searchQuery: params.searchQuery, + groupId: params.groupId, limit: params.maxResults ? 
Number.parseInt(params.maxResults) : undefined, } case 'create_request': @@ -475,6 +531,11 @@ Return ONLY the comment text - no explanations.`, summary: params.summary, description: params.description, raiseOnBehalfOf: params.raiseOnBehalfOf, + requestParticipants: params.requestParticipants, + channel: params.channel, + requestFieldValues: params.requestFieldValues + ? JSON.parse(params.requestFieldValues) + : undefined, } case 'get_request': if (!params.issueIdOrKey) { @@ -483,6 +544,7 @@ Return ONLY the comment text - no explanations.`, return { ...baseParams, issueIdOrKey: params.issueIdOrKey, + expand: params.expand, } case 'get_requests': return { @@ -491,6 +553,7 @@ Return ONLY the comment text - no explanations.`, requestOwnership: params.requestOwnership, requestStatus: params.requestStatus, searchTerm: params.searchTerm, + expand: params.expand, limit: params.maxResults ? Number.parseInt(params.maxResults) : undefined, } case 'add_comment': @@ -513,6 +576,7 @@ Return ONLY the comment text - no explanations.`, return { ...baseParams, issueIdOrKey: params.issueIdOrKey, + expand: params.expand, limit: params.maxResults ? Number.parseInt(params.maxResults) : undefined, } case 'get_customers': @@ -529,26 +593,14 @@ Return ONLY the comment text - no explanations.`, if (!params.serviceDeskId) { throw new Error('Service Desk ID is required') } - const accountIds = params.accountIds - ? params.accountIds - .split(',') - .map((id: string) => id.trim()) - .filter((id: string) => id) - : undefined - const emails = params.emails - ? 
params.emails - .split(',') - .map((email: string) => email.trim()) - .filter((email: string) => email) - : undefined - if ((!accountIds || accountIds.length === 0) && (!emails || emails.length === 0)) { - throw new Error('At least one account ID or email is required') + if (!params.accountIds && !params.emails) { + throw new Error('Account IDs or emails are required') } return { ...baseParams, serviceDeskId: params.serviceDeskId, - accountIds, - emails, + accountIds: params.accountIds, + emails: params.emails, } } case 'get_organizations': @@ -586,6 +638,7 @@ Return ONLY the comment text - no explanations.`, return { ...baseParams, issueIdOrKey: params.issueIdOrKey, + limit: params.maxResults ? Number.parseInt(params.maxResults) : undefined, } case 'transition_request': if (!params.issueIdOrKey) { @@ -666,6 +719,18 @@ Return ONLY the comment text - no explanations.`, approvalId: params.approvalId, decision: params.approvalDecision, } + case 'get_request_type_fields': + if (!params.serviceDeskId) { + throw new Error('Service Desk ID is required') + } + if (!params.requestTypeId) { + throw new Error('Request Type ID is required') + } + return { + ...baseParams, + serviceDeskId: params.serviceDeskId, + requestTypeId: params.requestTypeId, + } default: return baseParams } @@ -684,8 +749,11 @@ Return ONLY the comment text - no explanations.`, raiseOnBehalfOf: { type: 'string', description: 'Account ID to raise request on behalf of' }, commentBody: { type: 'string', description: 'Comment text' }, isPublic: { type: 'string', description: 'Whether comment is public or internal' }, - accountIds: { type: 'string', description: 'Comma-separated account IDs' }, - emails: { type: 'string', description: 'Comma-separated email addresses' }, + accountIds: { type: 'string', description: 'Comma-separated Atlassian account IDs' }, + emails: { + type: 'string', + description: 'Comma-separated email addresses', + }, customerQuery: { type: 'string', description: 'Customer search query' 
}, transitionId: { type: 'string', description: 'Transition ID' }, transitionComment: { type: 'string', description: 'Transition comment' }, @@ -702,6 +770,15 @@ Return ONLY the comment text - no explanations.`, }, approvalId: { type: 'string', description: 'Approval ID' }, approvalDecision: { type: 'string', description: 'Approval decision (approve/decline)' }, + requestParticipants: { + type: 'string', + description: 'Comma-separated account IDs for request participants', + }, + channel: { type: 'string', description: 'Channel (e.g., portal, email)' }, + requestFieldValues: { type: 'string', description: 'JSON object of custom field values' }, + searchQuery: { type: 'string', description: 'Filter request types by name' }, + groupId: { type: 'string', description: 'Filter by request type group ID' }, + expand: { type: 'string', description: 'Comma-separated fields to expand' }, }, outputs: { ts: { type: 'string', description: 'Timestamp of the operation' }, @@ -727,9 +804,19 @@ Return ONLY the comment text - no explanations.`, transitionId: { type: 'string', description: 'Applied transition ID' }, participants: { type: 'json', description: 'Array of participants' }, approvals: { type: 'json', description: 'Array of approvals' }, + approval: { type: 'json', description: 'Approval object' }, approvalId: { type: 'string', description: 'Approval ID' }, decision: { type: 'string', description: 'Approval decision' }, total: { type: 'number', description: 'Total count' }, isLastPage: { type: 'boolean', description: 'Whether this is the last page' }, + requestTypeFields: { type: 'json', description: 'Array of request type fields' }, + canAddRequestParticipants: { + type: 'boolean', + description: 'Whether participants can be added to this request type', + }, + canRaiseOnBehalfOf: { + type: 'boolean', + description: 'Whether requests can be raised on behalf of another user', + }, }, } diff --git a/apps/sim/blocks/blocks/onepassword.ts 
b/apps/sim/blocks/blocks/onepassword.ts new file mode 100644 index 000000000..7407c7f92 --- /dev/null +++ b/apps/sim/blocks/blocks/onepassword.ts @@ -0,0 +1,268 @@ +import { OnePasswordIcon } from '@/components/icons' +import { AuthMode, type BlockConfig } from '@/blocks/types' + +export const OnePasswordBlock: BlockConfig = { + type: 'onepassword', + name: '1Password', + description: 'Manage secrets and items in 1Password vaults', + longDescription: + 'Access and manage secrets stored in 1Password vaults using the Connect API or Service Account SDK. List vaults, retrieve items with their fields and secrets, create new items, update existing ones, delete items, and resolve secret references.', + docsLink: 'https://docs.sim.ai/tools/onepassword', + category: 'tools', + bgColor: '#E0E0E0', + icon: OnePasswordIcon, + authMode: AuthMode.ApiKey, + + subBlocks: [ + { + id: 'operation', + title: 'Operation', + type: 'dropdown', + options: [ + { label: 'List Vaults', id: 'list_vaults' }, + { label: 'Get Vault', id: 'get_vault' }, + { label: 'List Items', id: 'list_items' }, + { label: 'Get Item', id: 'get_item' }, + { label: 'Create Item', id: 'create_item' }, + { label: 'Replace Item', id: 'replace_item' }, + { label: 'Update Item', id: 'update_item' }, + { label: 'Delete Item', id: 'delete_item' }, + { label: 'Resolve Secret', id: 'resolve_secret' }, + ], + value: () => 'get_item', + }, + { + id: 'connectionMode', + title: 'Connection Mode', + type: 'dropdown', + options: [ + { label: 'Service Account', id: 'service_account' }, + { label: 'Connect Server', id: 'connect' }, + ], + value: () => 'service_account', + }, + { + id: 'serviceAccountToken', + title: 'Service Account Token', + type: 'short-input', + placeholder: 'Enter your 1Password Service Account token', + password: true, + required: { field: 'connectionMode', value: 'service_account' }, + condition: { field: 'connectionMode', value: 'service_account' }, + }, + { + id: 'serverUrl', + title: 'Server URL', + 
type: 'short-input', + placeholder: 'http://localhost:8080', + required: { field: 'connectionMode', value: 'connect' }, + condition: { field: 'connectionMode', value: 'connect' }, + }, + { + id: 'apiKey', + title: 'Connect Token', + type: 'short-input', + placeholder: 'Enter your 1Password Connect token', + password: true, + required: { field: 'connectionMode', value: 'connect' }, + condition: { field: 'connectionMode', value: 'connect' }, + }, + { + id: 'secretReference', + title: 'Secret Reference', + type: 'short-input', + placeholder: 'op://vault-name-or-id/item-name-or-id/field-name', + required: { field: 'operation', value: 'resolve_secret' }, + condition: { field: 'operation', value: 'resolve_secret' }, + wandConfig: { + enabled: true, + prompt: `Generate a 1Password secret reference URI based on the user's description. +The format is: op://vault-name-or-id/item-name-or-id/field-name +You can also use: op://vault/item/section/field for fields inside sections. +Examples: +- op://Development/AWS/access-key +- op://Production/Database/password +- op://MyVault/Stripe/API Keys/secret-key + +Return ONLY the op:// URI - no explanations, no quotes, no markdown.`, + }, + }, + { + id: 'vaultId', + title: 'Vault ID', + type: 'short-input', + placeholder: 'Enter vault UUID', + password: true, + required: { + field: 'operation', + value: [ + 'get_vault', + 'list_items', + 'get_item', + 'create_item', + 'replace_item', + 'update_item', + 'delete_item', + ], + }, + condition: { + field: 'operation', + value: ['list_vaults', 'resolve_secret'], + not: true, + }, + }, + { + id: 'itemId', + title: 'Item ID', + type: 'short-input', + placeholder: 'Enter item UUID', + required: { + field: 'operation', + value: ['get_item', 'replace_item', 'update_item', 'delete_item'], + }, + condition: { + field: 'operation', + value: ['get_item', 'replace_item', 'update_item', 'delete_item'], + }, + }, + { + id: 'filter', + title: 'Filter', + type: 'short-input', + placeholder: 'SCIM filter 
(e.g., name eq "My Vault")', + condition: { field: 'operation', value: ['list_vaults', 'list_items'] }, + }, + { + id: 'category', + title: 'Category', + type: 'dropdown', + options: [ + { label: 'Login', id: 'LOGIN' }, + { label: 'Password', id: 'PASSWORD' }, + { label: 'API Credential', id: 'API_CREDENTIAL' }, + { label: 'Secure Note', id: 'SECURE_NOTE' }, + { label: 'Server', id: 'SERVER' }, + { label: 'Database', id: 'DATABASE' }, + { label: 'Credit Card', id: 'CREDIT_CARD' }, + { label: 'Identity', id: 'IDENTITY' }, + { label: 'SSH Key', id: 'SSH_KEY' }, + ], + value: () => 'LOGIN', + required: { field: 'operation', value: 'create_item' }, + condition: { field: 'operation', value: 'create_item' }, + }, + { + id: 'title', + title: 'Title', + type: 'short-input', + placeholder: 'Item title', + condition: { field: 'operation', value: 'create_item' }, + }, + { + id: 'tags', + title: 'Tags', + type: 'short-input', + placeholder: 'Comma-separated tags (e.g., production, api)', + condition: { field: 'operation', value: 'create_item' }, + }, + { + id: 'fields', + title: 'Fields', + type: 'code', + placeholder: + '[\n {\n "label": "username",\n "value": "admin",\n "type": "STRING",\n "purpose": "USERNAME"\n }\n]', + condition: { field: 'operation', value: 'create_item' }, + wandConfig: { + enabled: true, + prompt: `Generate a 1Password item fields JSON array based on the user's description. +Each field object can have: label, value, type (STRING, CONCEALED, EMAIL, URL, TOTP, DATE), purpose (USERNAME, PASSWORD, NOTES, or empty). 
+Examples: +- [{"label":"username","value":"admin","type":"STRING","purpose":"USERNAME"},{"label":"password","value":"secret123","type":"CONCEALED","purpose":"PASSWORD"}] +- [{"label":"API Key","value":"sk-abc123","type":"CONCEALED"}] + +Return ONLY valid JSON - no explanations, no markdown code blocks.`, + }, + }, + { + id: 'item', + title: 'Item (JSON)', + type: 'code', + placeholder: + '{\n "vault": {"id": "..."},\n "category": "LOGIN",\n "title": "My Item",\n "fields": []\n}', + required: { field: 'operation', value: 'replace_item' }, + condition: { field: 'operation', value: 'replace_item' }, + wandConfig: { + enabled: true, + prompt: `Generate a full 1Password item JSON object based on the user's description. +The object must include vault.id, category, and optionally title, tags, fields, and sections. +Categories: LOGIN, PASSWORD, API_CREDENTIAL, SECURE_NOTE, SERVER, DATABASE, CREDIT_CARD, IDENTITY, SSH_KEY. +Field types: STRING, CONCEALED, EMAIL, URL, TOTP, DATE. Purposes: USERNAME, PASSWORD, NOTES, or empty. +Example: {"vault":{"id":"abc123"},"category":"LOGIN","title":"My Login","fields":[{"label":"username","value":"admin","type":"STRING","purpose":"USERNAME"}]} + +Return ONLY valid JSON - no explanations, no markdown code blocks.`, + }, + }, + { + id: 'operations', + title: 'Patch Operations (JSON)', + type: 'code', + placeholder: + '[\n {\n "op": "replace",\n "path": "/title",\n "value": "New Title"\n }\n]', + required: { field: 'operation', value: 'update_item' }, + condition: { field: 'operation', value: 'update_item' }, + wandConfig: { + enabled: true, + prompt: `Generate a JSON array of RFC6902 patch operations for a 1Password item based on the user's description. +Each operation has: op (add, remove, replace), path (JSON pointer), and value. 
+Examples: +- [{"op":"replace","path":"/title","value":"New Title"}] +- [{"op":"replace","path":"/fields/username/value","value":"newuser"}] +- [{"op":"add","path":"/tags/-","value":"production"}] + +Return ONLY valid JSON - no explanations, no markdown code blocks.`, + }, + }, + ], + + tools: { + access: [ + 'onepassword_list_vaults', + 'onepassword_get_vault', + 'onepassword_list_items', + 'onepassword_get_item', + 'onepassword_create_item', + 'onepassword_replace_item', + 'onepassword_update_item', + 'onepassword_delete_item', + 'onepassword_resolve_secret', + ], + config: { + tool: (params) => `onepassword_${params.operation}`, + }, + }, + + inputs: { + operation: { type: 'string', description: 'Operation to perform' }, + connectionMode: { type: 'string', description: 'Connection mode: service_account or connect' }, + serviceAccountToken: { type: 'string', description: '1Password Service Account token' }, + serverUrl: { type: 'string', description: '1Password Connect server URL' }, + apiKey: { type: 'string', description: '1Password Connect token' }, + secretReference: { type: 'string', description: 'Secret reference URI (op://...)' }, + vaultId: { type: 'string', description: 'Vault UUID' }, + itemId: { type: 'string', description: 'Item UUID' }, + filter: { type: 'string', description: 'SCIM filter expression' }, + category: { type: 'string', description: 'Item category' }, + title: { type: 'string', description: 'Item title' }, + tags: { type: 'string', description: 'Comma-separated tags' }, + fields: { type: 'string', description: 'JSON array of field objects' }, + item: { type: 'string', description: 'Full item JSON for replacement' }, + operations: { type: 'string', description: 'JSON array of patch operations' }, + }, + + outputs: { + response: { + type: 'json', + description: 'Operation response data', + }, + }, +} diff --git a/apps/sim/blocks/registry.ts b/apps/sim/blocks/registry.ts index 4d59cd866..301b7b350 100644 --- a/apps/sim/blocks/registry.ts 
+++ b/apps/sim/blocks/registry.ts @@ -91,6 +91,7 @@ import { Neo4jBlock } from '@/blocks/blocks/neo4j' import { NoteBlock } from '@/blocks/blocks/note' import { NotionBlock, NotionV2Block } from '@/blocks/blocks/notion' import { OneDriveBlock } from '@/blocks/blocks/onedrive' +import { OnePasswordBlock } from '@/blocks/blocks/onepassword' import { OpenAIBlock } from '@/blocks/blocks/openai' import { OutlookBlock } from '@/blocks/blocks/outlook' import { ParallelBlock } from '@/blocks/blocks/parallel' @@ -268,6 +269,7 @@ export const registry: Record = { note: NoteBlock, notion: NotionBlock, notion_v2: NotionV2Block, + onepassword: OnePasswordBlock, onedrive: OneDriveBlock, openai: OpenAIBlock, outlook: OutlookBlock, diff --git a/apps/sim/components/icons.tsx b/apps/sim/components/icons.tsx index d62410d7f..f13fc8aa8 100644 --- a/apps/sim/components/icons.tsx +++ b/apps/sim/components/icons.tsx @@ -5483,3 +5483,37 @@ export function AgentSkillsIcon(props: SVGProps) { ) } + +export function OnePasswordIcon(props: SVGProps) { + return ( + + + + + + + ) +} diff --git a/apps/sim/components/ui/tool-call.tsx b/apps/sim/components/ui/tool-call.tsx index 0d7d2ece2..bc523894f 100644 --- a/apps/sim/components/ui/tool-call.tsx +++ b/apps/sim/components/ui/tool-call.tsx @@ -5,10 +5,43 @@ import { CheckCircle, ChevronDown, ChevronRight, Loader2, Settings, XCircle } fr import { Badge } from '@/components/emcn' import { Button } from '@/components/ui/button' import { Collapsible, CollapsibleContent, CollapsibleTrigger } from '@/components/ui/collapsible' -import type { ToolCallGroup, ToolCallState } from '@/lib/copilot/types' import { cn } from '@/lib/core/utils/cn' import { formatDuration } from '@/lib/core/utils/formatting' +interface ToolCallState { + id: string + name: string + displayName?: string + parameters?: Record + state: + | 'detecting' + | 'pending' + | 'executing' + | 'completed' + | 'error' + | 'rejected' + | 'applied' + | 'ready_for_review' + | 'aborted' + | 
'skipped' + | 'background' + startTime?: number + endTime?: number + duration?: number + result?: unknown + error?: string + progress?: string +} + +interface ToolCallGroup { + id: string + toolCalls: ToolCallState[] + status: 'pending' | 'in_progress' | 'completed' | 'error' + startTime?: number + endTime?: number + summary?: string +} + interface ToolCallProps { toolCall: ToolCallState isCompact?: boolean diff --git a/apps/sim/executor/execution/engine.ts b/apps/sim/executor/execution/engine.ts index 47afd8b03..2f4791252 100644 --- a/apps/sim/executor/execution/engine.ts +++ b/apps/sim/executor/execution/engine.ts @@ -4,6 +4,7 @@ import { BlockType } from '@/executor/constants' import type { DAG } from '@/executor/dag/builder' import type { EdgeManager } from '@/executor/execution/edge-manager' import { serializePauseSnapshot } from '@/executor/execution/snapshot-serializer' +import type { SerializableExecutionState } from '@/executor/execution/types' import type { NodeExecutionOrchestrator } from '@/executor/orchestrators/node' import type { ExecutionContext, @@ -135,6 +136,7 @@ export class ExecutionEngine { success: false, output: this.finalOutput, logs: this.context.blockLogs, + executionState: this.getSerializableExecutionState(), metadata: this.context.metadata, status: 'cancelled', } @@ -144,6 +146,7 @@ export class ExecutionEngine { success: true, output: this.finalOutput, logs: this.context.blockLogs, + executionState: this.getSerializableExecutionState(), metadata: this.context.metadata, } } catch (error) { @@ -157,6 +160,7 @@ export class ExecutionEngine { success: false, output: this.finalOutput, logs: this.context.blockLogs, + executionState: this.getSerializableExecutionState(), metadata: this.context.metadata, status: 'cancelled', } @@ -459,6 +463,7 @@ export class ExecutionEngine { success: true, output: this.collectPauseResponses(), logs: this.context.blockLogs, + executionState: this.getSerializableExecutionState(snapshotSeed), metadata: 
this.context.metadata, status: 'paused', pausePoints, @@ -466,6 +471,24 @@ export class ExecutionEngine { } } + private getSerializableExecutionState(snapshotSeed?: { + snapshot: string + }): SerializableExecutionState | undefined { + try { + const serializedSnapshot = + snapshotSeed?.snapshot ?? serializePauseSnapshot(this.context, [], this.dag).snapshot + const parsedSnapshot = JSON.parse(serializedSnapshot) as { + state?: SerializableExecutionState + } + return parsedSnapshot.state + } catch (error) { + logger.warn('Failed to serialize execution state', { + error: error instanceof Error ? error.message : String(error), + }) + return undefined + } + } + private collectPauseResponses(): NormalizedBlockOutput { const responses = Array.from(this.pausedBlocks.values()).map((pause) => pause.response) diff --git a/apps/sim/executor/types.ts b/apps/sim/executor/types.ts index 10c1996b3..b8bcb70f1 100644 --- a/apps/sim/executor/types.ts +++ b/apps/sim/executor/types.ts @@ -1,6 +1,7 @@ import type { TraceSpan } from '@/lib/logs/types' import type { PermissionGroupConfig } from '@/lib/permission-groups/types' import type { BlockOutput } from '@/blocks/types' +import type { SerializableExecutionState } from '@/executor/execution/types' import type { RunFromBlockContext } from '@/executor/utils/run-from-block' import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types' @@ -302,6 +303,7 @@ export interface ExecutionResult { output: NormalizedBlockOutput error?: string logs?: BlockLog[] + executionState?: SerializableExecutionState metadata?: ExecutionMetadata status?: 'completed' | 'paused' | 'cancelled' pausePoints?: PausePoint[] diff --git a/apps/sim/hooks/queries/mcp.ts b/apps/sim/hooks/queries/mcp.ts index 5ef4170d3..607cb5e1e 100644 --- a/apps/sim/hooks/queries/mcp.ts +++ b/apps/sim/hooks/queries/mcp.ts @@ -1,3 +1,4 @@ +import { useEffect } from 'react' import { createLogger } from '@sim/logger' import { keepPreviousData, useMutation, useQuery, 
useQueryClient } from '@tanstack/react-query' import { sanitizeForHttp, sanitizeHeaders } from '@/lib/mcp/shared' @@ -359,3 +360,65 @@ export function useStoredMcpTools(workspaceId: string) { staleTime: 60 * 1000, }) } + +/** + * Shared EventSource connections keyed by workspaceId. + * Reference-counted so the connection is closed when the last consumer unmounts. + * Attached to `globalThis` so connections survive HMR in development. + */ +const SSE_KEY = '__mcp_sse_connections' as const + +type SseEntry = { source: EventSource; refs: number } + +const sseConnections: Map = + ((globalThis as Record)[SSE_KEY] as Map) ?? + ((globalThis as Record)[SSE_KEY] = new Map()) + +/** + * Subscribe to MCP tool-change SSE events for a workspace. + * On each `tools_changed` event, invalidates the relevant React Query caches + * so the UI refreshes automatically. + */ +export function useMcpToolsEvents(workspaceId: string) { + const queryClient = useQueryClient() + + useEffect(() => { + if (!workspaceId) return + + const invalidate = () => { + queryClient.invalidateQueries({ queryKey: mcpKeys.tools(workspaceId) }) + queryClient.invalidateQueries({ queryKey: mcpKeys.servers(workspaceId) }) + queryClient.invalidateQueries({ queryKey: mcpKeys.storedTools(workspaceId) }) + } + + let entry = sseConnections.get(workspaceId) + + if (!entry) { + const source = new EventSource(`/api/mcp/events?workspaceId=${workspaceId}`) + + source.addEventListener('tools_changed', () => { + invalidate() + }) + + source.onerror = () => { + logger.warn(`SSE connection error for workspace ${workspaceId}`) + } + + entry = { source, refs: 0 } + sseConnections.set(workspaceId, entry) + } + + entry.refs++ + + return () => { + const current = sseConnections.get(workspaceId) + if (!current) return + + current.refs-- + if (current.refs <= 0) { + current.source.close() + sseConnections.delete(workspaceId) + } + } + }, [workspaceId, queryClient]) +} diff --git a/apps/sim/hooks/use-undo-redo.ts 
b/apps/sim/hooks/use-undo-redo.ts index 252f0785a..880af7c06 100644 --- a/apps/sim/hooks/use-undo-redo.ts +++ b/apps/sim/hooks/use-undo-redo.ts @@ -1,5 +1,12 @@ import { useCallback } from 'react' import { createLogger } from '@sim/logger' + +declare global { + interface Window { + __skipDiffRecording?: boolean + } +} + import type { Edge } from 'reactflow' import { useSession } from '@/lib/auth/auth-client' import { enqueueReplaceWorkflowState } from '@/lib/workflows/operations/socket-operations' @@ -908,7 +915,7 @@ export function useUndoRedo() { // Set flag to skip recording during this operation - ;(window as any).__skipDiffRecording = true + window.__skipDiffRecording = true try { // Restore baseline state and broadcast to everyone if (baselineSnapshot && activeWorkflowId) { @@ -945,7 +952,7 @@ export function useUndoRedo() { logger.info('Clearing diff UI state') useWorkflowDiffStore.getState().clearDiff({ restoreBaseline: false }) } finally { - ;(window as any).__skipDiffRecording = false + window.__skipDiffRecording = false } logger.info('Undid apply-diff operation successfully') @@ -965,7 +972,7 @@ export function useUndoRedo() { // Set flag to skip recording during this operation - ;(window as any).__skipDiffRecording = true + window.__skipDiffRecording = true try { // Apply the before-accept state (with markers for this user) useWorkflowStore.getState().replaceWorkflowState(beforeAccept) @@ -1004,7 +1011,7 @@ export function useUndoRedo() { diffAnalysis: diffAnalysis, }) } finally { - ;(window as any).__skipDiffRecording = false + window.__skipDiffRecording = false } logger.info('Undid accept-diff operation - restored diff view') @@ -1018,7 +1025,7 @@ export function useUndoRedo() { const { useWorkflowStore } = await import('@/stores/workflows/workflow/store') const { useSubBlockStore } = await import('@/stores/workflows/subblock/store') - ;(window as any).__skipDiffRecording = true + window.__skipDiffRecording = true try { // Apply the before-reject 
state (with markers for this user) useWorkflowStore.getState().replaceWorkflowState(beforeReject) @@ -1055,7 +1062,7 @@ export function useUndoRedo() { diffAnalysis: diffAnalysis, }) } finally { - ;(window as any).__skipDiffRecording = false + window.__skipDiffRecording = false } logger.info('Undid reject-diff operation - restored diff view') @@ -1526,7 +1533,7 @@ export function useUndoRedo() { // Set flag to skip recording during this operation - ;(window as any).__skipDiffRecording = true + window.__skipDiffRecording = true try { // Manually apply the proposed state and set up diff store (similar to setProposedChanges but with original baseline) const diffStore = useWorkflowDiffStore.getState() @@ -1567,7 +1574,7 @@ export function useUndoRedo() { diffAnalysis: diffAnalysis, }) } finally { - ;(window as any).__skipDiffRecording = false + window.__skipDiffRecording = false } logger.info('Redid apply-diff operation') @@ -1583,7 +1590,7 @@ export function useUndoRedo() { // Set flag to skip recording during this operation - ;(window as any).__skipDiffRecording = true + window.__skipDiffRecording = true try { // Clear diff state FIRST to prevent flash of colors (local UI only) // Use setState directly to ensure synchronous clearing @@ -1621,7 +1628,7 @@ export function useUndoRedo() { operationId: opId, }) } finally { - ;(window as any).__skipDiffRecording = false + window.__skipDiffRecording = false } logger.info('Redid accept-diff operation - cleared diff view') @@ -1635,7 +1642,7 @@ export function useUndoRedo() { const { useWorkflowStore } = await import('@/stores/workflows/workflow/store') const { useSubBlockStore } = await import('@/stores/workflows/subblock/store') - ;(window as any).__skipDiffRecording = true + window.__skipDiffRecording = true try { // Clear diff state FIRST to prevent flash of colors (local UI only) // Use setState directly to ensure synchronous clearing @@ -1673,7 +1680,7 @@ export function useUndoRedo() { operationId: opId, }) } finally 
{ - ;(window as any).__skipDiffRecording = false + window.__skipDiffRecording = false } logger.info('Redid reject-diff operation - cleared diff view') diff --git a/apps/sim/lib/billing/core/usage-log.ts b/apps/sim/lib/billing/core/usage-log.ts index a5c94393b..9c4e6851c 100644 --- a/apps/sim/lib/billing/core/usage-log.ts +++ b/apps/sim/lib/billing/core/usage-log.ts @@ -14,7 +14,7 @@ export type UsageLogCategory = 'model' | 'fixed' /** * Usage log source types */ -export type UsageLogSource = 'workflow' | 'wand' | 'copilot' +export type UsageLogSource = 'workflow' | 'wand' | 'copilot' | 'mcp_copilot' /** * Metadata for 'model' category charges diff --git a/apps/sim/lib/copilot/api.ts b/apps/sim/lib/copilot/api.ts index c680f9751..06ac46b32 100644 --- a/apps/sim/lib/copilot/api.ts +++ b/apps/sim/lib/copilot/api.ts @@ -1,4 +1,5 @@ import { createLogger } from '@sim/logger' +import { COPILOT_CHAT_API_PATH, COPILOT_CHAT_STREAM_API_PATH } from '@/lib/copilot/constants' import type { CopilotMode, CopilotModelId, CopilotTransportMode } from '@/lib/copilot/models' const logger = createLogger('CopilotAPI') @@ -82,6 +83,7 @@ export interface SendMessageRequest { executionId?: string }> commands?: string[] + resumeFromEventId?: number } /** @@ -120,7 +122,7 @@ export async function sendStreamingMessage( request: SendMessageRequest ): Promise { try { - const { abortSignal, ...requestBody } = request + const { abortSignal, resumeFromEventId, ...requestBody } = request try { const preview = Array.isArray((requestBody as any).contexts) ? (requestBody as any).contexts.map((c: any) => ({ @@ -136,9 +138,56 @@ export async function sendStreamingMessage( ? (requestBody as any).contexts.length : 0, contextsPreview: preview, + resumeFromEventId, }) - } catch {} - const response = await fetch('/api/copilot/chat', { + } catch (error) { + logger.warn('Failed to log streaming message context preview', { + error: error instanceof Error ? 
error.message : String(error), + }) + } + + const streamId = request.userMessageId + if (typeof resumeFromEventId === 'number') { + if (!streamId) { + return { + success: false, + error: 'streamId is required to resume a stream', + status: 400, + } + } + const url = `${COPILOT_CHAT_STREAM_API_PATH}?streamId=${encodeURIComponent( + streamId + )}&from=${encodeURIComponent(String(resumeFromEventId))}` + const response = await fetch(url, { + method: 'GET', + signal: abortSignal, + credentials: 'include', + }) + + if (!response.ok) { + const errorMessage = await handleApiError(response, 'Failed to resume streaming message') + return { + success: false, + error: errorMessage, + status: response.status, + } + } + + if (!response.body) { + return { + success: false, + error: 'No response body received', + status: 500, + } + } + + return { + success: true, + stream: response.body, + } + } + + const response = await fetch(COPILOT_CHAT_API_PATH, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ ...requestBody, stream: true }), diff --git a/apps/sim/lib/copilot/chat-context.ts b/apps/sim/lib/copilot/chat-context.ts new file mode 100644 index 000000000..b793f5f79 --- /dev/null +++ b/apps/sim/lib/copilot/chat-context.ts @@ -0,0 +1,66 @@ +import { createLogger } from '@sim/logger' +import { CopilotFiles } from '@/lib/uploads' +import { createFileContent } from '@/lib/uploads/utils/file-utils' + +const logger = createLogger('CopilotChatContext') + +/** + * Build conversation history from stored chat messages. + */ +export function buildConversationHistory( + messages: unknown[], + conversationId?: string +): { history: unknown[]; conversationId?: string } { + const history = Array.isArray(messages) ? messages : [] + return { + history, + ...(conversationId ? 
{ conversationId } : {}), + } +} + +export interface FileAttachmentInput { + id: string + key: string + name?: string + filename?: string + mimeType?: string + media_type?: string + size: number +} + +export interface FileContent { + type: string + [key: string]: unknown +} + +/** + * Process file attachments into content for the payload. + */ +export async function processFileAttachments( + fileAttachments: FileAttachmentInput[], + userId: string +): Promise { + if (!Array.isArray(fileAttachments) || fileAttachments.length === 0) return [] + + const processedFileContents: FileContent[] = [] + const requestId = `copilot-${userId}-${Date.now()}` + const processedAttachments = await CopilotFiles.processCopilotAttachments( + fileAttachments as Parameters[0], + requestId + ) + + for (const { buffer, attachment } of processedAttachments) { + const fileContent = createFileContent(buffer, attachment.media_type) + if (fileContent) { + processedFileContents.push(fileContent as FileContent) + } + } + + logger.debug('Processed file attachments for payload', { + userId, + inputCount: fileAttachments.length, + outputCount: processedFileContents.length, + }) + + return processedFileContents +} diff --git a/apps/sim/lib/copilot/chat-lifecycle.ts b/apps/sim/lib/copilot/chat-lifecycle.ts new file mode 100644 index 000000000..5d25eee24 --- /dev/null +++ b/apps/sim/lib/copilot/chat-lifecycle.ts @@ -0,0 +1,69 @@ +import { db } from '@sim/db' +import { copilotChats } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { and, eq } from 'drizzle-orm' + +const logger = createLogger('CopilotChatLifecycle') + +export interface ChatLoadResult { + chatId: string + chat: typeof copilotChats.$inferSelect | null + conversationHistory: unknown[] + isNew: boolean +} + +/** + * Resolve or create a copilot chat session. + * If chatId is provided, loads the existing chat. Otherwise creates a new one. 
+ */ +export async function resolveOrCreateChat(params: { + chatId?: string + userId: string + workflowId: string + model: string +}): Promise { + const { chatId, userId, workflowId, model } = params + + if (chatId) { + const [chat] = await db + .select() + .from(copilotChats) + .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, userId))) + .limit(1) + + return { + chatId, + chat: chat ?? null, + conversationHistory: chat && Array.isArray(chat.messages) ? chat.messages : [], + isNew: false, + } + } + + const [newChat] = await db + .insert(copilotChats) + .values({ + userId, + workflowId, + title: null, + model, + messages: [], + }) + .returning() + + if (!newChat) { + logger.warn('Failed to create new copilot chat row', { userId, workflowId }) + return { + chatId: '', + chat: null, + conversationHistory: [], + isNew: true, + } + } + + return { + chatId: newChat.id, + chat: newChat, + conversationHistory: [], + isNew: true, + } +} diff --git a/apps/sim/lib/copilot/chat-payload.ts b/apps/sim/lib/copilot/chat-payload.ts new file mode 100644 index 000000000..54763ee02 --- /dev/null +++ b/apps/sim/lib/copilot/chat-payload.ts @@ -0,0 +1,209 @@ +import { createLogger } from '@sim/logger' +import { processFileAttachments } from '@/lib/copilot/chat-context' +import { getCopilotModel } from '@/lib/copilot/config' +import { SIM_AGENT_VERSION } from '@/lib/copilot/constants' +import { getCredentialsServerTool } from '@/lib/copilot/tools/server/user/get-credentials' +import type { CopilotProviderConfig } from '@/lib/copilot/types' +import { env } from '@/lib/core/config/env' +import { tools } from '@/tools/registry' +import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils' + +const logger = createLogger('CopilotChatPayload') + +export interface BuildPayloadParams { + message: string + workflowId: string + userId: string + userMessageId: string + mode: string + model: string + conversationHistory?: unknown[] + contexts?: Array<{ type: string; 
content: string }> + fileAttachments?: Array<{ id: string; key: string; size: number; [key: string]: unknown }> + commands?: string[] + chatId?: string + implicitFeedback?: string +} + +interface ToolSchema { + name: string + description: string + input_schema: Record + defer_loading?: boolean + executeLocally?: boolean + oauth?: { required: boolean; provider: string } +} + +interface CredentialsPayload { + oauth: Record< + string, + { accessToken: string; accountId: string; name: string; expiresAt?: string } + > + apiKeys: string[] + metadata?: { + connectedOAuth: Array<{ provider: string; name: string; scopes?: string[] }> + configuredApiKeys: string[] + } +} + +function buildProviderConfig(selectedModel: string): CopilotProviderConfig | undefined { + const defaults = getCopilotModel('chat') + const envModel = env.COPILOT_MODEL || defaults.model + const providerEnv = env.COPILOT_PROVIDER + + if (!providerEnv) return undefined + + if (providerEnv === 'azure-openai') { + return { + provider: 'azure-openai', + model: envModel, + apiKey: env.AZURE_OPENAI_API_KEY, + apiVersion: 'preview', + endpoint: env.AZURE_OPENAI_ENDPOINT, + } + } + + if (providerEnv === 'azure-anthropic') { + return { + provider: 'azure-anthropic', + model: envModel, + apiKey: env.AZURE_ANTHROPIC_API_KEY, + apiVersion: env.AZURE_ANTHROPIC_API_VERSION, + endpoint: env.AZURE_ANTHROPIC_ENDPOINT, + } + } + + if (providerEnv === 'vertex') { + return { + provider: 'vertex', + model: envModel, + apiKey: env.COPILOT_API_KEY, + vertexProject: env.VERTEX_PROJECT, + vertexLocation: env.VERTEX_LOCATION, + } + } + + return { + provider: providerEnv as Exclude, + model: selectedModel, + apiKey: env.COPILOT_API_KEY, + } as CopilotProviderConfig +} + +/** + * Build the request payload for the copilot backend. 
+ */ +export async function buildCopilotRequestPayload( + params: BuildPayloadParams, + options: { + providerConfig?: CopilotProviderConfig + selectedModel: string + } +): Promise> { + const { + message, + workflowId, + userId, + userMessageId, + mode, + contexts, + fileAttachments, + commands, + chatId, + } = params + + const selectedModel = options.selectedModel + const providerConfig = options.providerConfig ?? buildProviderConfig(selectedModel) + + const effectiveMode = mode === 'agent' ? 'build' : mode + const transportMode = effectiveMode === 'build' ? 'agent' : effectiveMode + + const processedFileContents = await processFileAttachments(fileAttachments ?? [], userId) + + const integrationTools: ToolSchema[] = [] + let credentials: CredentialsPayload | null = null + + if (effectiveMode === 'build') { + // function_execute sandbox tool is now defined in Go — no need to send it + + try { + const rawCredentials = await getCredentialsServerTool.execute({ workflowId }, { userId }) + + const oauthMap: CredentialsPayload['oauth'] = {} + const connectedOAuth: Array<{ provider: string; name: string; scopes?: string[] }> = [] + for (const cred of rawCredentials?.oauth?.connected?.credentials ?? []) { + if (cred.accessToken) { + oauthMap[cred.provider] = { + accessToken: cred.accessToken, + accountId: cred.id, + name: cred.name, + } + connectedOAuth.push({ provider: cred.provider, name: cred.name }) + } + } + + credentials = { + oauth: oauthMap, + apiKeys: rawCredentials?.environment?.variableNames ?? [], + metadata: { + connectedOAuth, + configuredApiKeys: rawCredentials?.environment?.variableNames ?? [], + }, + } + } catch (error) { + logger.warn('Failed to fetch credentials for build payload', { + error: error instanceof Error ? 
error.message : String(error), + }) + } + + try { + const { createUserToolSchema } = await import('@/tools/params') + const latestTools = getLatestVersionTools(tools) + + for (const [toolId, toolConfig] of Object.entries(latestTools)) { + try { + const userSchema = createUserToolSchema(toolConfig) + const strippedName = stripVersionSuffix(toolId) + integrationTools.push({ + name: strippedName, + description: toolConfig.description || toolConfig.name || strippedName, + input_schema: userSchema as unknown as Record, + defer_loading: true, + ...(toolConfig.oauth?.required && { + oauth: { + required: true, + provider: toolConfig.oauth.provider, + }, + }), + }) + } catch (toolError) { + logger.warn('Failed to build schema for tool, skipping', { + toolId, + error: toolError instanceof Error ? toolError.message : String(toolError), + }) + } + } + } catch (error) { + logger.warn('Failed to build tool schemas for payload', { + error: error instanceof Error ? error.message : String(error), + }) + } + } + + return { + message, + workflowId, + userId, + model: selectedModel, + mode: transportMode, + messageId: userMessageId, + version: SIM_AGENT_VERSION, + ...(providerConfig ? { provider: providerConfig } : {}), + ...(contexts && contexts.length > 0 ? { context: contexts } : {}), + ...(chatId ? { chatId } : {}), + ...(processedFileContents.length > 0 ? { fileAttachments: processedFileContents } : {}), + ...(integrationTools.length > 0 ? { integrationTools } : {}), + ...(credentials ? { credentials } : {}), + ...(commands && commands.length > 0 ? 
{ commands } : {}), + } +} diff --git a/apps/sim/lib/copilot/client-sse/content-blocks.ts b/apps/sim/lib/copilot/client-sse/content-blocks.ts new file mode 100644 index 000000000..0e7788252 --- /dev/null +++ b/apps/sim/lib/copilot/client-sse/content-blocks.ts @@ -0,0 +1,147 @@ +import type { + ChatContext, + CopilotMessage, + MessageFileAttachment, +} from '@/stores/panel/copilot/types' +import type { ClientContentBlock, ClientStreamingContext } from './types' + +const TEXT_BLOCK_TYPE = 'text' +const THINKING_BLOCK_TYPE = 'thinking' +const CONTINUE_OPTIONS_TAG = '{"1":"Continue"}' + +export function createUserMessage( + content: string, + fileAttachments?: MessageFileAttachment[], + contexts?: ChatContext[], + messageId?: string +): CopilotMessage { + return { + id: messageId || crypto.randomUUID(), + role: 'user', + content, + timestamp: new Date().toISOString(), + ...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }), + ...(contexts && contexts.length > 0 && { contexts }), + ...(contexts && + contexts.length > 0 && { + contentBlocks: [{ type: 'contexts', contexts, timestamp: Date.now() }], + }), + } +} + +export function createStreamingMessage(): CopilotMessage { + return { + id: crypto.randomUUID(), + role: 'assistant', + content: '', + timestamp: new Date().toISOString(), + } +} + +export function createErrorMessage( + messageId: string, + content: string, + errorType?: 'usage_limit' | 'unauthorized' | 'forbidden' | 'rate_limit' | 'upgrade_required' +): CopilotMessage { + return { + id: messageId, + role: 'assistant', + content, + timestamp: new Date().toISOString(), + contentBlocks: [ + { + type: 'text', + content, + timestamp: Date.now(), + }, + ], + errorType, + } +} + +export function appendTextBlock(context: ClientStreamingContext, text: string) { + if (!text) return + context.accumulatedContent += text + if (context.currentTextBlock && context.contentBlocks.length > 0) { + const lastBlock = 
context.contentBlocks[context.contentBlocks.length - 1] + if (lastBlock.type === TEXT_BLOCK_TYPE && lastBlock === context.currentTextBlock) { + lastBlock.content += text + return + } + } + const newBlock: ClientContentBlock = { type: 'text', content: text, timestamp: Date.now() } + context.currentTextBlock = newBlock + context.contentBlocks.push(newBlock) +} + +export function appendContinueOption(content: string): string { + if (//i.test(content)) return content + const suffix = content.trim().length > 0 ? '\n\n' : '' + return `${content}${suffix}${CONTINUE_OPTIONS_TAG}` +} + +export function appendContinueOptionBlock(blocks: ClientContentBlock[]): ClientContentBlock[] { + if (!Array.isArray(blocks)) return blocks + const hasOptions = blocks.some( + (block) => + block?.type === TEXT_BLOCK_TYPE && + typeof block.content === 'string' && + //i.test(block.content) + ) + if (hasOptions) return blocks + return [ + ...blocks, + { + type: TEXT_BLOCK_TYPE, + content: CONTINUE_OPTIONS_TAG, + timestamp: Date.now(), + }, + ] +} + +export function stripContinueOption(content: string): string { + if (!content || !content.includes(CONTINUE_OPTIONS_TAG)) return content + const next = content.replace(CONTINUE_OPTIONS_TAG, '') + return next.replace(/\n{2,}\s*$/g, '\n').trimEnd() +} + +export function stripContinueOptionFromBlocks(blocks: ClientContentBlock[]): ClientContentBlock[] { + if (!Array.isArray(blocks)) return blocks + return blocks.flatMap((block) => { + if ( + block?.type === TEXT_BLOCK_TYPE && + typeof block.content === 'string' && + block.content.includes(CONTINUE_OPTIONS_TAG) + ) { + const nextContent = stripContinueOption(block.content) + if (!nextContent.trim()) return [] + return [{ ...block, content: nextContent }] + } + return [block] + }) +} + +export function beginThinkingBlock(context: ClientStreamingContext) { + if (!context.currentThinkingBlock) { + const newBlock: ClientContentBlock = { + type: 'thinking', + content: '', + timestamp: Date.now(), + 
startTime: Date.now(), + } + context.currentThinkingBlock = newBlock + context.contentBlocks.push(newBlock) + } + context.isInThinkingBlock = true + context.currentTextBlock = null +} + +export function finalizeThinkingBlock(context: ClientStreamingContext) { + if (context.currentThinkingBlock) { + context.currentThinkingBlock.duration = + Date.now() - (context.currentThinkingBlock.startTime || Date.now()) + } + context.isInThinkingBlock = false + context.currentThinkingBlock = null + context.currentTextBlock = null +} diff --git a/apps/sim/lib/copilot/client-sse/handlers.ts b/apps/sim/lib/copilot/client-sse/handlers.ts new file mode 100644 index 000000000..82f887a48 --- /dev/null +++ b/apps/sim/lib/copilot/client-sse/handlers.ts @@ -0,0 +1,935 @@ +import { createLogger } from '@sim/logger' +import { COPILOT_CONFIRM_API_PATH, STREAM_STORAGE_KEY } from '@/lib/copilot/constants' +import { asRecord } from '@/lib/copilot/orchestrator/sse-utils' +import type { SSEEvent } from '@/lib/copilot/orchestrator/types' +import { + isBackgroundState, + isRejectedState, + isReviewState, + resolveToolDisplay, +} from '@/lib/copilot/store-utils' +import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' +import type { CopilotStore, CopilotStreamInfo, CopilotToolCall } from '@/stores/panel/copilot/types' +import { useVariablesStore } from '@/stores/panel/variables/store' +import { useEnvironmentStore } from '@/stores/settings/environment/store' +import { useWorkflowDiffStore } from '@/stores/workflow-diff/store' +import { useWorkflowRegistry } from '@/stores/workflows/registry/store' +import type { WorkflowState } from '@/stores/workflows/workflow/types' +import { appendTextBlock, beginThinkingBlock, finalizeThinkingBlock } from './content-blocks' +import { CLIENT_EXECUTABLE_RUN_TOOLS, executeRunToolOnClient } from './run-tool-execution' +import type { ClientContentBlock, ClientStreamingContext } from './types' + +const logger = 
createLogger('CopilotClientSseHandlers') +const TEXT_BLOCK_TYPE = 'text' + +const MAX_BATCH_INTERVAL = 50 +const MIN_BATCH_INTERVAL = 16 +const MAX_QUEUE_SIZE = 5 + +/** + * Send an auto-accept confirmation to the server for auto-allowed tools. + * The server-side orchestrator polls Redis for this decision. + */ +export function sendAutoAcceptConfirmation(toolCallId: string): void { + fetch(COPILOT_CONFIRM_API_PATH, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ toolCallId, status: 'accepted' }), + }).catch((error) => { + logger.warn('Failed to send auto-accept confirmation', { + toolCallId, + error: error instanceof Error ? error.message : String(error), + }) + }) +} + +function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void { + if (typeof window === 'undefined') return + try { + if (!info) { + window.sessionStorage.removeItem(STREAM_STORAGE_KEY) + return + } + window.sessionStorage.setItem(STREAM_STORAGE_KEY, JSON.stringify(info)) + } catch (error) { + logger.warn('Failed to write active stream to storage', { + error: error instanceof Error ? 
error.message : String(error), + }) + } +} + +type StoreSet = ( + partial: Partial | ((state: CopilotStore) => Partial) +) => void + +export type SSEHandler = ( + data: SSEEvent, + context: ClientStreamingContext, + get: () => CopilotStore, + set: StoreSet +) => Promise | void + +const streamingUpdateQueue = new Map() +let streamingUpdateRAF: number | null = null +let lastBatchTime = 0 + +export function stopStreamingUpdates() { + if (streamingUpdateRAF !== null) { + cancelAnimationFrame(streamingUpdateRAF) + streamingUpdateRAF = null + } + streamingUpdateQueue.clear() +} + +function createOptimizedContentBlocks(contentBlocks: ClientContentBlock[]): ClientContentBlock[] { + const result: ClientContentBlock[] = new Array(contentBlocks.length) + for (let i = 0; i < contentBlocks.length; i++) { + const block = contentBlocks[i] + result[i] = { ...block } + } + return result +} + +export function flushStreamingUpdates(set: StoreSet) { + if (streamingUpdateRAF !== null) { + cancelAnimationFrame(streamingUpdateRAF) + streamingUpdateRAF = null + } + if (streamingUpdateQueue.size === 0) return + + const updates = new Map(streamingUpdateQueue) + streamingUpdateQueue.clear() + + set((state: CopilotStore) => { + if (updates.size === 0) return state + return { + messages: state.messages.map((msg) => { + const update = updates.get(msg.id) + if (update) { + return { + ...msg, + content: '', + contentBlocks: + update.contentBlocks.length > 0 + ? 
createOptimizedContentBlocks(update.contentBlocks) + : [], + } + } + return msg + }), + } + }) +} + +export function updateStreamingMessage(set: StoreSet, context: ClientStreamingContext) { + if (context.suppressStreamingUpdates) return + const now = performance.now() + streamingUpdateQueue.set(context.messageId, context) + const timeSinceLastBatch = now - lastBatchTime + const shouldFlushImmediately = + streamingUpdateQueue.size >= MAX_QUEUE_SIZE || timeSinceLastBatch > MAX_BATCH_INTERVAL + + if (streamingUpdateRAF === null) { + const scheduleUpdate = () => { + streamingUpdateRAF = requestAnimationFrame(() => { + const updates = new Map(streamingUpdateQueue) + streamingUpdateQueue.clear() + streamingUpdateRAF = null + lastBatchTime = performance.now() + set((state: CopilotStore) => { + if (updates.size === 0) return state + const messages = state.messages + const lastMessage = messages[messages.length - 1] + const lastMessageUpdate = lastMessage ? updates.get(lastMessage.id) : null + if (updates.size === 1 && lastMessageUpdate) { + const newMessages = [...messages] + newMessages[messages.length - 1] = { + ...lastMessage, + content: '', + contentBlocks: + lastMessageUpdate.contentBlocks.length > 0 + ? createOptimizedContentBlocks(lastMessageUpdate.contentBlocks) + : [], + } + return { messages: newMessages } + } + return { + messages: messages.map((msg) => { + const update = updates.get(msg.id) + if (update) { + return { + ...msg, + content: '', + contentBlocks: + update.contentBlocks.length > 0 + ? 
createOptimizedContentBlocks(update.contentBlocks) + : [], + } + } + return msg + }), + } + }) + }) + } + if (shouldFlushImmediately) scheduleUpdate() + else setTimeout(scheduleUpdate, Math.max(0, MIN_BATCH_INTERVAL - timeSinceLastBatch)) + } +} + +export function upsertToolCallBlock(context: ClientStreamingContext, toolCall: CopilotToolCall) { + let found = false + for (let i = 0; i < context.contentBlocks.length; i++) { + const b = context.contentBlocks[i] + if (b.type === 'tool_call' && b.toolCall?.id === toolCall.id) { + context.contentBlocks[i] = { ...b, toolCall } + found = true + break + } + } + if (!found) { + context.contentBlocks.push({ type: 'tool_call', toolCall, timestamp: Date.now() }) + } +} + +function stripThinkingTags(text: string): string { + return text.replace(/<\/?thinking[^>]*>/gi, '').replace(/<\/?thinking[^&]*>/gi, '') +} + +function appendThinkingContent(context: ClientStreamingContext, text: string) { + if (!text) return + const cleanedText = stripThinkingTags(text) + if (!cleanedText) return + if (context.currentThinkingBlock) { + context.currentThinkingBlock.content += cleanedText + } else { + const newBlock: ClientContentBlock = { + type: 'thinking', + content: cleanedText, + timestamp: Date.now(), + startTime: Date.now(), + } + context.currentThinkingBlock = newBlock + context.contentBlocks.push(newBlock) + } + context.isInThinkingBlock = true + context.currentTextBlock = null +} + +export const sseHandlers: Record = { + chat_id: async (data, context, get, set) => { + context.newChatId = data.chatId + const { currentChat, activeStream } = get() + if (!currentChat && context.newChatId) { + await get().handleNewChatCreation(context.newChatId) + } + if (activeStream && context.newChatId && !activeStream.chatId) { + const updatedStream = { ...activeStream, chatId: context.newChatId } + set({ activeStream: updatedStream }) + writeActiveStreamToStorage(updatedStream) + } + }, + title_updated: (_data, _context, get, set) => { + const title = 
_data.title + if (!title) return + const { currentChat, chats } = get() + if (currentChat) { + set({ + currentChat: { ...currentChat, title }, + chats: chats.map((c) => (c.id === currentChat.id ? { ...c, title } : c)), + }) + } + }, + tool_result: (data, context, get, set) => { + try { + const eventData = asRecord(data?.data) + const toolCallId: string | undefined = + data?.toolCallId || (eventData.id as string | undefined) + const success: boolean | undefined = data?.success + const failedDependency: boolean = data?.failedDependency === true + const resultObj = asRecord(data?.result) + const skipped: boolean = resultObj.skipped === true + if (!toolCallId) return + const { toolCallsById } = get() + const current = toolCallsById[toolCallId] + if (current) { + if ( + isRejectedState(current.state) || + isReviewState(current.state) || + isBackgroundState(current.state) + ) { + return + } + const targetState = success + ? ClientToolCallState.success + : failedDependency || skipped + ? ClientToolCallState.rejected + : ClientToolCallState.error + const updatedMap = { ...toolCallsById } + updatedMap[toolCallId] = { + ...current, + state: targetState, + display: resolveToolDisplay(current.name, targetState, current.id, current.params), + } + set({ toolCallsById: updatedMap }) + + if (targetState === ClientToolCallState.success && current.name === 'checkoff_todo') { + try { + const result = asRecord(data?.result) || asRecord(eventData.result) + const input = asRecord(current.params || current.input) + const todoId = (input.id || input.todoId || result.id || result.todoId) as + | string + | undefined + if (todoId) { + get().updatePlanTodoStatus(todoId, 'completed') + } + } catch (error) { + logger.warn('Failed to process checkoff_todo tool result', { + error: error instanceof Error ? 
error.message : String(error), + toolCallId, + }) + } + } + + if ( + targetState === ClientToolCallState.success && + current.name === 'mark_todo_in_progress' + ) { + try { + const result = asRecord(data?.result) || asRecord(eventData.result) + const input = asRecord(current.params || current.input) + const todoId = (input.id || input.todoId || result.id || result.todoId) as + | string + | undefined + if (todoId) { + get().updatePlanTodoStatus(todoId, 'executing') + } + } catch (error) { + logger.warn('Failed to process mark_todo_in_progress tool result', { + error: error instanceof Error ? error.message : String(error), + toolCallId, + }) + } + } + + if (current.name === 'edit_workflow') { + try { + const resultPayload = asRecord( + data?.result || eventData.result || eventData.data || data?.data + ) + const workflowState = asRecord(resultPayload?.workflowState) + const hasWorkflowState = !!resultPayload?.workflowState + logger.info('[SSE] edit_workflow result received', { + hasWorkflowState, + blockCount: hasWorkflowState ? Object.keys(workflowState.blocks ?? {}).length : 0, + edgeCount: Array.isArray(workflowState.edges) ? workflowState.edges.length : 0, + }) + if (hasWorkflowState) { + const diffStore = useWorkflowDiffStore.getState() + diffStore + .setProposedChanges(resultPayload.workflowState as WorkflowState) + .catch((err) => { + logger.error('[SSE] Failed to apply edit_workflow diff', { + error: err instanceof Error ? err.message : String(err), + }) + }) + } + } catch (err) { + logger.error('[SSE] edit_workflow result handling failed', { + error: err instanceof Error ? 
err.message : String(err), + }) + } + } + + // Deploy tools: update deployment status in workflow registry + if ( + targetState === ClientToolCallState.success && + (current.name === 'deploy_api' || + current.name === 'deploy_chat' || + current.name === 'deploy_mcp' || + current.name === 'redeploy') + ) { + try { + const resultPayload = asRecord( + data?.result || eventData.result || eventData.data || data?.data + ) + const input = asRecord(current.params) + const workflowId = + (resultPayload?.workflowId as string) || + (input?.workflowId as string) || + useWorkflowRegistry.getState().activeWorkflowId + const isDeployed = resultPayload?.isDeployed !== false + if (workflowId) { + useWorkflowRegistry + .getState() + .setDeploymentStatus(workflowId, isDeployed, isDeployed ? new Date() : undefined) + logger.info('[SSE] Updated deployment status from tool result', { + toolName: current.name, + workflowId, + isDeployed, + }) + } + } catch (err) { + logger.warn('[SSE] Failed to hydrate deployment status', { + error: err instanceof Error ? err.message : String(err), + }) + } + } + + // Environment variables: reload store after successful set + if ( + targetState === ClientToolCallState.success && + current.name === 'set_environment_variables' + ) { + try { + useEnvironmentStore.getState().loadEnvironmentVariables() + logger.info('[SSE] Triggered environment variables reload') + } catch (err) { + logger.warn('[SSE] Failed to reload environment variables', { + error: err instanceof Error ? 
err.message : String(err), + }) + } + } + + // Workflow variables: reload store after successful set + if ( + targetState === ClientToolCallState.success && + current.name === 'set_global_workflow_variables' + ) { + try { + const input = asRecord(current.params) + const workflowId = + (input?.workflowId as string) || useWorkflowRegistry.getState().activeWorkflowId + if (workflowId) { + useVariablesStore.getState().loadForWorkflow(workflowId) + logger.info('[SSE] Triggered workflow variables reload', { workflowId }) + } + } catch (err) { + logger.warn('[SSE] Failed to reload workflow variables', { + error: err instanceof Error ? err.message : String(err), + }) + } + } + + // Generate API key: update deployment status with the new key + if (targetState === ClientToolCallState.success && current.name === 'generate_api_key') { + try { + const resultPayload = asRecord( + data?.result || eventData.result || eventData.data || data?.data + ) + const input = asRecord(current.params) + const workflowId = + (input?.workflowId as string) || useWorkflowRegistry.getState().activeWorkflowId + const apiKey = (resultPayload?.apiKey || resultPayload?.key) as string | undefined + if (workflowId) { + const existingStatus = useWorkflowRegistry + .getState() + .getWorkflowDeploymentStatus(workflowId) + useWorkflowRegistry + .getState() + .setDeploymentStatus( + workflowId, + existingStatus?.isDeployed ?? false, + existingStatus?.deployedAt, + apiKey + ) + logger.info('[SSE] Updated deployment status with API key', { + workflowId, + hasKey: !!apiKey, + }) + } + } catch (err) { + logger.warn('[SSE] Failed to hydrate API key status', { + error: err instanceof Error ? 
err.message : String(err), + }) + } + } + } + + for (let i = 0; i < context.contentBlocks.length; i++) { + const b = context.contentBlocks[i] + if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) { + if ( + isRejectedState(b.toolCall?.state) || + isReviewState(b.toolCall?.state) || + isBackgroundState(b.toolCall?.state) + ) + break + const targetState = success + ? ClientToolCallState.success + : failedDependency || skipped + ? ClientToolCallState.rejected + : ClientToolCallState.error + context.contentBlocks[i] = { + ...b, + toolCall: { + ...b.toolCall, + state: targetState, + display: resolveToolDisplay( + b.toolCall?.name, + targetState, + toolCallId, + b.toolCall?.params + ), + }, + } + break + } + } + updateStreamingMessage(set, context) + } catch (error) { + logger.warn('Failed to process tool_result SSE event', { + error: error instanceof Error ? error.message : String(error), + }) + } + }, + tool_error: (data, context, get, set) => { + try { + const errorData = asRecord(data?.data) + const toolCallId: string | undefined = + data?.toolCallId || (errorData.id as string | undefined) + const failedDependency: boolean = data?.failedDependency === true + if (!toolCallId) return + const { toolCallsById } = get() + const current = toolCallsById[toolCallId] + if (current) { + if ( + isRejectedState(current.state) || + isReviewState(current.state) || + isBackgroundState(current.state) + ) { + return + } + const targetState = failedDependency + ? 
ClientToolCallState.rejected + : ClientToolCallState.error + const updatedMap = { ...toolCallsById } + updatedMap[toolCallId] = { + ...current, + state: targetState, + display: resolveToolDisplay(current.name, targetState, current.id, current.params), + } + set({ toolCallsById: updatedMap }) + } + for (let i = 0; i < context.contentBlocks.length; i++) { + const b = context.contentBlocks[i] + if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) { + if ( + isRejectedState(b.toolCall?.state) || + isReviewState(b.toolCall?.state) || + isBackgroundState(b.toolCall?.state) + ) + break + const targetState = failedDependency + ? ClientToolCallState.rejected + : ClientToolCallState.error + context.contentBlocks[i] = { + ...b, + toolCall: { + ...b.toolCall, + state: targetState, + display: resolveToolDisplay( + b.toolCall?.name, + targetState, + toolCallId, + b.toolCall?.params + ), + }, + } + break + } + } + updateStreamingMessage(set, context) + } catch (error) { + logger.warn('Failed to process tool_error SSE event', { + error: error instanceof Error ? error.message : String(error), + }) + } + }, + tool_generating: (data, context, get, set) => { + const { toolCallId, toolName } = data + if (!toolCallId || !toolName) return + const { toolCallsById } = get() + + if (!toolCallsById[toolCallId]) { + const isAutoAllowed = get().isToolAutoAllowed(toolName) + const initialState = isAutoAllowed + ? 
ClientToolCallState.executing + : ClientToolCallState.pending + const tc: CopilotToolCall = { + id: toolCallId, + name: toolName, + state: initialState, + display: resolveToolDisplay(toolName, initialState, toolCallId), + } + const updated = { ...toolCallsById, [toolCallId]: tc } + set({ toolCallsById: updated }) + logger.info('[toolCallsById] map updated', updated) + + upsertToolCallBlock(context, tc) + updateStreamingMessage(set, context) + } + }, + tool_call: (data, context, get, set) => { + const toolData = asRecord(data?.data) + const id: string | undefined = (toolData.id as string | undefined) || data?.toolCallId + const name: string | undefined = (toolData.name as string | undefined) || data?.toolName + if (!id) return + const args = toolData.arguments as Record | undefined + const isPartial = toolData.partial === true + const { toolCallsById } = get() + + const existing = toolCallsById[id] + const toolName = name || existing?.name || 'unknown_tool' + const isAutoAllowed = get().isToolAutoAllowed(toolName) + let initialState = isAutoAllowed ? ClientToolCallState.executing : ClientToolCallState.pending + + // Avoid flickering back to pending on partial/duplicate events once a tool is executing. + if ( + existing?.state === ClientToolCallState.executing && + initialState === ClientToolCallState.pending + ) { + initialState = ClientToolCallState.executing + } + + const next: CopilotToolCall = existing + ? { + ...existing, + name: toolName, + state: initialState, + ...(args ? { params: args } : {}), + display: resolveToolDisplay(toolName, initialState, id, args || existing.params), + } + : { + id, + name: toolName, + state: initialState, + ...(args ? 
{ params: args } : {}), + display: resolveToolDisplay(toolName, initialState, id, args), + } + const updated = { ...toolCallsById, [id]: next } + set({ toolCallsById: updated }) + logger.info(`[toolCallsById] → ${initialState}`, { id, name: toolName, params: args }) + + upsertToolCallBlock(context, next) + updateStreamingMessage(set, context) + + if (isPartial) { + return + } + + // Auto-allowed tools: send confirmation to the server so it can proceed + // without waiting for the user to click "Allow". + if (isAutoAllowed) { + sendAutoAcceptConfirmation(id) + } + + // Client-executable run tools: execute on the client for real-time feedback + // (block pulsing, console logs, stop button). The server defers execution + // for these tools in interactive mode; the client reports back via mark-complete. + if ( + CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName) && + initialState === ClientToolCallState.executing + ) { + executeRunToolOnClient(id, toolName, args || existing?.params || {}) + } + + // OAuth: dispatch event to open the OAuth connect modal + if (toolName === 'oauth_request_access' && args && typeof window !== 'undefined') { + try { + window.dispatchEvent( + new CustomEvent('open-oauth-connect', { + detail: { + providerName: (args.providerName || args.provider_name || '') as string, + serviceId: (args.serviceId || args.service_id || '') as string, + providerId: (args.providerId || args.provider_id || '') as string, + requiredScopes: (args.requiredScopes || args.required_scopes || []) as string[], + newScopes: (args.newScopes || args.new_scopes || []) as string[], + }, + }) + ) + logger.info('[SSE] Dispatched OAuth connect event', { + providerId: args.providerId || args.provider_id, + providerName: args.providerName || args.provider_name, + }) + } catch (err) { + logger.warn('[SSE] Failed to dispatch OAuth connect event', { + error: err instanceof Error ? 
err.message : String(err), + }) + } + } + + return + }, + reasoning: (data, context, _get, set) => { + const phase = (data && (data.phase || data?.data?.phase)) as string | undefined + if (phase === 'start') { + beginThinkingBlock(context) + updateStreamingMessage(set, context) + return + } + if (phase === 'end') { + finalizeThinkingBlock(context) + updateStreamingMessage(set, context) + return + } + const chunk: string = typeof data?.data === 'string' ? data.data : data?.content || '' + if (!chunk) return + appendThinkingContent(context, chunk) + updateStreamingMessage(set, context) + }, + content: (data, context, get, set) => { + if (!data.data) return + context.pendingContent += data.data + + let contentToProcess = context.pendingContent + let hasProcessedContent = false + + const thinkingStartRegex = // + const thinkingEndRegex = /<\/thinking>/ + const designWorkflowStartRegex = // + const designWorkflowEndRegex = /<\/design_workflow>/ + + const splitTrailingPartialTag = ( + text: string, + tags: string[] + ): { text: string; remaining: string } => { + const partialIndex = text.lastIndexOf('<') + if (partialIndex < 0) { + return { text, remaining: '' } + } + const possibleTag = text.substring(partialIndex) + const matchesTagStart = tags.some((tag) => tag.startsWith(possibleTag)) + if (!matchesTagStart) { + return { text, remaining: '' } + } + return { + text: text.substring(0, partialIndex), + remaining: possibleTag, + } + } + + while (contentToProcess.length > 0) { + if (context.isInDesignWorkflowBlock) { + const endMatch = designWorkflowEndRegex.exec(contentToProcess) + if (endMatch) { + const designContent = contentToProcess.substring(0, endMatch.index) + context.designWorkflowContent += designContent + context.isInDesignWorkflowBlock = false + + logger.info('[design_workflow] Tag complete, setting plan content', { + contentLength: context.designWorkflowContent.length, + }) + set({ streamingPlanContent: context.designWorkflowContent }) + + contentToProcess = 
contentToProcess.substring(endMatch.index + endMatch[0].length) + hasProcessedContent = true + } else { + const { text, remaining } = splitTrailingPartialTag(contentToProcess, [ + '', + ]) + context.designWorkflowContent += text + + set({ streamingPlanContent: context.designWorkflowContent }) + + contentToProcess = remaining + hasProcessedContent = true + if (remaining) { + break + } + } + continue + } + + if (!context.isInThinkingBlock && !context.isInDesignWorkflowBlock) { + const designStartMatch = designWorkflowStartRegex.exec(contentToProcess) + if (designStartMatch) { + const textBeforeDesign = contentToProcess.substring(0, designStartMatch.index) + if (textBeforeDesign) { + appendTextBlock(context, textBeforeDesign) + hasProcessedContent = true + } + context.isInDesignWorkflowBlock = true + context.designWorkflowContent = '' + contentToProcess = contentToProcess.substring( + designStartMatch.index + designStartMatch[0].length + ) + hasProcessedContent = true + continue + } + + const nextMarkIndex = contentToProcess.indexOf('') + const nextCheckIndex = contentToProcess.indexOf('') + const hasMark = nextMarkIndex >= 0 + const hasCheck = nextCheckIndex >= 0 + + const nextTagIndex = + hasMark && hasCheck + ? Math.min(nextMarkIndex, nextCheckIndex) + : hasMark + ? nextMarkIndex + : hasCheck + ? nextCheckIndex + : -1 + + if (nextTagIndex >= 0) { + const isMarkTodo = hasMark && nextMarkIndex === nextTagIndex + const tagStart = isMarkTodo ? '' : '' + const tagEnd = isMarkTodo ? '' : '' + const closingIndex = contentToProcess.indexOf(tagEnd, nextTagIndex + tagStart.length) + + if (closingIndex === -1) { + break + } + + const todoId = contentToProcess + .substring(nextTagIndex + tagStart.length, closingIndex) + .trim() + logger.info( + isMarkTodo ? '[TODO] Detected marktodo tag' : '[TODO] Detected checkofftodo tag', + { todoId } + ) + + if (todoId) { + try { + get().updatePlanTodoStatus(todoId, isMarkTodo ? 'executing' : 'completed') + logger.info( + isMarkTodo + ? 
'[TODO] Successfully marked todo in progress' + : '[TODO] Successfully checked off todo', + { todoId } + ) + } catch (e) { + logger.error( + isMarkTodo + ? '[TODO] Failed to mark todo in progress' + : '[TODO] Failed to checkoff todo', + { todoId, error: e } + ) + } + } else { + logger.warn('[TODO] Empty todoId extracted from todo tag', { tagType: tagStart }) + } + + let beforeTag = contentToProcess.substring(0, nextTagIndex) + let afterTag = contentToProcess.substring(closingIndex + tagEnd.length) + + const hadNewlineBefore = /(\r?\n)+$/.test(beforeTag) + const hadNewlineAfter = /^(\r?\n)+/.test(afterTag) + + beforeTag = beforeTag.replace(/(\r?\n)+$/, '') + afterTag = afterTag.replace(/^(\r?\n)+/, '') + + contentToProcess = + beforeTag + (hadNewlineBefore && hadNewlineAfter ? '\n' : '') + afterTag + context.currentTextBlock = null + hasProcessedContent = true + continue + } + } + + if (context.isInThinkingBlock) { + const endMatch = thinkingEndRegex.exec(contentToProcess) + if (endMatch) { + const thinkingContent = contentToProcess.substring(0, endMatch.index) + appendThinkingContent(context, thinkingContent) + finalizeThinkingBlock(context) + contentToProcess = contentToProcess.substring(endMatch.index + endMatch[0].length) + hasProcessedContent = true + } else { + const { text, remaining } = splitTrailingPartialTag(contentToProcess, ['']) + if (text) { + appendThinkingContent(context, text) + hasProcessedContent = true + } + contentToProcess = remaining + if (remaining) { + break + } + } + } else { + const startMatch = thinkingStartRegex.exec(contentToProcess) + if (startMatch) { + const textBeforeThinking = contentToProcess.substring(0, startMatch.index) + if (textBeforeThinking) { + appendTextBlock(context, textBeforeThinking) + hasProcessedContent = true + } + context.isInThinkingBlock = true + context.currentTextBlock = null + contentToProcess = contentToProcess.substring(startMatch.index + startMatch[0].length) + hasProcessedContent = true + } else { + let 
partialTagIndex = contentToProcess.lastIndexOf('<') + + const partialMarkTodo = contentToProcess.lastIndexOf(' partialTagIndex) { + partialTagIndex = partialMarkTodo + } + if (partialCheckoffTodo > partialTagIndex) { + partialTagIndex = partialCheckoffTodo + } + + let textToAdd = contentToProcess + let remaining = '' + if (partialTagIndex >= 0 && partialTagIndex > contentToProcess.length - 50) { + textToAdd = contentToProcess.substring(0, partialTagIndex) + remaining = contentToProcess.substring(partialTagIndex) + } + if (textToAdd) { + appendTextBlock(context, textToAdd) + hasProcessedContent = true + } + contentToProcess = remaining + break + } + } + } + + context.pendingContent = contentToProcess + if (hasProcessedContent) { + updateStreamingMessage(set, context) + } + }, + done: (_data, context) => { + logger.info('[SSE] DONE EVENT RECEIVED', { + doneEventCount: context.doneEventCount, + data: _data, + }) + context.doneEventCount++ + if (context.doneEventCount >= 1) { + logger.info('[SSE] Setting streamComplete = true, stream will terminate') + context.streamComplete = true + } + }, + error: (data, context, _get, set) => { + logger.error('Stream error:', data.error) + set((state: CopilotStore) => ({ + messages: state.messages.map((msg) => + msg.id === context.messageId + ? 
{
+              ...msg,
+              content: context.accumulatedContent || 'An error occurred.',
+              error: data.error,
+            }
+          : msg
+      ),
+    }))
+    context.streamComplete = true
+  },
+  stream_end: (_data, context, _get, set) => {
+    // Flush any content still buffered when the stream ends: text inside an
+    // unterminated thinking block goes to that block; other non-whitespace
+    // remainders become a plain text block. Whitespace-only leftovers drop.
+    if (context.pendingContent) {
+      if (context.isInThinkingBlock && context.currentThinkingBlock) {
+        appendThinkingContent(context, context.pendingContent)
+      } else if (context.pendingContent.trim()) {
+        appendTextBlock(context, context.pendingContent)
+      }
+      context.pendingContent = ''
+    }
+    // Close out any open thinking block and push the final message state.
+    finalizeThinkingBlock(context)
+    updateStreamingMessage(set, context)
+  },
+  // Unknown event types are ignored deliberately.
+  default: () => {},
+}
diff --git a/apps/sim/lib/copilot/client-sse/index.ts b/apps/sim/lib/copilot/client-sse/index.ts
new file mode 100644
index 000000000..8c45d3ae1
--- /dev/null
+++ b/apps/sim/lib/copilot/client-sse/index.ts
@@ -0,0 +1,3 @@
+export type { SSEHandler } from './handlers'
+export { sseHandlers } from './handlers'
+export { applySseEvent, subAgentSSEHandlers } from './subagent-handlers'
diff --git a/apps/sim/lib/copilot/client-sse/run-tool-execution.ts b/apps/sim/lib/copilot/client-sse/run-tool-execution.ts
new file mode 100644
index 000000000..1835967aa
--- /dev/null
+++ b/apps/sim/lib/copilot/client-sse/run-tool-execution.ts
@@ -0,0 +1,221 @@
+import { createLogger } from '@sim/logger'
+import { v4 as uuidv4 } from 'uuid'
+import { COPILOT_CONFIRM_API_PATH } from '@/lib/copilot/constants'
+import { resolveToolDisplay } from '@/lib/copilot/store-utils'
+import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
+import { executeWorkflowWithFullLogging } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils'
+import { useExecutionStore } from '@/stores/execution/store'
+import { useCopilotStore } from '@/stores/panel/copilot/store'
+import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
+
+const logger = createLogger('CopilotRunToolExecution')
+
+/**
+ * Run tools that execute client-side for real-time feedback
+ * 
(block pulsing, logs, stop button, etc.). + */ +export const CLIENT_EXECUTABLE_RUN_TOOLS = new Set([ + 'run_workflow', + 'run_workflow_until_block', + 'run_from_block', + 'run_block', +]) + +/** + * Execute a run tool on the client side using the streaming execute endpoint. + * This gives full interactive feedback: block pulsing, console logs, stop button. + * + * Mirrors staging's RunWorkflowClientTool.handleAccept(): + * 1. Execute via executeWorkflowWithFullLogging + * 2. Update client tool state directly (success/error) + * 3. Report completion to server via /api/copilot/confirm (Redis), + * where the server-side handler picks it up and tells Go + */ +export function executeRunToolOnClient( + toolCallId: string, + toolName: string, + params: Record +): void { + doExecuteRunTool(toolCallId, toolName, params).catch((err) => { + logger.error('[RunTool] Unhandled error in client-side run tool execution', { + toolCallId, + toolName, + error: err instanceof Error ? err.message : String(err), + }) + }) +} + +async function doExecuteRunTool( + toolCallId: string, + toolName: string, + params: Record +): Promise { + const { isExecuting, setIsExecuting } = useExecutionStore.getState() + + if (isExecuting) { + logger.warn('[RunTool] Execution prevented: already executing', { toolCallId, toolName }) + setToolState(toolCallId, ClientToolCallState.error) + await reportCompletion(toolCallId, false, 'Workflow is already executing. 
Try again later') + return + } + + const { activeWorkflowId } = useWorkflowRegistry.getState() + if (!activeWorkflowId) { + logger.warn('[RunTool] Execution prevented: no active workflow', { toolCallId, toolName }) + setToolState(toolCallId, ClientToolCallState.error) + await reportCompletion(toolCallId, false, 'No active workflow found') + return + } + + // Extract params for all tool types + const workflowInput = (params.workflow_input || params.input || undefined) as + | Record + | undefined + + const stopAfterBlockId = (() => { + if (toolName === 'run_workflow_until_block') + return params.stopAfterBlockId as string | undefined + if (toolName === 'run_block') return params.blockId as string | undefined + return undefined + })() + + const runFromBlock = (() => { + if (toolName === 'run_from_block' && params.startBlockId) { + return { + startBlockId: params.startBlockId as string, + executionId: (params.executionId as string | undefined) || 'latest', + } + } + if (toolName === 'run_block' && params.blockId) { + return { + startBlockId: params.blockId as string, + executionId: (params.executionId as string | undefined) || 'latest', + } + } + return undefined + })() + + setIsExecuting(true) + const executionId = uuidv4() + const executionStartTime = new Date().toISOString() + + logger.info('[RunTool] Starting client-side workflow execution', { + toolCallId, + toolName, + executionId, + activeWorkflowId, + hasInput: !!workflowInput, + stopAfterBlockId, + runFromBlock: runFromBlock ? 
{ startBlockId: runFromBlock.startBlockId } : undefined, + }) + + try { + const result = await executeWorkflowWithFullLogging({ + workflowInput, + executionId, + stopAfterBlockId, + runFromBlock, + }) + + // Determine success (same logic as staging's RunWorkflowClientTool) + let succeeded = true + let errorMessage: string | undefined + try { + if (result && typeof result === 'object' && 'success' in (result as any)) { + succeeded = Boolean((result as any).success) + if (!succeeded) { + errorMessage = (result as any)?.error || (result as any)?.output?.error + } + } else if ( + result && + typeof result === 'object' && + 'execution' in (result as any) && + (result as any).execution + ) { + succeeded = Boolean((result as any).execution.success) + if (!succeeded) { + errorMessage = + (result as any).execution?.error || (result as any).execution?.output?.error + } + } + } catch {} + + if (succeeded) { + logger.info('[RunTool] Workflow execution succeeded', { toolCallId, toolName }) + setToolState(toolCallId, ClientToolCallState.success) + await reportCompletion( + toolCallId, + true, + `Workflow execution completed. Started at: ${executionStartTime}` + ) + } else { + const msg = errorMessage || 'Workflow execution failed' + logger.error('[RunTool] Workflow execution failed', { toolCallId, toolName, error: msg }) + setToolState(toolCallId, ClientToolCallState.error) + await reportCompletion(toolCallId, false, msg) + } + } catch (err) { + const msg = err instanceof Error ? err.message : String(err) + logger.error('[RunTool] Workflow execution threw', { toolCallId, toolName, error: msg }) + setToolState(toolCallId, ClientToolCallState.error) + await reportCompletion(toolCallId, false, msg) + } finally { + setIsExecuting(false) + } +} + +/** Update the tool call state directly in the copilot store (like staging's setState). 
*/ +function setToolState(toolCallId: string, state: ClientToolCallState): void { + try { + const store = useCopilotStore.getState() + const current = store.toolCallsById[toolCallId] + if (!current) return + const updated = { + ...store.toolCallsById, + [toolCallId]: { + ...current, + state, + display: resolveToolDisplay(current.name, state, toolCallId, current.params), + }, + } + useCopilotStore.setState({ toolCallsById: updated }) + } catch (err) { + logger.warn('[RunTool] Failed to update tool state', { + toolCallId, + state, + error: err instanceof Error ? err.message : String(err), + }) + } +} + +/** + * Report tool completion to the server via the existing /api/copilot/confirm endpoint. + * This writes {status: 'success'|'error', message} to Redis. The server-side handler + * is polling Redis via waitForToolCompletion() and will pick this up, then fire-and-forget + * markToolComplete to the Go backend. + */ +async function reportCompletion( + toolCallId: string, + success: boolean, + message?: string +): Promise { + try { + const res = await fetch(COPILOT_CONFIRM_API_PATH, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + toolCallId, + status: success ? 'success' : 'error', + message: message || (success ? 'Tool completed' : 'Tool failed'), + }), + }) + if (!res.ok) { + logger.warn('[RunTool] reportCompletion failed', { toolCallId, status: res.status }) + } + } catch (err) { + logger.error('[RunTool] reportCompletion error', { + toolCallId, + error: err instanceof Error ? 
err.message : String(err),
+    })
+  }
+}
diff --git a/apps/sim/lib/copilot/client-sse/subagent-handlers.ts b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts
new file mode 100644
index 000000000..314a40573
--- /dev/null
+++ b/apps/sim/lib/copilot/client-sse/subagent-handlers.ts
@@ -0,0 +1,416 @@
+import { createLogger } from '@sim/logger'
+import {
+  asRecord,
+  normalizeSseEvent,
+  shouldSkipToolCallEvent,
+  shouldSkipToolResultEvent,
+} from '@/lib/copilot/orchestrator/sse-utils'
+import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
+import { resolveToolDisplay } from '@/lib/copilot/store-utils'
+import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
+import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types'
+import {
+  type SSEHandler,
+  sendAutoAcceptConfirmation,
+  sseHandlers,
+  updateStreamingMessage,
+} from './handlers'
+import { CLIENT_EXECUTABLE_RUN_TOOLS, executeRunToolOnClient } from './run-tool-execution'
+import type { ClientStreamingContext } from './types'
+
+const logger = createLogger('CopilotClientSubagentHandlers')
+
+// Zustand-style setter signature. Type argument restored from mangled source — confirm.
+type StoreSet = (
+  partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)
+) => void
+
+/**
+ * Append streamed subagent text under its parent tool call, coalescing into
+ * the trailing subagent_text block when one exists (avoids one block per chunk).
+ */
+export function appendSubAgentContent(
+  context: ClientStreamingContext,
+  parentToolCallId: string,
+  text: string
+) {
+  if (!context.subAgentContent[parentToolCallId]) {
+    context.subAgentContent[parentToolCallId] = ''
+  }
+  if (!context.subAgentBlocks[parentToolCallId]) {
+    context.subAgentBlocks[parentToolCallId] = []
+  }
+  context.subAgentContent[parentToolCallId] += text
+  const blocks = context.subAgentBlocks[parentToolCallId]
+  const lastBlock = blocks[blocks.length - 1]
+  if (lastBlock && lastBlock.type === 'subagent_text') {
+    lastBlock.content = (lastBlock.content || '') + text
+  } else {
+    blocks.push({
+      type: 'subagent_text',
+      content: text,
+      timestamp: Date.now(),
+    })
+  }
+}
+
+/**
+ * Mirror the accumulated subagent content/tool-calls/blocks onto the parent
+ * tool call in both the store map and the streaming message's content blocks,
+ * then push the updated message to the UI.
+ */
+export function updateToolCallWithSubAgentData(
+  context: ClientStreamingContext,
+  get: () => CopilotStore,
+  set: StoreSet,
+  parentToolCallId: string
+) {
+  const { toolCallsById } = get()
+  const parentToolCall = toolCallsById[parentToolCallId]
+  if (!parentToolCall) {
+    logger.warn('[SubAgent] updateToolCallWithSubAgentData: parent tool call not found', {
+      parentToolCallId,
+      availableToolCallIds: Object.keys(toolCallsById),
+    })
+    return
+  }
+
+  const blocks = context.subAgentBlocks[parentToolCallId] ?? []
+
+  const updatedToolCall: CopilotToolCall = {
+    ...parentToolCall,
+    subAgentContent: context.subAgentContent[parentToolCallId] || '',
+    subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] ?? [],
+    subAgentBlocks: blocks,
+    subAgentStreaming: true,
+  }
+
+  logger.info('[SubAgent] Updating tool call with subagent data', {
+    parentToolCallId,
+    parentToolName: parentToolCall.name,
+    subAgentContentLength: updatedToolCall.subAgentContent?.length,
+    subAgentBlocksCount: updatedToolCall.subAgentBlocks?.length,
+    subAgentToolCallsCount: updatedToolCall.subAgentToolCalls?.length,
+  })
+
+  const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall }
+  set({ toolCallsById: updatedMap })
+
+  // Keep the streaming message's content blocks in sync with the store copy.
+  let foundInContentBlocks = false
+  for (let i = 0; i < context.contentBlocks.length; i++) {
+    const b = context.contentBlocks[i]
+    if (b.type === 'tool_call' && b.toolCall?.id === parentToolCallId) {
+      context.contentBlocks[i] = { ...b, toolCall: updatedToolCall }
+      foundInContentBlocks = true
+      break
+    }
+  }
+
+  if (!foundInContentBlocks) {
+    logger.warn('[SubAgent] Parent tool call not found in contentBlocks', {
+      parentToolCallId,
+      contentBlocksCount: context.contentBlocks.length,
+      toolCallBlockIds: context.contentBlocks
+        .filter((b) => b.type === 'tool_call')
+        .map((b) => b.toolCall?.id),
+    })
+  }
+
+  updateStreamingMessage(set, context)
+}
+
+// Type argument restored from mangled source — confirm.
+export const subAgentSSEHandlers: Record<string, SSEHandler> = {
+  start: () => {
+    // Subagent start event - no action needed, parent is already tracked from subagent_start
+  },
+ + content: (data, context, get, set) => { + const parentToolCallId = context.subAgentParentToolCallId + const contentStr = typeof data.data === 'string' ? data.data : data.content || '' + logger.info('[SubAgent] content event', { + parentToolCallId, + hasData: !!contentStr, + dataPreview: contentStr ? contentStr.substring(0, 50) : null, + }) + if (!parentToolCallId || !contentStr) { + logger.warn('[SubAgent] content missing parentToolCallId or data', { + parentToolCallId, + hasData: !!contentStr, + }) + return + } + + appendSubAgentContent(context, parentToolCallId, contentStr) + + updateToolCallWithSubAgentData(context, get, set, parentToolCallId) + }, + + reasoning: (data, context, get, set) => { + const parentToolCallId = context.subAgentParentToolCallId + const dataObj = asRecord(data?.data) + const phase = data?.phase || (dataObj.phase as string | undefined) + if (!parentToolCallId) return + + if (phase === 'start' || phase === 'end') return + + const chunk = typeof data?.data === 'string' ? 
data.data : data?.content || '' + if (!chunk) return + + appendSubAgentContent(context, parentToolCallId, chunk) + + updateToolCallWithSubAgentData(context, get, set, parentToolCallId) + }, + + tool_generating: () => { + // Tool generating event - no action needed, we'll handle the actual tool_call + }, + + tool_call: async (data, context, get, set) => { + const parentToolCallId = context.subAgentParentToolCallId + if (!parentToolCallId) return + + const toolData = asRecord(data?.data) + const id: string | undefined = (toolData.id as string | undefined) || data?.toolCallId + const name: string | undefined = (toolData.name as string | undefined) || data?.toolName + if (!id || !name) return + const isPartial = toolData.partial === true + + let args: Record | undefined = (toolData.arguments || toolData.input) as + | Record + | undefined + + if (typeof args === 'string') { + try { + args = JSON.parse(args) as Record + } catch { + logger.warn('[SubAgent] Failed to parse arguments string', { args }) + } + } + + logger.info('[SubAgent] tool_call received', { + id, + name, + hasArgs: !!args, + argsKeys: args ? Object.keys(args) : [], + toolDataKeys: Object.keys(toolData), + dataKeys: Object.keys(data ?? {}), + }) + + if (!context.subAgentToolCalls[parentToolCallId]) { + context.subAgentToolCalls[parentToolCallId] = [] + } + if (!context.subAgentBlocks[parentToolCallId]) { + context.subAgentBlocks[parentToolCallId] = [] + } + + const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex( + (tc: CopilotToolCall) => tc.id === id + ) + const existingToolCall = + existingIndex >= 0 ? context.subAgentToolCalls[parentToolCallId][existingIndex] : undefined + + // Auto-allowed tools skip pending state to avoid flashing interrupt buttons + const isAutoAllowed = get().isToolAutoAllowed(name) + let initialState = isAutoAllowed ? 
ClientToolCallState.executing : ClientToolCallState.pending + + // Avoid flickering back to pending on partial/duplicate events once a tool is executing. + if ( + existingToolCall?.state === ClientToolCallState.executing && + initialState === ClientToolCallState.pending + ) { + initialState = ClientToolCallState.executing + } + + const subAgentToolCall: CopilotToolCall = { + id, + name, + state: initialState, + ...(args ? { params: args } : {}), + display: resolveToolDisplay(name, initialState, id, args), + } + + if (existingIndex >= 0) { + context.subAgentToolCalls[parentToolCallId][existingIndex] = subAgentToolCall + } else { + context.subAgentToolCalls[parentToolCallId].push(subAgentToolCall) + + context.subAgentBlocks[parentToolCallId].push({ + type: 'subagent_tool_call', + toolCall: subAgentToolCall, + timestamp: Date.now(), + }) + } + + const { toolCallsById } = get() + const updated = { ...toolCallsById, [id]: subAgentToolCall } + set({ toolCallsById: updated }) + + updateToolCallWithSubAgentData(context, get, set, parentToolCallId) + + if (isPartial) { + return + } + + // Auto-allowed tools: send confirmation to the server so it can proceed + // without waiting for the user to click "Allow". + if (isAutoAllowed) { + sendAutoAcceptConfirmation(id) + } + + // Client-executable run tools: if auto-allowed, execute immediately for + // real-time feedback. For non-auto-allowed, the user must click "Allow" + // first — handleRun in tool-call.tsx triggers executeRunToolOnClient. 
+ if (CLIENT_EXECUTABLE_RUN_TOOLS.has(name) && isAutoAllowed) { + executeRunToolOnClient(id, name, args || {}) + } + }, + + tool_result: (data, context, get, set) => { + const parentToolCallId = context.subAgentParentToolCallId + if (!parentToolCallId) return + + const resultData = asRecord(data?.data) + const toolCallId: string | undefined = data?.toolCallId || (resultData.id as string | undefined) + // Determine success: explicit `success` field takes priority; otherwise + // infer from presence of result data vs error (same logic as server-side + // inferToolSuccess). The Go backend uses `*bool` with omitempty so + // `success` is present when explicitly set, and absent for non-tool events. + const hasExplicitSuccess = data?.success !== undefined || resultData.success !== undefined + const explicitSuccess = data?.success ?? resultData.success + const hasResultData = data?.result !== undefined || resultData.result !== undefined + const hasError = !!data?.error || !!resultData.error + const success: boolean = hasExplicitSuccess ? !!explicitSuccess : hasResultData && !hasError + if (!toolCallId) return + + if (!context.subAgentToolCalls[parentToolCallId]) return + if (!context.subAgentBlocks[parentToolCallId]) return + + const targetState = success ? 
ClientToolCallState.success : ClientToolCallState.error
+    const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex(
+      (tc: CopilotToolCall) => tc.id === toolCallId
+    )
+
+    if (existingIndex >= 0) {
+      const existing = context.subAgentToolCalls[parentToolCallId][existingIndex]
+      const updatedSubAgentToolCall = {
+        ...existing,
+        state: targetState,
+        display: resolveToolDisplay(existing.name, targetState, toolCallId, existing.params),
+      }
+      context.subAgentToolCalls[parentToolCallId][existingIndex] = updatedSubAgentToolCall
+
+      // Keep the rendered subagent block in sync with the updated state.
+      for (const block of context.subAgentBlocks[parentToolCallId]) {
+        if (block.type === 'subagent_tool_call' && block.toolCall?.id === toolCallId) {
+          block.toolCall = updatedSubAgentToolCall
+          break
+        }
+      }
+
+      const { toolCallsById } = get()
+      if (toolCallsById[toolCallId]) {
+        const updatedMap = {
+          ...toolCallsById,
+          [toolCallId]: updatedSubAgentToolCall,
+        }
+        set({ toolCallsById: updatedMap })
+        logger.info('[SubAgent] Updated subagent tool call state in toolCallsById', {
+          toolCallId,
+          name: existing.name,
+          state: targetState,
+        })
+      }
+    }
+
+    updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
+  },
+
+  done: (_data, context, get, set) => {
+    const parentToolCallId = context.subAgentParentToolCallId
+    if (!parentToolCallId) return
+
+    updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
+  },
+}
+
+/**
+ * Route a raw SSE event: handle subagent lifecycle events here, dispatch
+ * subagent-scoped events to subAgentSSEHandlers, and fall through to the
+ * top-level sseHandlers otherwise. Returns false once the stream is complete.
+ * (Return/Partial type arguments restored from mangled source — confirm.)
+ */
+export async function applySseEvent(
+  rawData: SSEEvent,
+  context: ClientStreamingContext,
+  get: () => CopilotStore,
+  set: (next: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)) => void
+): Promise<boolean> {
+  const normalizedEvent = normalizeSseEvent(rawData)
+  if (shouldSkipToolCallEvent(normalizedEvent) || shouldSkipToolResultEvent(normalizedEvent)) {
+    return true
+  }
+  const data = normalizedEvent
+
+  if (data.type === 'subagent_start') {
+    const startData = asRecord(data.data)
+    const toolCallId = startData.tool_call_id as string | undefined
+    if (toolCallId) {
+      // All subsequent subagent-scoped events attach to this parent tool call.
+      context.subAgentParentToolCallId = toolCallId
+      const { toolCallsById } = get()
+      const parentToolCall = toolCallsById[toolCallId]
+      if (parentToolCall) {
+        const updatedToolCall: CopilotToolCall = {
+          ...parentToolCall,
+          subAgentStreaming: true,
+        }
+        const updatedMap = { ...toolCallsById, [toolCallId]: updatedToolCall }
+        set({ toolCallsById: updatedMap })
+      }
+      logger.info('[SSE] Subagent session started', {
+        subagent: data.subagent,
+        parentToolCallId: toolCallId,
+      })
+    }
+    return true
+  }
+
+  if (data.type === 'subagent_end') {
+    const parentToolCallId = context.subAgentParentToolCallId
+    if (parentToolCallId) {
+      const { toolCallsById } = get()
+      const parentToolCall = toolCallsById[parentToolCallId]
+      if (parentToolCall) {
+        // Final flush of accumulated subagent data; streaming flag cleared.
+        const updatedToolCall: CopilotToolCall = {
+          ...parentToolCall,
+          subAgentContent: context.subAgentContent[parentToolCallId] || '',
+          subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] ?? [],
+          subAgentBlocks: context.subAgentBlocks[parentToolCallId] ?? 
[], + subAgentStreaming: false, + } + const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall } + set({ toolCallsById: updatedMap }) + logger.info('[SSE] Subagent session ended', { + subagent: data.subagent, + parentToolCallId, + contentLength: context.subAgentContent[parentToolCallId]?.length || 0, + toolCallCount: context.subAgentToolCalls[parentToolCallId]?.length || 0, + }) + } + } + context.subAgentParentToolCallId = undefined + return true + } + + if (data.subagent) { + const parentToolCallId = context.subAgentParentToolCallId + if (!parentToolCallId) { + logger.warn('[SSE] Subagent event without parent tool call ID', { + type: data.type, + subagent: data.subagent, + }) + return true + } + + logger.info('[SSE] Processing subagent event', { + type: data.type, + subagent: data.subagent, + parentToolCallId, + hasHandler: !!subAgentSSEHandlers[data.type], + }) + + const subAgentHandler = subAgentSSEHandlers[data.type] + if (subAgentHandler) { + await subAgentHandler(data, context, get, set) + } else { + logger.warn('[SSE] No handler for subagent event type', { type: data.type }) + } + return !context.streamComplete + } + + const handler = sseHandlers[data.type] || sseHandlers.default + await handler(data, context, get, set) + return !context.streamComplete +} diff --git a/apps/sim/lib/copilot/client-sse/types.ts b/apps/sim/lib/copilot/client-sse/types.ts new file mode 100644 index 000000000..5f46f7492 --- /dev/null +++ b/apps/sim/lib/copilot/client-sse/types.ts @@ -0,0 +1,45 @@ +import type { + ChatContext, + CopilotToolCall, + SubAgentContentBlock, +} from '@/stores/panel/copilot/types' + +/** + * A content block used in copilot messages and during streaming. + * Uses a literal type union for `type` to stay compatible with CopilotMessage. 
+ */ +export type ContentBlockType = 'text' | 'thinking' | 'tool_call' | 'contexts' + +export interface ClientContentBlock { + type: ContentBlockType + content?: string + timestamp: number + toolCall?: CopilotToolCall | null + startTime?: number + duration?: number + contexts?: ChatContext[] +} + +export interface StreamingContext { + messageId: string + accumulatedContent: string + contentBlocks: ClientContentBlock[] + currentTextBlock: ClientContentBlock | null + isInThinkingBlock: boolean + currentThinkingBlock: ClientContentBlock | null + isInDesignWorkflowBlock: boolean + designWorkflowContent: string + pendingContent: string + newChatId?: string + doneEventCount: number + streamComplete?: boolean + wasAborted?: boolean + suppressContinueOption?: boolean + subAgentParentToolCallId?: string + subAgentContent: Record + subAgentToolCalls: Record + subAgentBlocks: Record + suppressStreamingUpdates?: boolean +} + +export type ClientStreamingContext = StreamingContext diff --git a/apps/sim/lib/copilot/config.ts b/apps/sim/lib/copilot/config.ts index 5700e9930..d82a63012 100644 --- a/apps/sim/lib/copilot/config.ts +++ b/apps/sim/lib/copilot/config.ts @@ -109,14 +109,14 @@ function parseBooleanEnv(value: string | undefined): boolean | null { export const DEFAULT_COPILOT_CONFIG: CopilotConfig = { chat: { defaultProvider: 'anthropic', - defaultModel: 'claude-3-7-sonnet-latest', + defaultModel: 'claude-4.6-opus', temperature: 0.1, maxTokens: 8192, systemPrompt: AGENT_MODE_SYSTEM_PROMPT, }, rag: { defaultProvider: 'anthropic', - defaultModel: 'claude-3-7-sonnet-latest', + defaultModel: 'claude-4.6-opus', temperature: 0.1, maxTokens: 2000, embeddingModel: 'text-embedding-3-small', diff --git a/apps/sim/lib/copilot/constants.ts b/apps/sim/lib/copilot/constants.ts index e4b1f3a5d..f95ec48b3 100644 --- a/apps/sim/lib/copilot/constants.ts +++ b/apps/sim/lib/copilot/constants.ts @@ -1,2 +1,115 @@ +import { env } from '@/lib/core/config/env' + export const 
SIM_AGENT_API_URL_DEFAULT = 'https://copilot.sim.ai' -export const SIM_AGENT_VERSION = '1.0.3' +export const SIM_AGENT_VERSION = '3.0.0' + +/** Resolved copilot backend URL — reads from env with fallback to default. */ +const rawAgentUrl = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT +export const SIM_AGENT_API_URL = + rawAgentUrl.startsWith('http://') || rawAgentUrl.startsWith('https://') + ? rawAgentUrl + : SIM_AGENT_API_URL_DEFAULT + +// --------------------------------------------------------------------------- +// Redis key prefixes +// --------------------------------------------------------------------------- + +/** Redis key prefix for tool call confirmation payloads (polled by waitForToolDecision). */ +export const REDIS_TOOL_CALL_PREFIX = 'tool_call:' + +/** Redis key prefix for copilot SSE stream buffers. */ +export const REDIS_COPILOT_STREAM_PREFIX = 'copilot_stream:' + +// --------------------------------------------------------------------------- +// Timeouts +// --------------------------------------------------------------------------- + +/** Default timeout for the copilot orchestration stream loop (5 min). */ +export const ORCHESTRATION_TIMEOUT_MS = 300_000 + +/** Timeout for the client-side streaming response handler (10 min). */ +export const STREAM_TIMEOUT_MS = 600_000 + +/** TTL for Redis tool call confirmation entries (24 h). */ +export const REDIS_TOOL_CALL_TTL_SECONDS = 86_400 + +// --------------------------------------------------------------------------- +// Tool decision polling +// --------------------------------------------------------------------------- + +/** Initial poll interval when waiting for a user tool decision. */ +export const TOOL_DECISION_INITIAL_POLL_MS = 100 + +/** Maximum poll interval when waiting for a user tool decision. */ +export const TOOL_DECISION_MAX_POLL_MS = 3_000 + +/** Backoff multiplier for the tool decision poll interval. 
*/ +export const TOOL_DECISION_POLL_BACKOFF = 1.5 + +// --------------------------------------------------------------------------- +// Stream resume +// --------------------------------------------------------------------------- + +/** Maximum number of resume attempts before giving up. */ +export const MAX_RESUME_ATTEMPTS = 3 + +/** SessionStorage key for persisting active stream metadata across page reloads. */ +export const STREAM_STORAGE_KEY = 'copilot_active_stream' + +// --------------------------------------------------------------------------- +// Client-side streaming batching +// --------------------------------------------------------------------------- + +/** Delay (ms) before processing the next queued message after stream completion. */ +export const QUEUE_PROCESS_DELAY_MS = 100 + +/** Delay (ms) before invalidating subscription queries after stream completion. */ +export const SUBSCRIPTION_INVALIDATE_DELAY_MS = 1_000 + +// --------------------------------------------------------------------------- +// UI helpers +// --------------------------------------------------------------------------- + +/** Maximum character length for an optimistic chat title derived from a user message. */ +export const OPTIMISTIC_TITLE_MAX_LENGTH = 50 + +// --------------------------------------------------------------------------- +// Copilot API paths (client-side fetch targets) +// --------------------------------------------------------------------------- + +/** POST — send a chat message to the copilot. */ +export const COPILOT_CHAT_API_PATH = '/api/copilot/chat' + +/** GET — resume/replay a copilot SSE stream. */ +export const COPILOT_CHAT_STREAM_API_PATH = '/api/copilot/chat/stream' + +/** POST — persist chat messages / plan artifact / config. */ +export const COPILOT_UPDATE_MESSAGES_API_PATH = '/api/copilot/chat/update-messages' + +/** DELETE — delete a copilot chat. 
*/ +export const COPILOT_DELETE_CHAT_API_PATH = '/api/copilot/chat/delete' + +/** POST — confirm or reject a tool call. */ +export const COPILOT_CONFIRM_API_PATH = '/api/copilot/confirm' + +/** POST — forward diff-accepted/rejected stats to the copilot backend. */ +export const COPILOT_STATS_API_PATH = '/api/copilot/stats' + +/** GET — load checkpoints for a chat. */ +export const COPILOT_CHECKPOINTS_API_PATH = '/api/copilot/checkpoints' + +/** POST — revert to a checkpoint. */ +export const COPILOT_CHECKPOINTS_REVERT_API_PATH = '/api/copilot/checkpoints/revert' + +/** GET/POST/DELETE — manage auto-allowed tools. */ +export const COPILOT_AUTO_ALLOWED_TOOLS_API_PATH = '/api/copilot/auto-allowed-tools' + +/** GET — fetch user credentials for masking. */ +export const COPILOT_CREDENTIALS_API_PATH = '/api/copilot/credentials' + +// --------------------------------------------------------------------------- +// Dedup limits +// --------------------------------------------------------------------------- + +/** Maximum entries in the in-memory SSE tool-event dedup cache. 
*/ +export const STREAM_BUFFER_MAX_DEDUP_ENTRIES = 1_000 diff --git a/apps/sim/lib/copilot/messages/checkpoints.ts b/apps/sim/lib/copilot/messages/checkpoints.ts new file mode 100644 index 000000000..3764adedc --- /dev/null +++ b/apps/sim/lib/copilot/messages/checkpoints.ts @@ -0,0 +1,129 @@ +import { createLogger } from '@sim/logger' +import { COPILOT_CHECKPOINTS_API_PATH } from '@/lib/copilot/constants' +import type { CopilotMessage, CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types' +import { mergeSubblockState } from '@/stores/workflows/utils' +import { useWorkflowStore } from '@/stores/workflows/workflow/store' +import type { WorkflowState } from '@/stores/workflows/workflow/types' + +const logger = createLogger('CopilotMessageCheckpoints') + +export function buildCheckpointWorkflowState(workflowId: string): WorkflowState | null { + const rawState = useWorkflowStore.getState().getWorkflowState() + if (!rawState) return null + + const blocksWithSubblockValues = mergeSubblockState(rawState.blocks, workflowId) + + const filteredBlocks = Object.entries(blocksWithSubblockValues).reduce( + (acc, [blockId, block]) => { + if (block?.type && block?.name) { + acc[blockId] = { + ...block, + id: block.id || blockId, + enabled: block.enabled !== undefined ? block.enabled : true, + horizontalHandles: block.horizontalHandles !== undefined ? block.horizontalHandles : true, + height: block.height !== undefined ? block.height : 90, + subBlocks: block.subBlocks ?? {}, + outputs: block.outputs ?? {}, + data: block.data ?? {}, + position: block.position || { x: 0, y: 0 }, + } + } + return acc + }, + {} as WorkflowState['blocks'] + ) + + return { + blocks: filteredBlocks, + edges: rawState.edges ?? [], + loops: rawState.loops ?? {}, + parallels: rawState.parallels ?? {}, + lastSaved: rawState.lastSaved || Date.now(), + deploymentStatuses: rawState.deploymentStatuses ?? 
{}, + } +} + +export async function saveMessageCheckpoint( + messageId: string, + get: () => CopilotStore, + set: (partial: Partial | ((state: CopilotStore) => Partial)) => void +): Promise { + const { workflowId, currentChat, messageSnapshots, messageCheckpoints } = get() + if (!workflowId || !currentChat?.id) return false + + const snapshot = messageSnapshots[messageId] + if (!snapshot) return false + + const nextSnapshots = { ...messageSnapshots } + delete nextSnapshots[messageId] + set({ messageSnapshots: nextSnapshots }) + + try { + const response = await fetch(COPILOT_CHECKPOINTS_API_PATH, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + workflowId, + chatId: currentChat.id, + messageId, + workflowState: JSON.stringify(snapshot), + }), + }) + + if (!response.ok) { + throw new Error(`Failed to create checkpoint: ${response.statusText}`) + } + + const result = await response.json() + const newCheckpoint = result.checkpoint + if (newCheckpoint) { + const existingCheckpoints = messageCheckpoints[messageId] ?? 
[] + const updatedCheckpoints = { + ...messageCheckpoints, + [messageId]: [newCheckpoint, ...existingCheckpoints], + } + set({ messageCheckpoints: updatedCheckpoints }) + } + + return true + } catch (error) { + logger.error('Failed to create checkpoint from snapshot:', error) + return false + } +} + +export function extractToolCallsRecursively( + toolCall: CopilotToolCall, + map: Record +): void { + if (!toolCall?.id) return + map[toolCall.id] = toolCall + + if (Array.isArray(toolCall.subAgentBlocks)) { + for (const block of toolCall.subAgentBlocks) { + if (block?.type === 'subagent_tool_call' && block.toolCall?.id) { + extractToolCallsRecursively(block.toolCall, map) + } + } + } + + if (Array.isArray(toolCall.subAgentToolCalls)) { + for (const subTc of toolCall.subAgentToolCalls) { + extractToolCallsRecursively(subTc, map) + } + } +} + +export function buildToolCallsById(messages: CopilotMessage[]): Record { + const toolCallsById: Record = {} + for (const msg of messages) { + if (msg.contentBlocks) { + for (const block of msg.contentBlocks) { + if (block?.type === 'tool_call' && block.toolCall?.id) { + extractToolCallsRecursively(block.toolCall, toolCallsById) + } + } + } + } + return toolCallsById +} diff --git a/apps/sim/lib/copilot/messages/credential-masking.ts b/apps/sim/lib/copilot/messages/credential-masking.ts new file mode 100644 index 000000000..c1eee9f0b --- /dev/null +++ b/apps/sim/lib/copilot/messages/credential-masking.ts @@ -0,0 +1,28 @@ +export function maskCredentialIdsInValue(value: T, credentialIds: Set): T { + if (!value || credentialIds.size === 0) return value + + if (typeof value === 'string') { + let masked = value as string + const sortedIds = Array.from(credentialIds).sort((a, b) => b.length - a.length) + for (const id of sortedIds) { + if (id && masked.includes(id)) { + masked = masked.split(id).join('••••••••') + } + } + return masked as unknown as T + } + + if (Array.isArray(value)) { + return value.map((item) => 
maskCredentialIdsInValue(item, credentialIds)) as T + } + + if (typeof value === 'object') { + const masked: Record = {} + for (const key of Object.keys(value as Record)) { + masked[key] = maskCredentialIdsInValue((value as Record)[key], credentialIds) + } + return masked as T + } + + return value +} diff --git a/apps/sim/lib/copilot/messages/index.ts b/apps/sim/lib/copilot/messages/index.ts new file mode 100644 index 000000000..901c60943 --- /dev/null +++ b/apps/sim/lib/copilot/messages/index.ts @@ -0,0 +1,4 @@ +export * from './checkpoints' +export * from './credential-masking' +export * from './persist' +export * from './serialization' diff --git a/apps/sim/lib/copilot/messages/persist.ts b/apps/sim/lib/copilot/messages/persist.ts new file mode 100644 index 000000000..9ca3a24fe --- /dev/null +++ b/apps/sim/lib/copilot/messages/persist.ts @@ -0,0 +1,43 @@ +import { createLogger } from '@sim/logger' +import { COPILOT_UPDATE_MESSAGES_API_PATH } from '@/lib/copilot/constants' +import type { CopilotMessage } from '@/stores/panel/copilot/types' +import { serializeMessagesForDB } from './serialization' + +const logger = createLogger('CopilotMessagePersistence') + +export async function persistMessages(params: { + chatId: string + messages: CopilotMessage[] + sensitiveCredentialIds?: Set + planArtifact?: string | null + mode?: string + model?: string + conversationId?: string +}): Promise { + try { + const dbMessages = serializeMessagesForDB( + params.messages, + params.sensitiveCredentialIds ?? new Set() + ) + const response = await fetch(COPILOT_UPDATE_MESSAGES_API_PATH, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + chatId: params.chatId, + messages: dbMessages, + ...(params.planArtifact !== undefined ? { planArtifact: params.planArtifact } : {}), + ...(params.mode || params.model + ? { config: { mode: params.mode, model: params.model } } + : {}), + ...(params.conversationId ? 
{ conversationId: params.conversationId } : {}), + }), + }) + return response.ok + } catch (error) { + logger.warn('Failed to persist messages', { + chatId: params.chatId, + error: error instanceof Error ? error.message : String(error), + }) + return false + } +} diff --git a/apps/sim/lib/copilot/messages/serialization.ts b/apps/sim/lib/copilot/messages/serialization.ts new file mode 100644 index 000000000..29686f6bc --- /dev/null +++ b/apps/sim/lib/copilot/messages/serialization.ts @@ -0,0 +1,200 @@ +import { createLogger } from '@sim/logger' +import { resolveToolDisplay } from '@/lib/copilot/store-utils' +import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' +import type { CopilotMessage, CopilotToolCall } from '@/stores/panel/copilot/types' +import { maskCredentialIdsInValue } from './credential-masking' + +const logger = createLogger('CopilotMessageSerialization') + +const TERMINAL_STATES = new Set([ + ClientToolCallState.success, + ClientToolCallState.error, + ClientToolCallState.rejected, + ClientToolCallState.aborted, + ClientToolCallState.review, + ClientToolCallState.background, +]) + +/** + * Clears streaming flags and normalizes non-terminal tool call states to 'aborted'. + * This ensures that tool calls loaded from DB after a refresh/abort don't render + * as in-progress with shimmer animations or interrupt buttons. + */ +export function clearStreamingFlags(toolCall: CopilotToolCall): void { + if (!toolCall) return + + toolCall.subAgentStreaming = false + + // Normalize non-terminal states when loading from DB. + // 'executing' → 'success': the server was running it, assume it completed. + // 'pending'/'generating' → 'aborted': never reached execution. + if (toolCall.state && !TERMINAL_STATES.has(toolCall.state)) { + const normalized = + toolCall.state === ClientToolCallState.executing + ? 
ClientToolCallState.success + : ClientToolCallState.aborted + toolCall.state = normalized + toolCall.display = resolveToolDisplay(toolCall.name, normalized, toolCall.id, toolCall.params) + } + + if (Array.isArray(toolCall.subAgentBlocks)) { + for (const block of toolCall.subAgentBlocks) { + if (block?.type === 'subagent_tool_call' && block.toolCall) { + clearStreamingFlags(block.toolCall) + } + } + } + if (Array.isArray(toolCall.subAgentToolCalls)) { + for (const subTc of toolCall.subAgentToolCalls) { + clearStreamingFlags(subTc) + } + } +} + +export function normalizeMessagesForUI(messages: CopilotMessage[]): CopilotMessage[] { + try { + for (const message of messages) { + if (message.role === 'assistant') { + logger.debug('[normalizeMessagesForUI] Loading assistant message', { + id: message.id, + hasContent: !!message.content?.trim(), + contentBlockCount: message.contentBlocks?.length || 0, + contentBlockTypes: message.contentBlocks?.map((b) => b?.type) ?? [], + }) + } + } + + for (const message of messages) { + if (message.contentBlocks) { + for (const block of message.contentBlocks) { + if (block?.type === 'tool_call' && block.toolCall) { + clearStreamingFlags(block.toolCall) + } + } + } + if (message.toolCalls) { + for (const toolCall of message.toolCalls) { + clearStreamingFlags(toolCall) + } + } + } + return messages + } catch (error) { + logger.warn('[normalizeMessagesForUI] Failed to normalize messages', { + error: error instanceof Error ? error.message : String(error), + }) + return messages + } +} + +export function deepClone(obj: T): T { + try { + const json = JSON.stringify(obj) + if (!json || json === 'undefined') { + logger.warn('[deepClone] JSON.stringify returned empty for object', { + type: typeof obj, + isArray: Array.isArray(obj), + length: Array.isArray(obj) ? 
obj.length : undefined, + }) + return obj + } + const parsed = JSON.parse(json) + if (Array.isArray(obj) && (!Array.isArray(parsed) || parsed.length !== obj.length)) { + logger.warn('[deepClone] Array clone mismatch', { + originalLength: obj.length, + clonedLength: Array.isArray(parsed) ? parsed.length : 'not array', + }) + } + return parsed + } catch (err) { + logger.error('[deepClone] Failed to clone object', { + error: String(err), + type: typeof obj, + isArray: Array.isArray(obj), + }) + return obj + } +} + +export function serializeMessagesForDB( + messages: CopilotMessage[], + credentialIds: Set +): CopilotMessage[] { + const result = messages + .map((msg) => { + let timestamp: string = msg.timestamp + if (typeof timestamp !== 'string') { + const ts = timestamp as unknown + timestamp = ts instanceof Date ? ts.toISOString() : new Date().toISOString() + } + + const serialized: CopilotMessage = { + id: msg.id, + role: msg.role, + content: msg.content || '', + timestamp, + } + + if (Array.isArray(msg.contentBlocks) && msg.contentBlocks.length > 0) { + serialized.contentBlocks = deepClone(msg.contentBlocks) + } + + if (Array.isArray(msg.toolCalls) && msg.toolCalls.length > 0) { + serialized.toolCalls = deepClone(msg.toolCalls) + } + + if (Array.isArray(msg.fileAttachments) && msg.fileAttachments.length > 0) { + serialized.fileAttachments = deepClone(msg.fileAttachments) + } + + if (Array.isArray(msg.contexts) && msg.contexts.length > 0) { + serialized.contexts = deepClone(msg.contexts) + } + + if (Array.isArray(msg.citations) && msg.citations.length > 0) { + serialized.citations = deepClone(msg.citations) + } + + if (msg.errorType) { + serialized.errorType = msg.errorType + } + + return maskCredentialIdsInValue(serialized, credentialIds) + }) + .filter((msg) => { + if (msg.role === 'assistant') { + const hasContent = typeof msg.content === 'string' && msg.content.trim().length > 0 + const hasTools = Array.isArray(msg.toolCalls) && msg.toolCalls.length > 0 + const 
hasBlocks = Array.isArray(msg.contentBlocks) && msg.contentBlocks.length > 0 + return hasContent || hasTools || hasBlocks + } + return true + }) + + for (const msg of messages) { + if (msg.role === 'assistant') { + logger.debug('[serializeMessagesForDB] Input assistant message', { + id: msg.id, + hasContent: !!msg.content?.trim(), + contentBlockCount: msg.contentBlocks?.length || 0, + contentBlockTypes: msg.contentBlocks?.map((b) => b?.type) ?? [], + }) + } + } + + logger.debug('[serializeMessagesForDB] Serialized messages', { + inputCount: messages.length, + outputCount: result.length, + sample: + result.length > 0 + ? { + role: result[result.length - 1].role, + hasContent: !!result[result.length - 1].content, + contentBlockCount: result[result.length - 1].contentBlocks?.length || 0, + toolCallCount: result[result.length - 1].toolCalls?.length || 0, + } + : null, + }) + + return result +} diff --git a/apps/sim/lib/copilot/models.ts b/apps/sim/lib/copilot/models.ts index 83a90169b..90d43f1b0 100644 --- a/apps/sim/lib/copilot/models.ts +++ b/apps/sim/lib/copilot/models.ts @@ -18,6 +18,7 @@ export const COPILOT_MODEL_IDS = [ 'claude-4-sonnet', 'claude-4.5-haiku', 'claude-4.5-sonnet', + 'claude-4.6-opus', 'claude-4.5-opus', 'claude-4.1-opus', 'gemini-3-pro', diff --git a/apps/sim/lib/copilot/orchestrator/config.ts b/apps/sim/lib/copilot/orchestrator/config.ts new file mode 100644 index 000000000..9e3dc1a28 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/config.ts @@ -0,0 +1,67 @@ +export const INTERRUPT_TOOL_NAMES = [ + 'set_global_workflow_variables', + 'run_workflow', + 'run_workflow_until_block', + 'run_from_block', + 'run_block', + 'manage_mcp_tool', + 'manage_custom_tool', + 'deploy_mcp', + 'deploy_chat', + 'deploy_api', + 'create_workspace_mcp_server', + 'set_environment_variables', + 'make_api_request', + 'oauth_request_access', + 'navigate_ui', + 'knowledge_base', + 'generate_api_key', +] as const + +export const INTERRUPT_TOOL_SET = new 
Set(INTERRUPT_TOOL_NAMES) + +export const SUBAGENT_TOOL_NAMES = [ + 'debug', + 'edit', + 'build', + 'plan', + 'test', + 'deploy', + 'auth', + 'research', + 'knowledge', + 'custom_tool', + 'tour', + 'info', + 'workflow', + 'evaluate', + 'superagent', + 'discovery', +] as const + +export const SUBAGENT_TOOL_SET = new Set(SUBAGENT_TOOL_NAMES) + +/** + * Respond tools are internal to the copilot's subagent system. + * They're used by subagents to signal completion and should NOT be executed by the sim side. + * The copilot backend handles these internally. + */ +export const RESPOND_TOOL_NAMES = [ + 'plan_respond', + 'edit_respond', + 'build_respond', + 'debug_respond', + 'info_respond', + 'research_respond', + 'deploy_respond', + 'superagent_respond', + 'discovery_respond', + 'tour_respond', + 'auth_respond', + 'workflow_respond', + 'knowledge_respond', + 'custom_tool_respond', + 'test_respond', +] as const + +export const RESPOND_TOOL_SET = new Set(RESPOND_TOOL_NAMES) diff --git a/apps/sim/lib/copilot/orchestrator/index.ts b/apps/sim/lib/copilot/orchestrator/index.ts new file mode 100644 index 000000000..9540027cd --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/index.ts @@ -0,0 +1,70 @@ +import { createLogger } from '@sim/logger' +import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor' +import type { OrchestratorOptions, OrchestratorResult } from '@/lib/copilot/orchestrator/types' +import { env } from '@/lib/core/config/env' +import { buildToolCallSummaries, createStreamingContext, runStreamLoop } from './stream-core' + +const logger = createLogger('CopilotOrchestrator') + +export interface OrchestrateStreamOptions extends OrchestratorOptions { + userId: string + workflowId: string + chatId?: string +} + +export async function orchestrateCopilotStream( + requestPayload: Record, + options: OrchestrateStreamOptions +): Promise { + const { userId, workflowId, chatId } = options 
+ const execContext = await prepareExecutionContext(userId, workflowId) + + const payloadMsgId = requestPayload?.messageId + const context = createStreamingContext({ + chatId, + messageId: typeof payloadMsgId === 'string' ? payloadMsgId : crypto.randomUUID(), + }) + + try { + await runStreamLoop( + `${SIM_AGENT_API_URL}/api/chat-completion-streaming`, + { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), + }, + body: JSON.stringify(requestPayload), + }, + context, + execContext, + options + ) + + const result: OrchestratorResult = { + success: context.errors.length === 0, + content: context.accumulatedContent, + contentBlocks: context.contentBlocks, + toolCalls: buildToolCallSummaries(context), + chatId: context.chatId, + conversationId: context.conversationId, + errors: context.errors.length ? context.errors : undefined, + } + await options.onComplete?.(result) + return result + } catch (error) { + const err = error instanceof Error ? error : new Error('Copilot orchestration failed') + logger.error('Copilot orchestration failed', { error: err.message }) + await options.onError?.(err) + return { + success: false, + content: '', + contentBlocks: [], + toolCalls: [], + chatId: context.chatId, + conversationId: context.conversationId, + error: err.message, + } + } +} diff --git a/apps/sim/lib/copilot/orchestrator/persistence.ts b/apps/sim/lib/copilot/orchestrator/persistence.ts new file mode 100644 index 000000000..2743a51d4 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/persistence.ts @@ -0,0 +1,29 @@ +import { createLogger } from '@sim/logger' +import { REDIS_TOOL_CALL_PREFIX } from '@/lib/copilot/constants' +import { getRedisClient } from '@/lib/core/config/redis' + +const logger = createLogger('CopilotOrchestratorPersistence') + +/** + * Get a tool call confirmation status from Redis. 
+ */ +export async function getToolConfirmation(toolCallId: string): Promise<{ + status: string + message?: string + timestamp?: string +} | null> { + const redis = getRedisClient() + if (!redis) return null + + try { + const data = await redis.get(`${REDIS_TOOL_CALL_PREFIX}${toolCallId}`) + if (!data) return null + return JSON.parse(data) as { status: string; message?: string; timestamp?: string } + } catch (error) { + logger.error('Failed to read tool confirmation', { + toolCallId, + error: error instanceof Error ? error.message : String(error), + }) + return null + } +} diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts new file mode 100644 index 000000000..fcc5abf43 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts @@ -0,0 +1,102 @@ +/** + * @vitest-environment node + */ + +import { loggerMock } from '@sim/testing' +import { beforeEach, describe, expect, it, vi } from 'vitest' + +vi.mock('@sim/logger', () => loggerMock) + +const { executeToolServerSide, markToolComplete, isIntegrationTool, isToolAvailableOnSimSide } = + vi.hoisted(() => ({ + executeToolServerSide: vi.fn(), + markToolComplete: vi.fn(), + isIntegrationTool: vi.fn().mockReturnValue(false), + isToolAvailableOnSimSide: vi.fn().mockReturnValue(true), + })) + +vi.mock('@/lib/copilot/orchestrator/tool-executor', () => ({ + executeToolServerSide, + markToolComplete, + isIntegrationTool, + isToolAvailableOnSimSide, +})) + +import { sseHandlers } from '@/lib/copilot/orchestrator/sse-handlers' +import type { ExecutionContext, StreamingContext } from '@/lib/copilot/orchestrator/types' + +describe('sse-handlers tool lifecycle', () => { + let context: StreamingContext + let execContext: ExecutionContext + + beforeEach(() => { + vi.clearAllMocks() + context = { + chatId: undefined, + conversationId: undefined, + messageId: 'msg-1', + accumulatedContent: '', + contentBlocks: [], + toolCalls: new Map(), + 
currentThinkingBlock: null, + isInThinkingBlock: false, + subAgentParentToolCallId: undefined, + subAgentContent: {}, + subAgentToolCalls: {}, + pendingContent: '', + streamComplete: false, + wasAborted: false, + errors: [], + } + execContext = { + userId: 'user-1', + workflowId: 'workflow-1', + } + }) + + it('executes tool_call and emits tool_result + mark-complete', async () => { + executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } }) + markToolComplete.mockResolvedValueOnce(true) + const onEvent = vi.fn() + + await sseHandlers.tool_call( + { + type: 'tool_call', + data: { id: 'tool-1', name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } }, + } as any, + context, + execContext, + { onEvent, interactive: false, timeout: 1000 } + ) + + expect(executeToolServerSide).toHaveBeenCalledTimes(1) + expect(markToolComplete).toHaveBeenCalledTimes(1) + expect(onEvent).toHaveBeenCalledWith( + expect.objectContaining({ + type: 'tool_result', + toolCallId: 'tool-1', + success: true, + }) + ) + + const updated = context.toolCalls.get('tool-1') + expect(updated?.status).toBe('success') + expect(updated?.result?.output).toEqual({ ok: true }) + }) + + it('skips duplicate tool_call after result', async () => { + executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } }) + markToolComplete.mockResolvedValueOnce(true) + + const event = { + type: 'tool_call', + data: { id: 'tool-dup', name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } }, + } + + await sseHandlers.tool_call(event as any, context, execContext, { interactive: false }) + await sseHandlers.tool_call(event as any, context, execContext, { interactive: false }) + + expect(executeToolServerSide).toHaveBeenCalledTimes(1) + expect(markToolComplete).toHaveBeenCalledTimes(1) + }) +}) diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts new file mode 100644 index 
000000000..0f29ef3b3 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts @@ -0,0 +1,635 @@ +import { createLogger } from '@sim/logger' +import { STREAM_TIMEOUT_MS } from '@/lib/copilot/constants' +import { RESPOND_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrator/config' +import { + asRecord, + getEventData, + markToolResultSeen, + wasToolResultSeen, +} from '@/lib/copilot/orchestrator/sse-utils' +import { + isIntegrationTool, + isToolAvailableOnSimSide, + markToolComplete, +} from '@/lib/copilot/orchestrator/tool-executor' +import type { + ContentBlock, + ExecutionContext, + OrchestratorOptions, + SSEEvent, + StreamingContext, + ToolCallState, +} from '@/lib/copilot/orchestrator/types' +import { + executeToolAndReport, + isInterruptToolName, + waitForToolCompletion, + waitForToolDecision, +} from './tool-execution' + +const logger = createLogger('CopilotSseHandlers') + +/** + * Run tools that can be executed client-side for real-time feedback + * (block pulsing, logs, stop button). When interactive, the server defers + * execution to the browser client instead of running executeWorkflow directly. + */ +const CLIENT_EXECUTABLE_RUN_TOOLS = new Set([ + 'run_workflow', + 'run_workflow_until_block', + 'run_from_block', + 'run_block', +]) + +// Normalization + dedupe helpers live in sse-utils to keep server/client in sync. + +function inferToolSuccess(data: Record | undefined): { + success: boolean + hasResultData: boolean + hasError: boolean +} { + const resultObj = asRecord(data?.result) + const hasExplicitSuccess = data?.success !== undefined || resultObj.success !== undefined + const explicitSuccess = data?.success ?? resultObj.success + const hasResultData = data?.result !== undefined || data?.data !== undefined + const hasError = !!data?.error || !!resultObj.error + const success = hasExplicitSuccess ? 
!!explicitSuccess : hasResultData && !hasError + return { success, hasResultData, hasError } +} + +export type SSEHandler = ( + event: SSEEvent, + context: StreamingContext, + execContext: ExecutionContext, + options: OrchestratorOptions +) => void | Promise + +function addContentBlock(context: StreamingContext, block: Omit): void { + context.contentBlocks.push({ + ...block, + timestamp: Date.now(), + }) +} + +export const sseHandlers: Record = { + chat_id: (event, context) => { + context.chatId = asRecord(event.data).chatId as string | undefined + }, + title_updated: () => {}, + tool_result: (event, context) => { + const data = getEventData(event) + const toolCallId = event.toolCallId || (data?.id as string | undefined) + if (!toolCallId) return + const current = context.toolCalls.get(toolCallId) + if (!current) return + + const { success, hasResultData, hasError } = inferToolSuccess(data) + + current.status = success ? 'success' : 'error' + current.endTime = Date.now() + if (hasResultData) { + current.result = { + success, + output: data?.result || data?.data, + } + } + if (hasError) { + const resultObj = asRecord(data?.result) + current.error = (data?.error || resultObj.error) as string | undefined + } + }, + tool_error: (event, context) => { + const data = getEventData(event) + const toolCallId = event.toolCallId || (data?.id as string | undefined) + if (!toolCallId) return + const current = context.toolCalls.get(toolCallId) + if (!current) return + current.status = 'error' + current.error = (data?.error as string | undefined) || 'Tool execution failed' + current.endTime = Date.now() + }, + tool_generating: (event, context) => { + const data = getEventData(event) + const toolCallId = + event.toolCallId || + (data?.toolCallId as string | undefined) || + (data?.id as string | undefined) + const toolName = + event.toolName || (data?.toolName as string | undefined) || (data?.name as string | undefined) + if (!toolCallId || !toolName) return + if 
(!context.toolCalls.has(toolCallId)) { + context.toolCalls.set(toolCallId, { + id: toolCallId, + name: toolName, + status: 'pending', + startTime: Date.now(), + }) + } + }, + tool_call: async (event, context, execContext, options) => { + const toolData = getEventData(event) || ({} as Record) + const toolCallId = (toolData.id as string | undefined) || event.toolCallId + const toolName = (toolData.name as string | undefined) || event.toolName + if (!toolCallId || !toolName) return + + const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as + | Record + | undefined + const isPartial = toolData.partial === true + const existing = context.toolCalls.get(toolCallId) + + // If we've already completed this tool call, ignore late/duplicate tool_call events + // to avoid resetting UI/state back to pending and re-executing. + if ( + existing?.endTime || + (existing && existing.status !== 'pending' && existing.status !== 'executing') + ) { + if (!existing.params && args) { + existing.params = args + } + return + } + + if (existing) { + if (args && !existing.params) existing.params = args + } else { + context.toolCalls.set(toolCallId, { + id: toolCallId, + name: toolName, + status: 'pending', + params: args, + startTime: Date.now(), + }) + const created = context.toolCalls.get(toolCallId)! + addContentBlock(context, { type: 'tool_call', toolCall: created }) + } + + if (isPartial) return + if (wasToolResultSeen(toolCallId)) return + + const toolCall = context.toolCalls.get(toolCallId) + if (!toolCall) return + + // Subagent tools are executed by the copilot backend, not sim side. + if (SUBAGENT_TOOL_SET.has(toolName)) { + return + } + + // Respond tools are internal to copilot's subagent system - skip execution. + // The copilot backend handles these internally to signal subagent completion. 
+ if (RESPOND_TOOL_SET.has(toolName)) { + toolCall.status = 'success' + toolCall.endTime = Date.now() + toolCall.result = { + success: true, + output: 'Internal respond tool - handled by copilot backend', + } + return + } + + const isInterruptTool = isInterruptToolName(toolName) + const isInteractive = options.interactive === true + // Integration tools (user-installed) also require approval in interactive mode + const needsApproval = isInterruptTool || isIntegrationTool(toolName) + + if (needsApproval && isInteractive) { + const decision = await waitForToolDecision( + toolCallId, + options.timeout || STREAM_TIMEOUT_MS, + options.abortSignal + ) + if (decision?.status === 'accepted' || decision?.status === 'success') { + // Client-executable run tools: defer execution to the browser client. + // The client calls executeWorkflowWithFullLogging for real-time feedback + // (block pulsing, logs, stop button) and reports completion via + // /api/copilot/confirm with status success/error. We poll Redis for + // that completion signal, then fire-and-forget markToolComplete to Go. + if (CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName)) { + toolCall.status = 'executing' + const completion = await waitForToolCompletion( + toolCallId, + options.timeout || STREAM_TIMEOUT_MS, + options.abortSignal + ) + if (completion?.status === 'background') { + toolCall.status = 'skipped' + toolCall.endTime = Date.now() + markToolComplete( + toolCall.id, + toolCall.name, + 202, + completion.message || 'Tool execution moved to background', + { background: true } + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (run tool background)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCallId) + return + } + const success = completion?.status === 'success' + toolCall.status = success ? 'success' : 'error' + toolCall.endTime = Date.now() + const msg = + completion?.message || (success ? 
'Tool completed' : 'Tool failed or timed out') + // Fire-and-forget: tell Go backend the tool is done + // (must NOT await — see deadlock note in executeToolAndReport) + markToolComplete(toolCall.id, toolCall.name, success ? 200 : 500, msg).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (run tool)', { + toolCallId: toolCall.id, + toolName: toolCall.name, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCallId) + return + } + await executeToolAndReport(toolCallId, context, execContext, options) + return + } + + if (decision?.status === 'rejected' || decision?.status === 'error') { + toolCall.status = 'rejected' + toolCall.endTime = Date.now() + // Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport + markToolComplete( + toolCall.id, + toolCall.name, + 400, + decision.message || 'Tool execution rejected', + { skipped: true, reason: 'user_rejected' } + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (rejected)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCall.id) + return + } + + if (decision?.status === 'background') { + toolCall.status = 'skipped' + toolCall.endTime = Date.now() + // Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport + markToolComplete( + toolCall.id, + toolCall.name, + 202, + decision.message || 'Tool execution moved to background', + { background: true } + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (background)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCall.id) + return + } + + // Decision was null — timed out or aborted. + // Do NOT fall through to auto-execute. Mark the tool as timed out + // and notify Go so it can unblock waitForExternalTool. 
+ toolCall.status = 'rejected' + toolCall.endTime = Date.now() + markToolComplete(toolCall.id, toolCall.name, 408, 'Tool approval timed out', { + skipped: true, + reason: 'timeout', + }).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (timeout)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCall.id) + return + } + + if (options.autoExecuteTools !== false) { + await executeToolAndReport(toolCallId, context, execContext, options) + } + }, + reasoning: (event, context) => { + const d = asRecord(event.data) + const phase = d.phase || asRecord(d.data).phase + if (phase === 'start') { + context.isInThinkingBlock = true + context.currentThinkingBlock = { + type: 'thinking', + content: '', + timestamp: Date.now(), + } + return + } + if (phase === 'end') { + if (context.currentThinkingBlock) { + context.contentBlocks.push(context.currentThinkingBlock) + } + context.isInThinkingBlock = false + context.currentThinkingBlock = null + return + } + const chunk = (d.data || d.content || event.content) as string | undefined + if (!chunk || !context.currentThinkingBlock) return + context.currentThinkingBlock.content = `${context.currentThinkingBlock.content || ''}${chunk}` + }, + content: (event, context) => { + // Go backend sends content as a plain string in event.data, not wrapped in an object. 
+ let chunk: string | undefined + if (typeof event.data === 'string') { + chunk = event.data + } else { + const d = asRecord(event.data) + chunk = (d.content || d.data || event.content) as string | undefined + } + if (!chunk) return + context.accumulatedContent += chunk + addContentBlock(context, { type: 'text', content: chunk }) + }, + done: (event, context) => { + const d = asRecord(event.data) + if (d.responseId) { + context.conversationId = d.responseId as string + } + context.streamComplete = true + }, + start: (event, context) => { + const d = asRecord(event.data) + if (d.responseId) { + context.conversationId = d.responseId as string + } + }, + error: (event, context) => { + const d = asRecord(event.data) + const message = (d.message || d.error || event.error) as string | undefined + if (message) { + context.errors.push(message) + } + context.streamComplete = true + }, +} + +export const subAgentHandlers: Record = { + content: (event, context) => { + const parentToolCallId = context.subAgentParentToolCallId + if (!parentToolCallId || !event.data) return + // Go backend sends content as a plain string in event.data + let chunk: string | undefined + if (typeof event.data === 'string') { + chunk = event.data + } else { + const d = asRecord(event.data) + chunk = (d.content || d.data || event.content) as string | undefined + } + if (!chunk) return + context.subAgentContent[parentToolCallId] = + (context.subAgentContent[parentToolCallId] || '') + chunk + addContentBlock(context, { type: 'subagent_text', content: chunk }) + }, + tool_call: async (event, context, execContext, options) => { + const parentToolCallId = context.subAgentParentToolCallId + if (!parentToolCallId) return + const toolData = getEventData(event) || ({} as Record) + const toolCallId = (toolData.id as string | undefined) || event.toolCallId + const toolName = (toolData.name as string | undefined) || event.toolName + if (!toolCallId || !toolName) return + const isPartial = toolData.partial === 
true + const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as + | Record + | undefined + + const existing = context.toolCalls.get(toolCallId) + // Ignore late/duplicate tool_call events once we already have a result. + if (wasToolResultSeen(toolCallId) || existing?.endTime) { + return + } + + const toolCall: ToolCallState = { + id: toolCallId, + name: toolName, + status: 'pending', + params: args, + startTime: Date.now(), + } + + // Store in both places - but do NOT overwrite existing tool call state for the same id. + if (!context.subAgentToolCalls[parentToolCallId]) { + context.subAgentToolCalls[parentToolCallId] = [] + } + if (!context.subAgentToolCalls[parentToolCallId].some((tc) => tc.id === toolCallId)) { + context.subAgentToolCalls[parentToolCallId].push(toolCall) + } + if (!context.toolCalls.has(toolCallId)) { + context.toolCalls.set(toolCallId, toolCall) + } + + if (isPartial) return + + // Respond tools are internal to copilot's subagent system - skip execution. + if (RESPOND_TOOL_SET.has(toolName)) { + toolCall.status = 'success' + toolCall.endTime = Date.now() + toolCall.result = { + success: true, + output: 'Internal respond tool - handled by copilot backend', + } + return + } + + // Tools that only exist on the Go backend (e.g. search_patterns, + // search_errors, remember_debug) should NOT be re-executed on the Sim side. + // The Go backend already executed them and will send its own tool_result + // SSE event with the real outcome. Trying to execute them here would fail + // with "Tool not found" and incorrectly mark the tool as failed. + if (!isToolAvailableOnSimSide(toolName)) { + return + } + + // Interrupt tools and integration tools (user-installed) require approval + // in interactive mode, same as top-level handler. 
+ const needsSubagentApproval = isInterruptToolName(toolName) || isIntegrationTool(toolName) + if (options.interactive === true && needsSubagentApproval) { + const decision = await waitForToolDecision( + toolCallId, + options.timeout || STREAM_TIMEOUT_MS, + options.abortSignal + ) + if (decision?.status === 'accepted' || decision?.status === 'success') { + await executeToolAndReport(toolCallId, context, execContext, options) + return + } + if (decision?.status === 'rejected' || decision?.status === 'error') { + toolCall.status = 'rejected' + toolCall.endTime = Date.now() + // Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport + markToolComplete( + toolCall.id, + toolCall.name, + 400, + decision.message || 'Tool execution rejected', + { skipped: true, reason: 'user_rejected' } + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (subagent rejected)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCall.id) + return + } + if (decision?.status === 'background') { + toolCall.status = 'skipped' + toolCall.endTime = Date.now() + // Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport + markToolComplete( + toolCall.id, + toolCall.name, + 202, + decision.message || 'Tool execution moved to background', + { background: true } + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (subagent background)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCall.id) + return + } + + // Decision was null — timed out or aborted. + // Do NOT fall through to auto-execute. 
+ toolCall.status = 'rejected' + toolCall.endTime = Date.now() + markToolComplete(toolCall.id, toolCall.name, 408, 'Tool approval timed out', { + skipped: true, + reason: 'timeout', + }).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (subagent timeout)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCall.id) + return + } + + // Client-executable run tools in interactive mode: defer to client. + // Same pattern as main handler: wait for client completion, then tell Go. + if (options.interactive === true && CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName)) { + toolCall.status = 'executing' + const completion = await waitForToolCompletion( + toolCallId, + options.timeout || STREAM_TIMEOUT_MS, + options.abortSignal + ) + if (completion?.status === 'rejected') { + toolCall.status = 'rejected' + toolCall.endTime = Date.now() + markToolComplete( + toolCall.id, + toolCall.name, + 400, + completion.message || 'Tool execution rejected' + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (subagent run tool rejected)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCallId) + return + } + if (completion?.status === 'background') { + toolCall.status = 'skipped' + toolCall.endTime = Date.now() + markToolComplete( + toolCall.id, + toolCall.name, + 202, + completion.message || 'Tool execution moved to background', + { background: true } + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (subagent run tool background)', { + toolCallId: toolCall.id, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCallId) + return + } + const success = completion?.status === 'success' + toolCall.status = success ? 'success' : 'error' + toolCall.endTime = Date.now() + const msg = completion?.message || (success ? 
'Tool completed' : 'Tool failed or timed out') + markToolComplete(toolCall.id, toolCall.name, success ? 200 : 500, msg).catch((err) => { + logger.error('markToolComplete fire-and-forget failed (subagent run tool)', { + toolCallId: toolCall.id, + toolName: toolCall.name, + error: err instanceof Error ? err.message : String(err), + }) + }) + markToolResultSeen(toolCallId) + return + } + + if (options.autoExecuteTools !== false) { + await executeToolAndReport(toolCallId, context, execContext, options) + } + }, + tool_result: (event, context) => { + const parentToolCallId = context.subAgentParentToolCallId + if (!parentToolCallId) return + const data = getEventData(event) + const toolCallId = event.toolCallId || (data?.id as string | undefined) + if (!toolCallId) return + + // Update in subAgentToolCalls. + const toolCalls = context.subAgentToolCalls[parentToolCallId] || [] + const subAgentToolCall = toolCalls.find((tc) => tc.id === toolCallId) + + // Also update in main toolCalls (where we added it for execution). + const mainToolCall = context.toolCalls.get(toolCallId) + + const { success, hasResultData, hasError } = inferToolSuccess(data) + + const status = success ? 'success' : 'error' + const endTime = Date.now() + const result = hasResultData ? 
{ success, output: data?.result || data?.data } : undefined + + if (subAgentToolCall) { + subAgentToolCall.status = status + subAgentToolCall.endTime = endTime + if (result) subAgentToolCall.result = result + if (hasError) { + const resultObj = asRecord(data?.result) + subAgentToolCall.error = (data?.error || resultObj.error) as string | undefined + } + } + + if (mainToolCall) { + mainToolCall.status = status + mainToolCall.endTime = endTime + if (result) mainToolCall.result = result + if (hasError) { + const resultObj = asRecord(data?.result) + mainToolCall.error = (data?.error || resultObj.error) as string | undefined + } + } + }, +} + +export function handleSubagentRouting(event: SSEEvent, context: StreamingContext): boolean { + if (!event.subagent) return false + if (!context.subAgentParentToolCallId) { + logger.warn('Subagent event missing parent tool call', { + type: event.type, + subagent: event.subagent, + }) + return false + } + return true +} diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/index.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/index.ts new file mode 100644 index 000000000..d0d6b14b5 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/index.ts @@ -0,0 +1,2 @@ +export type { SSEHandler } from './handlers' +export { handleSubagentRouting, sseHandlers, subAgentHandlers } from './handlers' diff --git a/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts new file mode 100644 index 000000000..26865176c --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts @@ -0,0 +1,184 @@ +import { createLogger } from '@sim/logger' +import { + TOOL_DECISION_INITIAL_POLL_MS, + TOOL_DECISION_MAX_POLL_MS, + TOOL_DECISION_POLL_BACKOFF, +} from '@/lib/copilot/constants' +import { INTERRUPT_TOOL_SET } from '@/lib/copilot/orchestrator/config' +import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence' +import { 
+ asRecord, + markToolResultSeen, + wasToolResultSeen, +} from '@/lib/copilot/orchestrator/sse-utils' +import { executeToolServerSide, markToolComplete } from '@/lib/copilot/orchestrator/tool-executor' +import type { + ExecutionContext, + OrchestratorOptions, + SSEEvent, + StreamingContext, +} from '@/lib/copilot/orchestrator/types' + +const logger = createLogger('CopilotSseToolExecution') + +export function isInterruptToolName(toolName: string): boolean { + return INTERRUPT_TOOL_SET.has(toolName) +} + +export async function executeToolAndReport( + toolCallId: string, + context: StreamingContext, + execContext: ExecutionContext, + options?: OrchestratorOptions +): Promise { + const toolCall = context.toolCalls.get(toolCallId) + if (!toolCall) return + + if (toolCall.status === 'executing') return + if (wasToolResultSeen(toolCall.id)) return + + toolCall.status = 'executing' + try { + const result = await executeToolServerSide(toolCall, execContext) + toolCall.status = result.success ? 'success' : 'error' + toolCall.result = result + toolCall.error = result.error + toolCall.endTime = Date.now() + + // If create_workflow was successful, update the execution context with the new workflowId. + // This ensures subsequent tools in the same stream have access to the workflowId. + const output = asRecord(result.output) + if ( + toolCall.name === 'create_workflow' && + result.success && + output.workflowId && + !execContext.workflowId + ) { + execContext.workflowId = output.workflowId as string + if (output.workspaceId) { + execContext.workspaceId = output.workspaceId as string + } + } + + markToolResultSeen(toolCall.id) + + // Fire-and-forget: notify the copilot backend that the tool completed. + // IMPORTANT: We must NOT await this — the Go backend may block on the + // mark-complete handler until it can write back on the SSE stream, but + // the SSE reader (our for-await loop) is paused while we're in this + // handler. 
Awaiting here would deadlock: sim waits for Go's response, + // Go waits for sim to drain the SSE stream. + markToolComplete( + toolCall.id, + toolCall.name, + result.success ? 200 : 500, + result.error || (result.success ? 'Tool completed' : 'Tool failed'), + result.output + ).catch((err) => { + logger.error('markToolComplete fire-and-forget failed', { + toolCallId: toolCall.id, + toolName: toolCall.name, + error: err instanceof Error ? err.message : String(err), + }) + }) + + const resultEvent: SSEEvent = { + type: 'tool_result', + toolCallId: toolCall.id, + toolName: toolCall.name, + success: result.success, + result: result.output, + data: { + id: toolCall.id, + name: toolCall.name, + success: result.success, + result: result.output, + }, + } + await options?.onEvent?.(resultEvent) + } catch (error) { + toolCall.status = 'error' + toolCall.error = error instanceof Error ? error.message : String(error) + toolCall.endTime = Date.now() + + markToolResultSeen(toolCall.id) + + // Fire-and-forget (same reasoning as above). + markToolComplete(toolCall.id, toolCall.name, 500, toolCall.error).catch((err) => { + logger.error('markToolComplete fire-and-forget failed', { + toolCallId: toolCall.id, + toolName: toolCall.name, + error: err instanceof Error ? 
err.message : String(err), + }) + }) + + const errorEvent: SSEEvent = { + type: 'tool_error', + toolCallId: toolCall.id, + data: { + id: toolCall.id, + name: toolCall.name, + error: toolCall.error, + }, + } + await options?.onEvent?.(errorEvent) + } +} + +export async function waitForToolDecision( + toolCallId: string, + timeoutMs: number, + abortSignal?: AbortSignal +): Promise<{ status: string; message?: string } | null> { + const start = Date.now() + let interval = TOOL_DECISION_INITIAL_POLL_MS + const maxInterval = TOOL_DECISION_MAX_POLL_MS + while (Date.now() - start < timeoutMs) { + if (abortSignal?.aborted) return null + const decision = await getToolConfirmation(toolCallId) + if (decision?.status) { + return decision + } + await new Promise((resolve) => setTimeout(resolve, interval)) + interval = Math.min(interval * TOOL_DECISION_POLL_BACKOFF, maxInterval) + } + return null +} + +/** + * Wait for a tool completion signal (success/error/rejected/background) from the client. + * Unlike waitForToolDecision which returns on any status, this ignores the + * initial 'accepted' status and only returns on terminal statuses: + * - success: client finished executing successfully + * - error: client execution failed + * - rejected: user clicked Skip (subagent run tools where user hasn't auto-allowed) + * - background: user moved the tool execution to the background + * + * Used for client-executable run tools: the client executes the workflow + * and posts success/error to /api/copilot/confirm when done. The server + * polls here until that completion signal arrives. 
+ */ +export async function waitForToolCompletion( + toolCallId: string, + timeoutMs: number, + abortSignal?: AbortSignal +): Promise<{ status: string; message?: string } | null> { + const start = Date.now() + let interval = TOOL_DECISION_INITIAL_POLL_MS + const maxInterval = TOOL_DECISION_MAX_POLL_MS + while (Date.now() - start < timeoutMs) { + if (abortSignal?.aborted) return null + const decision = await getToolConfirmation(toolCallId) + // Return on completion/terminal statuses, not intermediate 'accepted' + if ( + decision?.status === 'success' || + decision?.status === 'error' || + decision?.status === 'rejected' || + decision?.status === 'background' + ) { + return decision + } + await new Promise((resolve) => setTimeout(resolve, interval)) + interval = Math.min(interval * TOOL_DECISION_POLL_BACKOFF, maxInterval) + } + return null +} diff --git a/apps/sim/lib/copilot/orchestrator/sse-parser.ts b/apps/sim/lib/copilot/orchestrator/sse-parser.ts new file mode 100644 index 000000000..8ab50365c --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/sse-parser.ts @@ -0,0 +1,71 @@ +import { createLogger } from '@sim/logger' +import type { SSEEvent } from '@/lib/copilot/orchestrator/types' + +const logger = createLogger('CopilotSseParser') + +/** + * Parses SSE streams from the copilot backend into typed events. 
+ */ +export async function* parseSSEStream( + reader: ReadableStreamDefaultReader, + decoder: TextDecoder, + abortSignal?: AbortSignal +): AsyncGenerator { + let buffer = '' + + try { + while (true) { + if (abortSignal?.aborted) { + logger.info('SSE stream aborted by signal') + break + } + + const { done, value } = await reader.read() + if (done) break + + buffer += decoder.decode(value, { stream: true }) + const lines = buffer.split('\n') + buffer = lines.pop() || '' + + for (const line of lines) { + if (!line.trim()) continue + if (!line.startsWith('data: ')) continue + + const jsonStr = line.slice(6) + if (jsonStr === '[DONE]') continue + + try { + const event = JSON.parse(jsonStr) as SSEEvent + if (event?.type) { + yield event + } + } catch (error) { + logger.warn('Failed to parse SSE event', { + preview: jsonStr.slice(0, 200), + error: error instanceof Error ? error.message : String(error), + }) + } + } + } + + if (buffer.trim() && buffer.startsWith('data: ')) { + try { + const event = JSON.parse(buffer.slice(6)) as SSEEvent + if (event?.type) { + yield event + } + } catch (error) { + logger.warn('Failed to parse final SSE buffer', { + preview: buffer.slice(0, 200), + error: error instanceof Error ? 
error.message : String(error), + }) + } + } + } finally { + try { + reader.releaseLock() + } catch { + logger.warn('Failed to release SSE reader lock') + } + } +} diff --git a/apps/sim/lib/copilot/orchestrator/sse-utils.test.ts b/apps/sim/lib/copilot/orchestrator/sse-utils.test.ts new file mode 100644 index 000000000..ce41e3270 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/sse-utils.test.ts @@ -0,0 +1,42 @@ +/** + * @vitest-environment node + */ +import { describe, expect, it } from 'vitest' +import { + normalizeSseEvent, + shouldSkipToolCallEvent, + shouldSkipToolResultEvent, +} from '@/lib/copilot/orchestrator/sse-utils' + +describe('sse-utils', () => { + it.concurrent('normalizes tool fields from string data', () => { + const event = { + type: 'tool_result', + data: JSON.stringify({ + id: 'tool_1', + name: 'edit_workflow', + success: true, + result: { ok: true }, + }), + } + + const normalized = normalizeSseEvent(event as any) + + expect(normalized.toolCallId).toBe('tool_1') + expect(normalized.toolName).toBe('edit_workflow') + expect(normalized.success).toBe(true) + expect(normalized.result).toEqual({ ok: true }) + }) + + it.concurrent('dedupes tool_call events', () => { + const event = { type: 'tool_call', data: { id: 'tool_call_1', name: 'plan' } } + expect(shouldSkipToolCallEvent(event as any)).toBe(false) + expect(shouldSkipToolCallEvent(event as any)).toBe(true) + }) + + it.concurrent('dedupes tool_result events', () => { + const event = { type: 'tool_result', data: { id: 'tool_result_1', name: 'plan' } } + expect(shouldSkipToolResultEvent(event as any)).toBe(false) + expect(shouldSkipToolResultEvent(event as any)).toBe(true) + }) +}) diff --git a/apps/sim/lib/copilot/orchestrator/sse-utils.ts b/apps/sim/lib/copilot/orchestrator/sse-utils.ts new file mode 100644 index 000000000..afcbf2111 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/sse-utils.ts @@ -0,0 +1,124 @@ +import { STREAM_BUFFER_MAX_DEDUP_ENTRIES } from '@/lib/copilot/constants' 
+import type { SSEEvent } from '@/lib/copilot/orchestrator/types' + +type EventDataObject = Record | undefined + +/** Safely cast event.data to a record for property access. */ +export const asRecord = (data: unknown): Record => + (data && typeof data === 'object' && !Array.isArray(data) ? data : {}) as Record + +/** + * In-memory tool event dedupe with bounded size. + * + * NOTE: Process-local only. In a multi-instance setup (e.g., ECS), + * each task maintains its own dedupe cache. + */ +const seenToolCalls = new Set() +const seenToolResults = new Set() + +function addToSet(set: Set, id: string): void { + if (set.size >= STREAM_BUFFER_MAX_DEDUP_ENTRIES) { + const first = set.values().next().value + if (first) set.delete(first) + } + set.add(id) +} + +const parseEventData = (data: unknown): EventDataObject => { + if (!data) return undefined + if (typeof data !== 'string') { + return data as EventDataObject + } + try { + return JSON.parse(data) as EventDataObject + } catch { + return undefined + } +} + +const hasToolFields = (data: EventDataObject): boolean => { + if (!data) return false + return ( + data.id !== undefined || + data.toolCallId !== undefined || + data.name !== undefined || + data.success !== undefined || + data.result !== undefined || + data.arguments !== undefined + ) +} + +export const getEventData = (event: SSEEvent): EventDataObject => { + const topLevel = parseEventData(event.data) + if (!topLevel) return undefined + if (hasToolFields(topLevel)) return topLevel + const nested = parseEventData(topLevel.data) + return nested || topLevel +} + +function getToolCallIdFromEvent(event: SSEEvent): string | undefined { + const data = getEventData(event) + return ( + event.toolCallId || (data?.id as string | undefined) || (data?.toolCallId as string | undefined) + ) +} + +/** Normalizes SSE events so tool metadata is available at the top level. 
*/ +export function normalizeSseEvent(event: SSEEvent): SSEEvent { + if (!event) return event + const data = getEventData(event) + if (!data) return event + const toolCallId = + event.toolCallId || (data.id as string | undefined) || (data.toolCallId as string | undefined) + const toolName = + event.toolName || (data.name as string | undefined) || (data.toolName as string | undefined) + const success = event.success ?? (data.success as boolean | undefined) + const result = event.result ?? data.result + const normalizedData = typeof event.data === 'string' ? data : event.data + return { + ...event, + data: normalizedData, + toolCallId, + toolName, + success, + result, + } +} + +function markToolCallSeen(toolCallId: string): void { + addToSet(seenToolCalls, toolCallId) +} + +function wasToolCallSeen(toolCallId: string): boolean { + return seenToolCalls.has(toolCallId) +} + +export function markToolResultSeen(toolCallId: string): void { + addToSet(seenToolResults, toolCallId) +} + +export function wasToolResultSeen(toolCallId: string): boolean { + return seenToolResults.has(toolCallId) +} + +export function shouldSkipToolCallEvent(event: SSEEvent): boolean { + if (event.type !== 'tool_call') return false + const toolCallId = getToolCallIdFromEvent(event) + if (!toolCallId) return false + const eventData = getEventData(event) + if (eventData?.partial === true) return false + if (wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId)) { + return true + } + markToolCallSeen(toolCallId) + return false +} + +export function shouldSkipToolResultEvent(event: SSEEvent): boolean { + if (event.type !== 'tool_result') return false + const toolCallId = getToolCallIdFromEvent(event) + if (!toolCallId) return false + if (wasToolResultSeen(toolCallId)) return true + markToolResultSeen(toolCallId) + return false +} diff --git a/apps/sim/lib/copilot/orchestrator/stream-buffer.test.ts b/apps/sim/lib/copilot/orchestrator/stream-buffer.test.ts new file mode 100644 index 
000000000..94458e452 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/stream-buffer.test.ts @@ -0,0 +1,119 @@ +/** + * @vitest-environment node + */ + +import { loggerMock } from '@sim/testing' +import { beforeEach, describe, expect, it, vi } from 'vitest' + +vi.mock('@sim/logger', () => loggerMock) + +type StoredEntry = { score: number; value: string } + +const createRedisStub = () => { + const events = new Map() + const counters = new Map() + + const readEntries = (key: string, min: number, max: number) => { + const list = events.get(key) || [] + return list + .filter((entry) => entry.score >= min && entry.score <= max) + .sort((a, b) => a.score - b.score) + .map((entry) => entry.value) + } + + return { + del: vi.fn().mockResolvedValue(1), + hset: vi.fn().mockResolvedValue(1), + hgetall: vi.fn().mockResolvedValue({}), + expire: vi.fn().mockResolvedValue(1), + eval: vi + .fn() + .mockImplementation( + ( + _lua: string, + _keysCount: number, + seqKey: string, + eventsKey: string, + _ttl: number, + _limit: number, + streamId: string, + eventJson: string + ) => { + const current = counters.get(seqKey) || 0 + const next = current + 1 + counters.set(seqKey, next) + const entry = JSON.stringify({ eventId: next, streamId, event: JSON.parse(eventJson) }) + const list = events.get(eventsKey) || [] + list.push({ score: next, value: entry }) + events.set(eventsKey, list) + return next + } + ), + incrby: vi.fn().mockImplementation((key: string, amount: number) => { + const current = counters.get(key) || 0 + const next = current + amount + counters.set(key, next) + return next + }), + zrangebyscore: vi.fn().mockImplementation((key: string, min: string, max: string) => { + const minVal = Number(min) + const maxVal = max === '+inf' ? 
Number.POSITIVE_INFINITY : Number(max) + return Promise.resolve(readEntries(key, minVal, maxVal)) + }), + pipeline: vi.fn().mockImplementation(() => { + const api: Record = {} + api.zadd = vi.fn().mockImplementation((key: string, ...args: Array) => { + const list = events.get(key) || [] + for (let i = 0; i < args.length; i += 2) { + list.push({ score: Number(args[i]), value: String(args[i + 1]) }) + } + events.set(key, list) + return api + }) + api.expire = vi.fn().mockReturnValue(api) + api.zremrangebyrank = vi.fn().mockReturnValue(api) + api.exec = vi.fn().mockResolvedValue([]) + return api + }), + } +} + +let mockRedis: ReturnType + +vi.mock('@/lib/core/config/redis', () => ({ + getRedisClient: () => mockRedis, +})) + +import { + appendStreamEvent, + createStreamEventWriter, + readStreamEvents, +} from '@/lib/copilot/orchestrator/stream-buffer' + +describe('stream-buffer', () => { + beforeEach(() => { + mockRedis = createRedisStub() + vi.clearAllMocks() + }) + + it.concurrent('replays events after a given event id', async () => { + await appendStreamEvent('stream-1', { type: 'content', data: 'hello' }) + await appendStreamEvent('stream-1', { type: 'content', data: 'world' }) + + const allEvents = await readStreamEvents('stream-1', 0) + expect(allEvents.map((entry) => entry.event.data)).toEqual(['hello', 'world']) + + const replayed = await readStreamEvents('stream-1', 1) + expect(replayed.map((entry) => entry.event.data)).toEqual(['world']) + }) + + it.concurrent('flushes buffered events for resume', async () => { + const writer = createStreamEventWriter('stream-2') + await writer.write({ type: 'content', data: 'a' }) + await writer.write({ type: 'content', data: 'b' }) + await writer.flush() + + const events = await readStreamEvents('stream-2', 0) + expect(events.map((entry) => entry.event.data)).toEqual(['a', 'b']) + }) +}) diff --git a/apps/sim/lib/copilot/orchestrator/stream-buffer.ts b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts new file mode 100644 
index 000000000..bc0524c4a --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/stream-buffer.ts @@ -0,0 +1,309 @@ +import { createLogger } from '@sim/logger' +import { REDIS_COPILOT_STREAM_PREFIX } from '@/lib/copilot/constants' +import { env } from '@/lib/core/config/env' +import { getRedisClient } from '@/lib/core/config/redis' + +const logger = createLogger('CopilotStreamBuffer') + +const STREAM_DEFAULTS = { + ttlSeconds: 60 * 60, + eventLimit: 5000, + reserveBatch: 200, + flushIntervalMs: 15, + flushMaxBatch: 200, +} + +export type StreamBufferConfig = { + ttlSeconds: number + eventLimit: number + reserveBatch: number + flushIntervalMs: number + flushMaxBatch: number +} + +const parseNumber = (value: number | string | undefined, fallback: number): number => { + if (typeof value === 'number' && Number.isFinite(value)) return value + const parsed = Number(value) + return Number.isFinite(parsed) ? parsed : fallback +} + +export function getStreamBufferConfig(): StreamBufferConfig { + return { + ttlSeconds: parseNumber(env.COPILOT_STREAM_TTL_SECONDS, STREAM_DEFAULTS.ttlSeconds), + eventLimit: parseNumber(env.COPILOT_STREAM_EVENT_LIMIT, STREAM_DEFAULTS.eventLimit), + reserveBatch: parseNumber(env.COPILOT_STREAM_RESERVE_BATCH, STREAM_DEFAULTS.reserveBatch), + flushIntervalMs: parseNumber( + env.COPILOT_STREAM_FLUSH_INTERVAL_MS, + STREAM_DEFAULTS.flushIntervalMs + ), + flushMaxBatch: parseNumber(env.COPILOT_STREAM_FLUSH_MAX_BATCH, STREAM_DEFAULTS.flushMaxBatch), + } +} + +const APPEND_STREAM_EVENT_LUA = ` +local seqKey = KEYS[1] +local eventsKey = KEYS[2] +local ttl = tonumber(ARGV[1]) +local limit = tonumber(ARGV[2]) +local streamId = ARGV[3] +local eventJson = ARGV[4] + +local id = redis.call('INCR', seqKey) +local entry = '{"eventId":' .. id .. ',"streamId":' .. cjson.encode(streamId) .. ',"event":' .. eventJson .. 
'}' +redis.call('ZADD', eventsKey, id, entry) +redis.call('EXPIRE', eventsKey, ttl) +redis.call('EXPIRE', seqKey, ttl) +if limit > 0 then + redis.call('ZREMRANGEBYRANK', eventsKey, 0, -limit-1) +end +return id +` + +function getStreamKeyPrefix(streamId: string) { + return `${REDIS_COPILOT_STREAM_PREFIX}${streamId}` +} + +function getEventsKey(streamId: string) { + return `${getStreamKeyPrefix(streamId)}:events` +} + +function getSeqKey(streamId: string) { + return `${getStreamKeyPrefix(streamId)}:seq` +} + +function getMetaKey(streamId: string) { + return `${getStreamKeyPrefix(streamId)}:meta` +} + +export type StreamStatus = 'active' | 'complete' | 'error' + +export type StreamMeta = { + status: StreamStatus + userId?: string + updatedAt?: string + error?: string +} + +export type StreamEventEntry = { + eventId: number + streamId: string + event: Record +} + +export type StreamEventWriter = { + write: (event: Record) => Promise + flush: () => Promise + close: () => Promise +} + +export async function resetStreamBuffer(streamId: string): Promise { + const redis = getRedisClient() + if (!redis) return + try { + await redis.del(getEventsKey(streamId), getSeqKey(streamId), getMetaKey(streamId)) + } catch (error) { + logger.warn('Failed to reset stream buffer', { + streamId, + error: error instanceof Error ? error.message : String(error), + }) + } +} + +export async function setStreamMeta(streamId: string, meta: StreamMeta): Promise { + const redis = getRedisClient() + if (!redis) return + try { + const config = getStreamBufferConfig() + const payload: Record = { + status: meta.status, + updatedAt: meta.updatedAt || new Date().toISOString(), + } + if (meta.userId) payload.userId = meta.userId + if (meta.error) payload.error = meta.error + await redis.hset(getMetaKey(streamId), payload) + await redis.expire(getMetaKey(streamId), config.ttlSeconds) + } catch (error) { + logger.warn('Failed to update stream meta', { + streamId, + error: error instanceof Error ? 
error.message : String(error), + }) + } +} + +export async function getStreamMeta(streamId: string): Promise { + const redis = getRedisClient() + if (!redis) return null + try { + const meta = await redis.hgetall(getMetaKey(streamId)) + if (!meta || Object.keys(meta).length === 0) return null + return meta as StreamMeta + } catch (error) { + logger.warn('Failed to read stream meta', { + streamId, + error: error instanceof Error ? error.message : String(error), + }) + return null + } +} + +export async function appendStreamEvent( + streamId: string, + event: Record +): Promise { + const redis = getRedisClient() + if (!redis) { + return { eventId: 0, streamId, event } + } + + try { + const config = getStreamBufferConfig() + const eventJson = JSON.stringify(event) + const nextId = await redis.eval( + APPEND_STREAM_EVENT_LUA, + 2, + getSeqKey(streamId), + getEventsKey(streamId), + config.ttlSeconds, + config.eventLimit, + streamId, + eventJson + ) + const eventId = typeof nextId === 'number' ? nextId : Number(nextId) + return { eventId, streamId, event } + } catch (error) { + logger.warn('Failed to append stream event', { + streamId, + error: error instanceof Error ? 
error.message : String(error), + }) + return { eventId: 0, streamId, event } + } +} + +export function createStreamEventWriter(streamId: string): StreamEventWriter { + const redis = getRedisClient() + if (!redis) { + return { + write: async (event) => ({ eventId: 0, streamId, event }), + flush: async () => {}, + close: async () => {}, + } + } + + const config = getStreamBufferConfig() + let pending: StreamEventEntry[] = [] + let nextEventId = 0 + let maxReservedId = 0 + let flushTimer: ReturnType | null = null + const scheduleFlush = () => { + if (flushTimer) return + flushTimer = setTimeout(() => { + flushTimer = null + void flush() + }, config.flushIntervalMs) + } + + const reserveIds = async (minCount: number) => { + const reserveCount = Math.max(config.reserveBatch, minCount) + const newMax = await redis.incrby(getSeqKey(streamId), reserveCount) + const startId = newMax - reserveCount + 1 + if (nextEventId === 0 || nextEventId > maxReservedId) { + nextEventId = startId + maxReservedId = newMax + } + } + + let flushPromise: Promise | null = null + let closed = false + + const doFlush = async () => { + if (pending.length === 0) return + const batch = pending + pending = [] + try { + const key = getEventsKey(streamId) + const zaddArgs: (string | number)[] = [] + for (const entry of batch) { + zaddArgs.push(entry.eventId, JSON.stringify(entry)) + } + const pipeline = redis.pipeline() + pipeline.zadd(key, ...(zaddArgs as [number, string])) + pipeline.expire(key, config.ttlSeconds) + pipeline.expire(getSeqKey(streamId), config.ttlSeconds) + pipeline.zremrangebyrank(key, 0, -config.eventLimit - 1) + await pipeline.exec() + } catch (error) { + logger.warn('Failed to flush stream events', { + streamId, + error: error instanceof Error ? 
error.message : String(error), + }) + pending = batch.concat(pending) + } + } + + const flush = async () => { + if (flushPromise) { + await flushPromise + return + } + flushPromise = doFlush() + try { + await flushPromise + } finally { + flushPromise = null + if (pending.length > 0) scheduleFlush() + } + } + + const write = async (event: Record) => { + if (closed) return { eventId: 0, streamId, event } + if (nextEventId === 0 || nextEventId > maxReservedId) { + await reserveIds(1) + } + const eventId = nextEventId++ + const entry: StreamEventEntry = { eventId, streamId, event } + pending.push(entry) + if (pending.length >= config.flushMaxBatch) { + await flush() + } else { + scheduleFlush() + } + return entry + } + + const close = async () => { + closed = true + if (flushTimer) { + clearTimeout(flushTimer) + flushTimer = null + } + await flush() + } + + return { write, flush, close } +} + +export async function readStreamEvents( + streamId: string, + afterEventId: number +): Promise { + const redis = getRedisClient() + if (!redis) return [] + try { + const raw = await redis.zrangebyscore(getEventsKey(streamId), afterEventId + 1, '+inf') + return raw + .map((entry) => { + try { + return JSON.parse(entry) as StreamEventEntry + } catch { + return null + } + }) + .filter((entry): entry is StreamEventEntry => Boolean(entry)) + } catch (error) { + logger.warn('Failed to read stream events', { + streamId, + error: error instanceof Error ? 
error.message : String(error), + }) + return [] + } +} diff --git a/apps/sim/lib/copilot/orchestrator/stream-core.ts b/apps/sim/lib/copilot/orchestrator/stream-core.ts new file mode 100644 index 000000000..e1dc2e2fc --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/stream-core.ts @@ -0,0 +1,182 @@ +import { createLogger } from '@sim/logger' +import { ORCHESTRATION_TIMEOUT_MS } from '@/lib/copilot/constants' +import { + handleSubagentRouting, + sseHandlers, + subAgentHandlers, +} from '@/lib/copilot/orchestrator/sse-handlers' +import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' +import { + normalizeSseEvent, + shouldSkipToolCallEvent, + shouldSkipToolResultEvent, +} from '@/lib/copilot/orchestrator/sse-utils' +import type { + ExecutionContext, + OrchestratorOptions, + SSEEvent, + StreamingContext, + ToolCallSummary, +} from '@/lib/copilot/orchestrator/types' + +const logger = createLogger('CopilotStreamCore') + +/** + * Options for the shared stream processing loop. + */ +export interface StreamLoopOptions extends OrchestratorOptions { + /** + * Called for each normalized event BEFORE standard handler dispatch. + * Return true to skip the default handler for this event. + */ + onBeforeDispatch?: (event: SSEEvent, context: StreamingContext) => boolean | undefined +} + +/** + * Create a fresh StreamingContext. + */ +export function createStreamingContext(overrides?: Partial): StreamingContext { + return { + chatId: undefined, + conversationId: undefined, + messageId: crypto.randomUUID(), + accumulatedContent: '', + contentBlocks: [], + toolCalls: new Map(), + currentThinkingBlock: null, + isInThinkingBlock: false, + subAgentParentToolCallId: undefined, + subAgentContent: {}, + subAgentToolCalls: {}, + pendingContent: '', + streamComplete: false, + wasAborted: false, + errors: [], + ...overrides, + } +} + +/** + * Run the SSE stream processing loop. + * + * Handles: fetch -> parse -> normalize -> dedupe -> subagent routing -> handler dispatch. 
+ * Callers provide the fetch URL/options and can intercept events via onBeforeDispatch. + */ +export async function runStreamLoop( + fetchUrl: string, + fetchOptions: RequestInit, + context: StreamingContext, + execContext: ExecutionContext, + options: StreamLoopOptions +): Promise { + const { timeout = ORCHESTRATION_TIMEOUT_MS, abortSignal } = options + + const response = await fetch(fetchUrl, { + ...fetchOptions, + signal: abortSignal, + }) + + if (!response.ok) { + const errorText = await response.text().catch(() => '') + throw new Error( + `Copilot backend error (${response.status}): ${errorText || response.statusText}` + ) + } + + if (!response.body) { + throw new Error('Copilot backend response missing body') + } + + const reader = response.body.getReader() + const decoder = new TextDecoder() + + const timeoutId = setTimeout(() => { + context.errors.push('Request timed out') + context.streamComplete = true + reader.cancel().catch(() => {}) + }, timeout) + + try { + for await (const event of parseSSEStream(reader, decoder, abortSignal)) { + if (abortSignal?.aborted) { + context.wasAborted = true + break + } + + const normalizedEvent = normalizeSseEvent(event) + + // Skip duplicate tool events. + const shouldSkipToolCall = shouldSkipToolCallEvent(normalizedEvent) + const shouldSkipToolResult = shouldSkipToolResultEvent(normalizedEvent) + + if (!shouldSkipToolCall && !shouldSkipToolResult) { + try { + await options.onEvent?.(normalizedEvent) + } catch (error) { + logger.warn('Failed to forward SSE event', { + type: normalizedEvent.type, + error: error instanceof Error ? error.message : String(error), + }) + } + } + + // Let the caller intercept before standard dispatch. + if (options.onBeforeDispatch?.(normalizedEvent, context)) { + if (context.streamComplete) break + continue + } + + // Standard subagent start/end handling. 
+ if (normalizedEvent.type === 'subagent_start') { + const eventData = normalizedEvent.data as Record | undefined + const toolCallId = eventData?.tool_call_id as string | undefined + if (toolCallId) { + context.subAgentParentToolCallId = toolCallId + context.subAgentContent[toolCallId] = '' + context.subAgentToolCalls[toolCallId] = [] + } + continue + } + + if (normalizedEvent.type === 'subagent_end') { + context.subAgentParentToolCallId = undefined + continue + } + + // Subagent event routing. + if (handleSubagentRouting(normalizedEvent, context)) { + const handler = subAgentHandlers[normalizedEvent.type] + if (handler) { + await handler(normalizedEvent, context, execContext, options) + } + if (context.streamComplete) break + continue + } + + // Main event handler dispatch. + const handler = sseHandlers[normalizedEvent.type] + if (handler) { + await handler(normalizedEvent, context, execContext, options) + } + if (context.streamComplete) break + } + } finally { + clearTimeout(timeoutId) + } +} + +/** + * Build a ToolCallSummary array from the streaming context. + */ +export function buildToolCallSummaries(context: StreamingContext): ToolCallSummary[] { + return Array.from(context.toolCalls.values()).map((toolCall) => ({ + id: toolCall.id, + name: toolCall.name, + status: toolCall.status, + params: toolCall.params, + result: toolCall.result?.output, + error: toolCall.error, + durationMs: + toolCall.endTime && toolCall.startTime ? 
toolCall.endTime - toolCall.startTime : undefined, + })) +} diff --git a/apps/sim/lib/copilot/orchestrator/subagent.ts b/apps/sim/lib/copilot/orchestrator/subagent.ts new file mode 100644 index 000000000..d997fcbb9 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/subagent.ts @@ -0,0 +1,137 @@ +import { createLogger } from '@sim/logger' +import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor' +import type { + ExecutionContext, + OrchestratorOptions, + SSEEvent, + StreamingContext, + ToolCallSummary, +} from '@/lib/copilot/orchestrator/types' +import { env } from '@/lib/core/config/env' +import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' +import { buildToolCallSummaries, createStreamingContext, runStreamLoop } from './stream-core' + +const logger = createLogger('CopilotSubagentOrchestrator') + +export interface SubagentOrchestratorOptions extends Omit { + userId: string + workflowId?: string + workspaceId?: string + onComplete?: (result: SubagentOrchestratorResult) => void | Promise +} + +export interface SubagentOrchestratorResult { + success: boolean + content: string + toolCalls: ToolCallSummary[] + structuredResult?: { + type?: string + summary?: string + data?: unknown + success?: boolean + } + error?: string + errors?: string[] +} + +export async function orchestrateSubagentStream( + agentId: string, + requestPayload: Record, + options: SubagentOrchestratorOptions +): Promise { + const { userId, workflowId, workspaceId } = options + const execContext = await buildExecutionContext(userId, workflowId, workspaceId) + + const msgId = requestPayload?.messageId + const context = createStreamingContext({ + messageId: typeof msgId === 'string' ? 
msgId : crypto.randomUUID(), + }) + + let structuredResult: SubagentOrchestratorResult['structuredResult'] + + try { + await runStreamLoop( + `${SIM_AGENT_API_URL}/api/subagent/${agentId}`, + { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), + }, + body: JSON.stringify({ ...requestPayload, userId, stream: true }), + }, + context, + execContext, + { + ...options, + onBeforeDispatch: (event: SSEEvent, ctx: StreamingContext) => { + // Handle structured_result / subagent_result - subagent-specific. + if (event.type === 'structured_result' || event.type === 'subagent_result') { + structuredResult = normalizeStructuredResult(event.data) + ctx.streamComplete = true + return true // skip default dispatch + } + + // For direct subagent calls, events may have the subagent field set + // but no subagent_start because this IS the top-level agent. + // Skip subagent routing for events where the subagent field matches + // the current agentId - these are top-level events. + if (event.subagent === agentId && !ctx.subAgentParentToolCallId) { + return false // let default dispatch handle it + } + + return false // let default dispatch handle it + }, + } + ) + + const result: SubagentOrchestratorResult = { + success: context.errors.length === 0 && !context.wasAborted, + content: context.accumulatedContent, + toolCalls: buildToolCallSummaries(context), + structuredResult, + errors: context.errors.length ? context.errors : undefined, + } + await options.onComplete?.(result) + return result + } catch (error) { + const err = error instanceof Error ? 
error : new Error('Subagent orchestration failed') + logger.error('Subagent orchestration failed', { error: err.message, agentId }) + await options.onError?.(err) + return { + success: false, + content: context.accumulatedContent, + toolCalls: [], + error: err.message, + } + } +} + +function normalizeStructuredResult(data: unknown): SubagentOrchestratorResult['structuredResult'] { + if (!data || typeof data !== 'object') return undefined + const d = data as Record + return { + type: (d.result_type || d.type) as string | undefined, + summary: d.summary as string | undefined, + data: d.data ?? d, + success: d.success as boolean | undefined, + } +} + +async function buildExecutionContext( + userId: string, + workflowId?: string, + workspaceId?: string +): Promise { + if (workflowId) { + return prepareExecutionContext(userId, workflowId) + } + const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId) + return { + userId, + workflowId: workflowId || '', + workspaceId, + decryptedEnvVars, + } +} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/access.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/access.ts new file mode 100644 index 000000000..b19459afa --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/access.ts @@ -0,0 +1,129 @@ +import { db } from '@sim/db' +import { permissions, workflow, workspace } from '@sim/db/schema' +import { and, asc, desc, eq, inArray, or } from 'drizzle-orm' + +type WorkflowRecord = typeof workflow.$inferSelect + +export async function ensureWorkflowAccess( + workflowId: string, + userId: string +): Promise<{ + workflow: WorkflowRecord + workspaceId?: string | null +}> { + const [workflowRecord] = await db + .select() + .from(workflow) + .where(eq(workflow.id, workflowId)) + .limit(1) + if (!workflowRecord) { + throw new Error(`Workflow ${workflowId} not found`) + } + + if (workflowRecord.userId === userId) { + return { workflow: workflowRecord, workspaceId: workflowRecord.workspaceId } + 
} + + if (workflowRecord.workspaceId) { + const [permissionRow] = await db + .select({ permissionType: permissions.permissionType }) + .from(permissions) + .where( + and( + eq(permissions.entityType, 'workspace'), + eq(permissions.entityId, workflowRecord.workspaceId), + eq(permissions.userId, userId) + ) + ) + .limit(1) + if (permissionRow) { + return { workflow: workflowRecord, workspaceId: workflowRecord.workspaceId } + } + } + + throw new Error('Unauthorized workflow access') +} + +export async function getDefaultWorkspaceId(userId: string): Promise { + const workspaces = await db + .select({ workspaceId: workspace.id }) + .from(permissions) + .innerJoin(workspace, eq(permissions.entityId, workspace.id)) + .where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace'))) + .orderBy(desc(workspace.createdAt)) + .limit(1) + + const workspaceId = workspaces[0]?.workspaceId + if (!workspaceId) { + throw new Error('No workspace found for user') + } + + return workspaceId +} + +export async function ensureWorkspaceAccess( + workspaceId: string, + userId: string, + requireWrite: boolean +): Promise { + const [row] = await db + .select({ + permissionType: permissions.permissionType, + ownerId: workspace.ownerId, + }) + .from(permissions) + .innerJoin(workspace, eq(permissions.entityId, workspace.id)) + .where( + and( + eq(permissions.entityType, 'workspace'), + eq(permissions.entityId, workspaceId), + eq(permissions.userId, userId) + ) + ) + .limit(1) + + if (!row) { + throw new Error(`Workspace ${workspaceId} not found`) + } + + const isOwner = row.ownerId === userId + const permissionType = row.permissionType + const canWrite = isOwner || permissionType === 'admin' || permissionType === 'write' + + if (requireWrite && !canWrite) { + throw new Error('Write or admin access required for this workspace') + } + + if (!requireWrite && !canWrite && permissionType !== 'read') { + throw new Error('Access denied to workspace') + } +} + +export async function 
getAccessibleWorkflowsForUser( + userId: string, + options?: { workspaceId?: string; folderId?: string } +) { + const workspaceIds = await db + .select({ entityId: permissions.entityId }) + .from(permissions) + .where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace'))) + + const workspaceIdList = workspaceIds.map((row) => row.entityId) + + const workflowConditions = [eq(workflow.userId, userId)] + if (workspaceIdList.length > 0) { + workflowConditions.push(inArray(workflow.workspaceId, workspaceIdList)) + } + if (options?.workspaceId) { + workflowConditions.push(eq(workflow.workspaceId, options.workspaceId)) + } + if (options?.folderId) { + workflowConditions.push(eq(workflow.folderId, options.folderId)) + } + + return db + .select() + .from(workflow) + .where(or(...workflowConditions)) + .orderBy(asc(workflow.sortOrder), asc(workflow.createdAt), asc(workflow.id)) +} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/deploy.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/deploy.ts new file mode 100644 index 000000000..9d8f2b783 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/deploy.ts @@ -0,0 +1,317 @@ +import crypto from 'crypto' +import { db } from '@sim/db' +import { chat, workflowMcpTool } from '@sim/db/schema' +import { and, eq } from 'drizzle-orm' +import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' +import { getBaseUrl } from '@/lib/core/utils/urls' +import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema' +import { deployWorkflow, undeployWorkflow } from '@/lib/workflows/persistence/utils' +import { checkChatAccess, checkWorkflowAccessForChatCreation } from '@/app/api/chat/utils' +import { ensureWorkflowAccess } from '../access' +import type { DeployApiParams, DeployChatParams, DeployMcpParams } from '../param-types' + +export async function executeDeployApi( + params: DeployApiParams, + context: 
ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const action = params.action === 'undeploy' ? 'undeploy' : 'deploy' + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + + if (action === 'undeploy') { + const result = await undeployWorkflow({ workflowId }) + if (!result.success) { + return { success: false, error: result.error || 'Failed to undeploy workflow' } + } + return { success: true, output: { workflowId, isDeployed: false } } + } + + const result = await deployWorkflow({ + workflowId, + deployedBy: context.userId, + workflowName: workflowRecord.name || undefined, + }) + if (!result.success) { + return { success: false, error: result.error || 'Failed to deploy workflow' } + } + + const baseUrl = getBaseUrl() + return { + success: true, + output: { + workflowId, + isDeployed: true, + deployedAt: result.deployedAt, + version: result.version, + apiEndpoint: `${baseUrl}/api/workflows/${workflowId}/run`, + baseUrl, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeDeployChat( + params: DeployChatParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + const action = params.action === 'undeploy' ? 
'undeploy' : 'deploy' + if (action === 'undeploy') { + const existing = await db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1) + if (!existing.length) { + return { success: false, error: 'No active chat deployment found for this workflow' } + } + const { hasAccess } = await checkChatAccess(existing[0].id, context.userId) + if (!hasAccess) { + return { success: false, error: 'Unauthorized chat access' } + } + await db.delete(chat).where(eq(chat.id, existing[0].id)) + return { success: true, output: { success: true, action: 'undeploy', isDeployed: false } } + } + + const { hasAccess } = await checkWorkflowAccessForChatCreation(workflowId, context.userId) + if (!hasAccess) { + return { success: false, error: 'Workflow not found or access denied' } + } + + const existing = await db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1) + const existingDeployment = existing[0] || null + + const identifier = String(params.identifier || existingDeployment?.identifier || '').trim() + const title = String(params.title || existingDeployment?.title || '').trim() + if (!identifier || !title) { + return { success: false, error: 'Chat identifier and title are required' } + } + + const identifierPattern = /^[a-z0-9-]+$/ + if (!identifierPattern.test(identifier)) { + return { + success: false, + error: 'Identifier can only contain lowercase letters, numbers, and hyphens', + } + } + + const existingIdentifier = await db + .select() + .from(chat) + .where(eq(chat.identifier, identifier)) + .limit(1) + if (existingIdentifier.length > 0 && existingIdentifier[0].id !== existingDeployment?.id) { + return { success: false, error: 'Identifier already in use' } + } + + const deployResult = await deployWorkflow({ + workflowId, + deployedBy: context.userId, + }) + if (!deployResult.success) { + return { success: false, error: deployResult.error || 'Failed to deploy workflow' } + } + + const existingCustomizations = + (existingDeployment?.customizations as + 
| { primaryColor?: string; welcomeMessage?: string } + | undefined) || {} + + const payload = { + workflowId, + identifier, + title, + description: String(params.description || existingDeployment?.description || ''), + customizations: { + primaryColor: + params.customizations?.primaryColor || + existingCustomizations.primaryColor || + 'var(--brand-primary-hover-hex)', + welcomeMessage: + params.customizations?.welcomeMessage || + existingCustomizations.welcomeMessage || + 'Hi there! How can I help you today?', + }, + authType: params.authType || existingDeployment?.authType || 'public', + password: params.password, + allowedEmails: params.allowedEmails || existingDeployment?.allowedEmails || [], + outputConfigs: params.outputConfigs || existingDeployment?.outputConfigs || [], + } + + if (existingDeployment) { + await db + .update(chat) + .set({ + identifier: payload.identifier, + title: payload.title, + description: payload.description, + customizations: payload.customizations, + authType: payload.authType, + password: payload.password || existingDeployment.password, + allowedEmails: + payload.authType === 'email' || payload.authType === 'sso' ? payload.allowedEmails : [], + outputConfigs: payload.outputConfigs, + updatedAt: new Date(), + }) + .where(eq(chat.id, existingDeployment.id)) + } else { + await db.insert(chat).values({ + id: crypto.randomUUID(), + workflowId, + userId: context.userId, + identifier: payload.identifier, + title: payload.title, + description: payload.description, + customizations: payload.customizations, + isActive: true, + authType: payload.authType, + password: payload.password || null, + allowedEmails: + payload.authType === 'email' || payload.authType === 'sso' ? 
payload.allowedEmails : [], + outputConfigs: payload.outputConfigs, + createdAt: new Date(), + updatedAt: new Date(), + }) + } + + const baseUrl = getBaseUrl() + return { + success: true, + output: { + success: true, + action: 'deploy', + isDeployed: true, + identifier, + chatUrl: `${baseUrl}/chat/${identifier}`, + apiEndpoint: `${baseUrl}/api/workflows/${workflowId}/run`, + baseUrl, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeDeployMcp( + params: DeployMcpParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const workspaceId = workflowRecord.workspaceId + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + + if (!workflowRecord.isDeployed) { + return { + success: false, + error: 'Workflow must be deployed before adding as an MCP tool. Use deploy_api first.', + } + } + + const serverId = params.serverId + if (!serverId) { + return { + success: false, + error: 'serverId is required. 
Use list_workspace_mcp_servers to get available servers.', + } + } + + const existingTool = await db + .select() + .from(workflowMcpTool) + .where( + and(eq(workflowMcpTool.serverId, serverId), eq(workflowMcpTool.workflowId, workflowId)) + ) + .limit(1) + + const toolName = sanitizeToolName( + params.toolName || workflowRecord.name || `workflow_${workflowId}` + ) + const toolDescription = + params.toolDescription || + workflowRecord.description || + `Execute ${workflowRecord.name} workflow` + const parameterSchema = params.parameterSchema || {} + + const baseUrl = getBaseUrl() + const mcpServerUrl = `${baseUrl}/api/mcp/serve/${serverId}` + + if (existingTool.length > 0) { + const toolId = existingTool[0].id + await db + .update(workflowMcpTool) + .set({ + toolName, + toolDescription, + parameterSchema, + updatedAt: new Date(), + }) + .where(eq(workflowMcpTool.id, toolId)) + return { + success: true, + output: { toolId, toolName, toolDescription, updated: true, mcpServerUrl, baseUrl }, + } + } + + const toolId = crypto.randomUUID() + await db.insert(workflowMcpTool).values({ + id: toolId, + serverId, + workflowId, + toolName, + toolDescription, + parameterSchema, + createdAt: new Date(), + updatedAt: new Date(), + }) + + return { + success: true, + output: { toolId, toolName, toolDescription, updated: false, mcpServerUrl, baseUrl }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeRedeploy(context: ExecutionContext): Promise { + try { + const workflowId = context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + await ensureWorkflowAccess(workflowId, context.userId) + + const result = await deployWorkflow({ workflowId, deployedBy: context.userId }) + if (!result.success) { + return { success: false, error: result.error || 'Failed to redeploy workflow' } + } + const baseUrl = getBaseUrl() + return { + success: true, + output: { + workflowId, + deployedAt: result.deployedAt || null, + version: result.version, + apiEndpoint: `${baseUrl}/api/workflows/${workflowId}/run`, + baseUrl, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/index.ts new file mode 100644 index 000000000..9e490922b --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/index.ts @@ -0,0 +1,2 @@ +export * from './deploy' +export * from './manage' diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/manage.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/manage.ts new file mode 100644 index 000000000..dc5d7a988 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/deployment-tools/manage.ts @@ -0,0 +1,226 @@ +import crypto from 'crypto' +import { db } from '@sim/db' +import { chat, workflow, workflowMcpServer, workflowMcpTool } from '@sim/db/schema' +import { eq, inArray } from 'drizzle-orm' +import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' +import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema' +import { hasValidStartBlock } from '@/lib/workflows/triggers/trigger-utils.server' +import { 
ensureWorkflowAccess } from '../access' +import type { + CheckDeploymentStatusParams, + CreateWorkspaceMcpServerParams, + ListWorkspaceMcpServersParams, +} from '../param-types' + +export async function executeCheckDeploymentStatus( + params: CheckDeploymentStatusParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const workspaceId = workflowRecord.workspaceId + + const [apiDeploy, chatDeploy] = await Promise.all([ + db.select().from(workflow).where(eq(workflow.id, workflowId)).limit(1), + db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1), + ]) + + const isApiDeployed = apiDeploy[0]?.isDeployed || false + const apiDetails = { + isDeployed: isApiDeployed, + deployedAt: apiDeploy[0]?.deployedAt || null, + endpoint: isApiDeployed ? `/api/workflows/${workflowId}/execute` : null, + apiKey: workflowRecord.workspaceId ? 'Workspace API keys' : 'Personal API keys', + needsRedeployment: false, + } + + const isChatDeployed = !!chatDeploy[0] + const chatCustomizations = + (chatDeploy[0]?.customizations as + | { welcomeMessage?: string; primaryColor?: string } + | undefined) || {} + const chatDetails = { + isDeployed: isChatDeployed, + chatId: chatDeploy[0]?.id || null, + identifier: chatDeploy[0]?.identifier || null, + chatUrl: isChatDeployed ? 
`/chat/${chatDeploy[0]?.identifier}` : null, + title: chatDeploy[0]?.title || null, + description: chatDeploy[0]?.description || null, + authType: chatDeploy[0]?.authType || null, + allowedEmails: chatDeploy[0]?.allowedEmails || null, + outputConfigs: chatDeploy[0]?.outputConfigs || null, + welcomeMessage: chatCustomizations.welcomeMessage || null, + primaryColor: chatCustomizations.primaryColor || null, + hasPassword: Boolean(chatDeploy[0]?.password), + } + + const mcpDetails: { + isDeployed: boolean + servers: Array<{ + serverId: string + serverName: string + toolName: string + toolDescription: string | null + parameterSchema: unknown + toolId: string + }> + } = { isDeployed: false, servers: [] } + if (workspaceId) { + const servers = await db + .select({ + serverId: workflowMcpServer.id, + serverName: workflowMcpServer.name, + toolName: workflowMcpTool.toolName, + toolDescription: workflowMcpTool.toolDescription, + parameterSchema: workflowMcpTool.parameterSchema, + toolId: workflowMcpTool.id, + }) + .from(workflowMcpTool) + .innerJoin(workflowMcpServer, eq(workflowMcpTool.serverId, workflowMcpServer.id)) + .where(eq(workflowMcpTool.workflowId, workflowId)) + + if (servers.length > 0) { + mcpDetails.isDeployed = true + mcpDetails.servers = servers + } + } + + const isDeployed = apiDetails.isDeployed || chatDetails.isDeployed || mcpDetails.isDeployed + return { + success: true, + output: { isDeployed, api: apiDetails, chat: chatDetails, mcp: mcpDetails }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeListWorkspaceMcpServers( + params: ListWorkspaceMcpServersParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const workspaceId = workflowRecord.workspaceId + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + + const servers = await db + .select({ + id: workflowMcpServer.id, + name: workflowMcpServer.name, + description: workflowMcpServer.description, + }) + .from(workflowMcpServer) + .where(eq(workflowMcpServer.workspaceId, workspaceId)) + + const serverIds = servers.map((server) => server.id) + const tools = + serverIds.length > 0 + ? await db + .select({ + serverId: workflowMcpTool.serverId, + toolName: workflowMcpTool.toolName, + }) + .from(workflowMcpTool) + .where(inArray(workflowMcpTool.serverId, serverIds)) + : [] + + const toolNamesByServer: Record = {} + for (const tool of tools) { + if (!toolNamesByServer[tool.serverId]) { + toolNamesByServer[tool.serverId] = [] + } + toolNamesByServer[tool.serverId].push(tool.toolName) + } + + const serversWithToolNames = servers.map((server) => ({ + ...server, + toolCount: toolNamesByServer[server.id]?.length || 0, + toolNames: toolNamesByServer[server.id] || [], + })) + + return { success: true, output: { servers: serversWithToolNames, count: servers.length } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeCreateWorkspaceMcpServer( + params: CreateWorkspaceMcpServerParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const workspaceId = workflowRecord.workspaceId + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + + const name = params.name?.trim() + if (!name) { + return { success: false, error: 'name is required' } + } + + const serverId = crypto.randomUUID() + const [server] = await db + .insert(workflowMcpServer) + .values({ + id: serverId, + workspaceId, + createdBy: context.userId, + name, + description: params.description?.trim() || null, + isPublic: params.isPublic ?? false, + createdAt: new Date(), + updatedAt: new Date(), + }) + .returning() + + const workflowIds: string[] = params.workflowIds || [] + const addedTools: Array<{ workflowId: string; toolName: string }> = [] + + if (workflowIds.length > 0) { + const workflows = await db.select().from(workflow).where(inArray(workflow.id, workflowIds)) + + for (const wf of workflows) { + if (wf.workspaceId !== workspaceId || !wf.isDeployed) { + continue + } + const hasStartBlock = await hasValidStartBlock(wf.id) + if (!hasStartBlock) { + continue + } + const toolName = sanitizeToolName(wf.name || `workflow_${wf.id}`) + await db.insert(workflowMcpTool).values({ + id: crypto.randomUUID(), + serverId, + workflowId: wf.id, + toolName, + toolDescription: wf.description || `Execute ${wf.name} workflow`, + parameterSchema: {}, + createdAt: new Date(), + updatedAt: new Date(), + }) + addedTools.push({ workflowId: wf.id, toolName }) + } + } + + return { success: true, output: { server, addedTools } } + } catch (error) { + return { success: false, error: error instanceof 
Error ? error.message : String(error) } + } +} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts new file mode 100644 index 000000000..2bd0e6611 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/index.ts @@ -0,0 +1,341 @@ +import { db } from '@sim/db' +import { workflow } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { eq } from 'drizzle-orm' +import { SIM_AGENT_API_URL } from '@/lib/copilot/constants' +import type { + ExecutionContext, + ToolCallResult, + ToolCallState, +} from '@/lib/copilot/orchestrator/types' +import { routeExecution } from '@/lib/copilot/tools/server/router' +import { env } from '@/lib/core/config/env' +import { getBaseUrl } from '@/lib/core/utils/urls' +import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' +import { getTool, resolveToolId } from '@/tools/utils' +import { + executeCheckDeploymentStatus, + executeCreateWorkspaceMcpServer, + executeDeployApi, + executeDeployChat, + executeDeployMcp, + executeListWorkspaceMcpServers, + executeRedeploy, +} from './deployment-tools' +import { executeIntegrationToolDirect } from './integration-tools' +import type { + CheckDeploymentStatusParams, + CreateFolderParams, + CreateWorkflowParams, + CreateWorkspaceMcpServerParams, + DeployApiParams, + DeployChatParams, + DeployMcpParams, + GenerateApiKeyParams, + GetBlockOutputsParams, + GetBlockUpstreamReferencesParams, + GetDeployedWorkflowStateParams, + GetUserWorkflowParams, + GetWorkflowDataParams, + GetWorkflowFromNameParams, + ListFoldersParams, + ListUserWorkflowsParams, + ListWorkspaceMcpServersParams, + MoveFolderParams, + MoveWorkflowParams, + RenameWorkflowParams, + RunBlockParams, + RunFromBlockParams, + RunWorkflowParams, + RunWorkflowUntilBlockParams, + SetGlobalWorkflowVariablesParams, +} from './param-types' +import { PLATFORM_ACTIONS_CONTENT } from './platform-actions' +import { + executeCreateFolder, + 
executeCreateWorkflow, + executeGenerateApiKey, + executeGetBlockOutputs, + executeGetBlockUpstreamReferences, + executeGetDeployedWorkflowState, + executeGetUserWorkflow, + executeGetWorkflowData, + executeGetWorkflowFromName, + executeListFolders, + executeListUserWorkflows, + executeListUserWorkspaces, + executeMoveFolder, + executeMoveWorkflow, + executeRenameWorkflow, + executeRunBlock, + executeRunFromBlock, + executeRunWorkflow, + executeRunWorkflowUntilBlock, + executeSetGlobalWorkflowVariables, +} from './workflow-tools' + +const logger = createLogger('CopilotToolExecutor') + +const SERVER_TOOLS = new Set([ + 'get_blocks_and_tools', + 'get_blocks_metadata', + 'get_block_options', + 'get_block_config', + 'get_trigger_blocks', + 'edit_workflow', + 'get_workflow_console', + 'search_documentation', + 'search_online', + 'set_environment_variables', + 'get_credentials', + 'make_api_request', + 'knowledge_base', +]) + +const SIM_WORKFLOW_TOOL_HANDLERS: Record< + string, + (params: Record, context: ExecutionContext) => Promise +> = { + get_user_workflow: (p, c) => executeGetUserWorkflow(p as GetUserWorkflowParams, c), + get_workflow_from_name: (p, c) => executeGetWorkflowFromName(p as GetWorkflowFromNameParams, c), + list_user_workflows: (p, c) => executeListUserWorkflows(p as ListUserWorkflowsParams, c), + list_user_workspaces: (_p, c) => executeListUserWorkspaces(c), + list_folders: (p, c) => executeListFolders(p as ListFoldersParams, c), + create_workflow: (p, c) => executeCreateWorkflow(p as CreateWorkflowParams, c), + create_folder: (p, c) => executeCreateFolder(p as CreateFolderParams, c), + rename_workflow: (p, c) => executeRenameWorkflow(p as unknown as RenameWorkflowParams, c), + move_workflow: (p, c) => executeMoveWorkflow(p as unknown as MoveWorkflowParams, c), + move_folder: (p, c) => executeMoveFolder(p as unknown as MoveFolderParams, c), + get_workflow_data: (p, c) => executeGetWorkflowData(p as GetWorkflowDataParams, c), + get_block_outputs: (p, c) 
=> executeGetBlockOutputs(p as GetBlockOutputsParams, c), + get_block_upstream_references: (p, c) => + executeGetBlockUpstreamReferences(p as unknown as GetBlockUpstreamReferencesParams, c), + run_workflow: (p, c) => executeRunWorkflow(p as RunWorkflowParams, c), + run_workflow_until_block: (p, c) => + executeRunWorkflowUntilBlock(p as unknown as RunWorkflowUntilBlockParams, c), + run_from_block: (p, c) => executeRunFromBlock(p as unknown as RunFromBlockParams, c), + run_block: (p, c) => executeRunBlock(p as unknown as RunBlockParams, c), + get_deployed_workflow_state: (p, c) => + executeGetDeployedWorkflowState(p as GetDeployedWorkflowStateParams, c), + generate_api_key: (p, c) => executeGenerateApiKey(p as unknown as GenerateApiKeyParams, c), + get_platform_actions: () => + Promise.resolve({ + success: true, + output: { content: PLATFORM_ACTIONS_CONTENT }, + }), + set_global_workflow_variables: (p, c) => + executeSetGlobalWorkflowVariables(p as SetGlobalWorkflowVariablesParams, c), + deploy_api: (p, c) => executeDeployApi(p as DeployApiParams, c), + deploy_chat: (p, c) => executeDeployChat(p as DeployChatParams, c), + deploy_mcp: (p, c) => executeDeployMcp(p as DeployMcpParams, c), + redeploy: (_p, c) => executeRedeploy(c), + check_deployment_status: (p, c) => + executeCheckDeploymentStatus(p as CheckDeploymentStatusParams, c), + list_workspace_mcp_servers: (p, c) => + executeListWorkspaceMcpServers(p as ListWorkspaceMcpServersParams, c), + create_workspace_mcp_server: (p, c) => + executeCreateWorkspaceMcpServer(p as CreateWorkspaceMcpServerParams, c), + oauth_get_auth_link: async (p, _c) => { + const providerName = (p.providerName || p.provider_name || 'the provider') as string + try { + const baseUrl = getBaseUrl() + const settingsUrl = `${baseUrl}/workspace` + return { + success: true, + output: { + message: `To connect ${providerName}, the user must authorize via their browser.`, + oauth_url: settingsUrl, + instructions: `Open ${settingsUrl} in a browser and 
go to the workflow editor to connect ${providerName} credentials.`, + provider: providerName, + baseUrl, + }, + } + } catch { + return { + success: true, + output: { + message: `To connect ${providerName}, the user must authorize via their browser.`, + instructions: `Open the Sim workspace in a browser and go to the workflow editor to connect ${providerName} credentials.`, + provider: providerName, + }, + } + } + }, +} + +/** + * Check whether a tool can be executed on the Sim (TypeScript) side. + * + * Tools that are only available on the Go backend (e.g. search_patterns, + * search_errors, remember_debug) will return false. The subagent tool_call + * handler uses this to decide whether to execute a tool locally or let the + * Go backend's own tool_result SSE event handle it. + */ +export function isToolAvailableOnSimSide(toolName: string): boolean { + if (SERVER_TOOLS.has(toolName)) return true + if (toolName in SIM_WORKFLOW_TOOL_HANDLERS) return true + const resolvedToolName = resolveToolId(toolName) + return !!getTool(resolvedToolName) +} + +/** + * Check whether a tool is a user-installed integration tool (e.g. Gmail, Slack). + * These tools exist in the tool registry but are NOT copilot server tools or + * known workflow manipulation tools. They should require user approval in + * interactive mode. + */ +export function isIntegrationTool(toolName: string): boolean { + if (SERVER_TOOLS.has(toolName)) return false + if (toolName in SIM_WORKFLOW_TOOL_HANDLERS) return false + const resolvedToolName = resolveToolId(toolName) + return !!getTool(resolvedToolName) +} + +/** + * Execute a tool server-side without calling internal routes. 
+ */ +export async function executeToolServerSide( + toolCall: ToolCallState, + context: ExecutionContext +): Promise { + const toolName = toolCall.name + const resolvedToolName = resolveToolId(toolName) + + if (SERVER_TOOLS.has(toolName)) { + return executeServerToolDirect(toolName, toolCall.params || {}, context) + } + + if (toolName in SIM_WORKFLOW_TOOL_HANDLERS) { + return executeSimWorkflowTool(toolName, toolCall.params || {}, context) + } + + const toolConfig = getTool(resolvedToolName) + if (!toolConfig) { + logger.warn('Tool not found in registry', { toolName, resolvedToolName }) + return { + success: false, + error: `Tool not found: ${toolName}`, + } + } + + return executeIntegrationToolDirect(toolCall, toolConfig, context) +} + +/** + * Execute a server tool directly via the server tool router. + */ +async function executeServerToolDirect( + toolName: string, + params: Record, + context: ExecutionContext +): Promise { + try { + // Inject workflowId from context if not provided in params + // This is needed for tools like set_environment_variables that require workflowId + const enrichedParams = { ...params } + if (!enrichedParams.workflowId && context.workflowId) { + enrichedParams.workflowId = context.workflowId + } + + const result = await routeExecution(toolName, enrichedParams, { userId: context.userId }) + return { success: true, output: result } + } catch (error) { + logger.error('Server tool execution failed', { + toolName, + error: error instanceof Error ? error.message : String(error), + }) + return { + success: false, + error: error instanceof Error ? 
error.message : 'Server tool execution failed', + } + } +} + +async function executeSimWorkflowTool( + toolName: string, + params: Record, + context: ExecutionContext +): Promise { + const handler = SIM_WORKFLOW_TOOL_HANDLERS[toolName] + if (!handler) return { success: false, error: `Unsupported workflow tool: ${toolName}` } + return handler(params, context) +} + +/** Timeout for the mark-complete POST to the copilot backend (30 s). */ +const MARK_COMPLETE_TIMEOUT_MS = 30_000 + +/** + * Notify the copilot backend that a tool has completed. + */ +export async function markToolComplete( + toolCallId: string, + toolName: string, + status: number, + message?: unknown, + data?: unknown +): Promise { + try { + const controller = new AbortController() + const timeoutId = setTimeout(() => controller.abort(), MARK_COMPLETE_TIMEOUT_MS) + + try { + const response = await fetch(`${SIM_AGENT_API_URL}/api/tools/mark-complete`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}), + }, + body: JSON.stringify({ + id: toolCallId, + name: toolName, + status, + message, + data, + }), + signal: controller.signal, + }) + + if (!response.ok) { + logger.warn('Mark-complete call failed', { toolCallId, toolName, status: response.status }) + return false + } + + return true + } finally { + clearTimeout(timeoutId) + } + } catch (error) { + const isTimeout = error instanceof DOMException && error.name === 'AbortError' + logger.error('Mark-complete call failed', { + toolCallId, + toolName, + timedOut: isTimeout, + error: error instanceof Error ? error.message : String(error), + }) + return false + } +} + +/** + * Prepare execution context with cached environment values. 
+ */ +export async function prepareExecutionContext( + userId: string, + workflowId: string +): Promise { + const workflowResult = await db + .select({ workspaceId: workflow.workspaceId }) + .from(workflow) + .where(eq(workflow.id, workflowId)) + .limit(1) + const workspaceId = workflowResult[0]?.workspaceId ?? undefined + + const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId) + + return { + userId, + workflowId, + workspaceId, + decryptedEnvVars, + } +} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/integration-tools.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/integration-tools.ts new file mode 100644 index 000000000..8464e42ca --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/integration-tools.ts @@ -0,0 +1,105 @@ +import { db } from '@sim/db' +import { account, workflow } from '@sim/db/schema' +import { and, eq } from 'drizzle-orm' +import type { + ExecutionContext, + ToolCallResult, + ToolCallState, +} from '@/lib/copilot/orchestrator/types' +import { generateRequestId } from '@/lib/core/utils/request' +import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' +import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils' +import { resolveEnvVarReferences } from '@/executor/utils/reference-validation' +import { executeTool } from '@/tools' +import { resolveToolId } from '@/tools/utils' + +export async function executeIntegrationToolDirect( + toolCall: ToolCallState, + toolConfig: { + oauth?: { required?: boolean; provider?: string } + params?: { apiKey?: { required?: boolean } } + }, + context: ExecutionContext +): Promise { + const { userId, workflowId } = context + const toolName = resolveToolId(toolCall.name) + const toolArgs = toolCall.params || {} + + let workspaceId = context.workspaceId + if (!workspaceId && workflowId) { + const workflowResult = await db + .select({ workspaceId: workflow.workspaceId }) + .from(workflow) + .where(eq(workflow.id, workflowId)) + .limit(1) + 
workspaceId = workflowResult[0]?.workspaceId ?? undefined + } + + const decryptedEnvVars = + context.decryptedEnvVars || (await getEffectiveDecryptedEnv(userId, workspaceId)) + + // Deep resolution walks nested objects to replace {{ENV_VAR}} references. + // Safe because tool arguments originate from the LLM (not direct user input) + // and env vars belong to the user themselves. + const executionParams = resolveEnvVarReferences(toolArgs, decryptedEnvVars, { + deep: true, + }) as Record + + if (toolConfig.oauth?.required && toolConfig.oauth.provider) { + const provider = toolConfig.oauth.provider + const accounts = await db + .select() + .from(account) + .where(and(eq(account.providerId, provider), eq(account.userId, userId))) + .limit(1) + + if (!accounts.length) { + return { + success: false, + error: `No ${provider} account connected. Please connect your account first.`, + } + } + + const acc = accounts[0] + const requestId = generateRequestId() + const { accessToken } = await refreshTokenIfNeeded(requestId, acc, acc.id) + + if (!accessToken) { + return { + success: false, + error: `OAuth token not available for ${provider}. Please reconnect your account.`, + } + } + + executionParams.accessToken = accessToken + } + + if (toolConfig.params?.apiKey?.required && !executionParams.apiKey) { + return { + success: false, + error: `API key not provided for ${toolName}. 
Use {{YOUR_API_KEY_ENV_VAR}} to reference your environment variable.`, + } + } + + executionParams._context = { + workflowId, + userId, + } + + if (toolName === 'function_execute') { + executionParams.envVars = decryptedEnvVars + executionParams.workflowVariables = {} + executionParams.blockData = {} + executionParams.blockNameMapping = {} + executionParams.language = executionParams.language || 'javascript' + executionParams.timeout = executionParams.timeout || 30000 + } + + const result = await executeTool(toolName, executionParams) + + return { + success: result.success, + output: result.output, + error: result.error, + } +} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts new file mode 100644 index 000000000..1f49ab616 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts @@ -0,0 +1,187 @@ +/** + * Typed parameter interfaces for tool executor functions. + * Replaces Record with specific shapes based on actual property access. 
+ */ + +// === Workflow Query Params === + +export interface GetUserWorkflowParams { + workflowId?: string +} + +export interface GetWorkflowFromNameParams { + workflow_name?: string +} + +export interface ListUserWorkflowsParams { + workspaceId?: string + folderId?: string +} + +export interface GetWorkflowDataParams { + workflowId?: string + data_type?: string + dataType?: string +} + +export interface GetBlockOutputsParams { + workflowId?: string + blockIds?: string[] +} + +export interface GetBlockUpstreamReferencesParams { + workflowId?: string + blockIds: string[] +} + +export interface ListFoldersParams { + workspaceId?: string +} + +// === Workflow Mutation Params === + +export interface CreateWorkflowParams { + name?: string + workspaceId?: string + folderId?: string + description?: string +} + +export interface CreateFolderParams { + name?: string + workspaceId?: string + parentId?: string +} + +export interface RunWorkflowParams { + workflowId?: string + workflow_input?: unknown + input?: unknown + /** When true, runs the deployed version instead of the draft. Default: false (draft). */ + useDeployedState?: boolean +} + +export interface RunWorkflowUntilBlockParams { + workflowId?: string + workflow_input?: unknown + input?: unknown + /** The block ID to stop after. Execution halts once this block completes. */ + stopAfterBlockId: string + /** When true, runs the deployed version instead of the draft. Default: false (draft). */ + useDeployedState?: boolean +} + +export interface RunFromBlockParams { + workflowId?: string + /** The block ID to start execution from. */ + startBlockId: string + /** Optional execution ID to load the snapshot from. If omitted, uses the latest execution. */ + executionId?: string + workflow_input?: unknown + input?: unknown + useDeployedState?: boolean +} + +export interface RunBlockParams { + workflowId?: string + /** The block ID to run. Only this block executes using cached upstream outputs. 
*/ + blockId: string + /** Optional execution ID to load the snapshot from. If omitted, uses the latest execution. */ + executionId?: string + workflow_input?: unknown + input?: unknown + useDeployedState?: boolean +} + +export interface GetDeployedWorkflowStateParams { + workflowId?: string +} + +export interface GenerateApiKeyParams { + name: string + workspaceId?: string +} + +export interface VariableOperation { + name: string + operation: 'add' | 'edit' | 'delete' + value?: unknown + type?: string +} + +export interface SetGlobalWorkflowVariablesParams { + workflowId?: string + operations?: VariableOperation[] +} + +// === Deployment Params === + +export interface DeployApiParams { + workflowId?: string + action?: 'deploy' | 'undeploy' +} + +export interface DeployChatParams { + workflowId?: string + action?: 'deploy' | 'undeploy' | 'update' + identifier?: string + title?: string + description?: string + customizations?: { + primaryColor?: string + secondaryColor?: string + welcomeMessage?: string + iconUrl?: string + } + authType?: 'none' | 'password' | 'public' | 'email' | 'sso' + password?: string + subdomain?: string + allowedEmails?: string[] + outputConfigs?: unknown[] +} + +export interface DeployMcpParams { + workflowId?: string + action?: 'deploy' | 'undeploy' + toolName?: string + toolDescription?: string + serverId?: string + parameterSchema?: Record +} + +export interface CheckDeploymentStatusParams { + workflowId?: string +} + +export interface ListWorkspaceMcpServersParams { + workspaceId?: string + workflowId?: string +} + +export interface CreateWorkspaceMcpServerParams { + workflowId?: string + name?: string + description?: string + toolName?: string + toolDescription?: string + serverName?: string + isPublic?: boolean + workflowIds?: string[] +} + +// === Workflow Organization Params === + +export interface RenameWorkflowParams { + workflowId: string + name: string +} + +export interface MoveWorkflowParams { + workflowId: string + folderId: 
string | null +} + +export interface MoveFolderParams { + folderId: string + parentId: string | null +} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/platform-actions.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/platform-actions.ts new file mode 100644 index 000000000..6465e74a2 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/platform-actions.ts @@ -0,0 +1,117 @@ +/** + * Static content for the get_platform_actions tool. + * Contains the Sim platform quick reference and keyboard shortcuts. + */ +export const PLATFORM_ACTIONS_CONTENT = `# Sim Platform Quick Reference & Keyboard Shortcuts + +## Keyboard Shortcuts +**Mod** = Cmd (macOS) / Ctrl (Windows/Linux). Shortcuts work when canvas is focused. + +### Workflow Actions +| Shortcut | Action | +|----------|--------| +| Mod+Enter | Run workflow (or cancel if running) | +| Mod+Z | Undo | +| Mod+Shift+Z | Redo | +| Mod+C | Copy selected blocks | +| Mod+V | Paste blocks | +| Delete/Backspace | Delete selected blocks or edges | +| Shift+L | Auto-layout canvas | +| Mod+Shift+F | Fit to view | +| Mod+Shift+Enter | Accept Copilot changes | + +### Panel Navigation +| Shortcut | Action | +|----------|--------| +| C | Focus Copilot tab | +| T | Focus Toolbar tab | +| E | Focus Editor tab | +| Mod+F | Focus Toolbar search | + +### Global Navigation +| Shortcut | Action | +|----------|--------| +| Mod+K | Open search | +| Mod+Shift+A | Add new agent workflow | +| Mod+Y | Go to templates | +| Mod+L | Go to logs | + +### Utility +| Shortcut | Action | +|----------|--------| +| Mod+D | Clear terminal console | +| Mod+E | Clear notifications | + +### Mouse Controls +| Action | Control | +|--------|---------| +| Pan/move canvas | Left-drag on empty space, scroll, or trackpad | +| Select multiple blocks | Right-drag to draw selection box | +| Drag block | Left-drag on block header | +| Add to selection | Mod+Click on blocks | + +## Quick Reference — Workspaces +| Action | How | 
+|--------|-----| +| Create workspace | Click workspace dropdown → New Workspace | +| Switch workspaces | Click workspace dropdown → Select workspace | +| Invite team members | Sidebar → Invite | +| Rename/Duplicate/Export/Delete workspace | Right-click workspace → action | + +## Quick Reference — Workflows +| Action | How | +|--------|-----| +| Create workflow | Click + button in sidebar | +| Reorder/move workflows | Drag workflow up/down or onto a folder | +| Import workflow | Click import button in sidebar → Select file | +| Multi-select workflows | Mod+Click or Shift+Click workflows in sidebar | +| Open in new tab | Right-click workflow → Open in New Tab | +| Rename/Color/Duplicate/Export/Delete | Right-click workflow → action | + +## Quick Reference — Blocks +| Action | How | +|--------|-----| +| Add a block | Drag from Toolbar panel, or right-click canvas → Add Block | +| Multi-select blocks | Mod+Click additional blocks, or shift-drag selection box | +| Copy/Paste blocks | Mod+C / Mod+V | +| Duplicate/Delete blocks | Right-click → action | +| Rename a block | Click block name in header | +| Enable/Disable block | Right-click → Enable/Disable | +| Lock/Unlock block | Hover block → Click lock icon (Admin only) | +| Toggle handle orientation | Right-click → Toggle Handles | +| Configure a block | Select block → use Editor panel on right | + +## Quick Reference — Connections +| Action | How | +|--------|-----| +| Create connection | Drag from output handle to input handle | +| Delete connection | Click edge to select → Delete key | +| Use output in another block | Drag connection tag into input field | + +## Quick Reference — Running & Testing +| Action | How | +|--------|-----| +| Run workflow | Click Run Workflow button or Mod+Enter | +| Stop workflow | Click Stop button or Mod+Enter while running | +| Test with chat | Use Chat panel on the right side | +| Run from block | Hover block → Click play button, or right-click → Run from block | +| Run until block | 
Right-click block → Run until block | +| View execution logs | Open terminal panel at bottom, or Mod+L | +| Filter/Search/Copy/Clear logs | Terminal panel controls | + +## Quick Reference — Deployment +| Action | How | +|--------|-----| +| Deploy workflow | Click Deploy button in panel | +| Update deployment | Click Update when changes are detected | +| Revert deployment | Previous versions in Deploy tab → Promote to live | +| Copy API endpoint | Deploy tab → API → Copy API cURL | + +## Quick Reference — Variables +| Action | How | +|--------|-----| +| Add/Edit/Delete workflow variable | Panel → Variables → Add Variable | +| Add environment variable | Settings → Environment Variables → Add | +| Reference workflow variable | Use syntax | +| Reference environment variable | Use {{ENV_VAR}} syntax | +` diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/index.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/index.ts new file mode 100644 index 000000000..b908b0710 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/index.ts @@ -0,0 +1,2 @@ +export * from './mutations' +export * from './queries' diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts new file mode 100644 index 000000000..2b9d5142c --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/mutations.ts @@ -0,0 +1,624 @@ +import crypto from 'crypto' +import { db } from '@sim/db' +import { apiKey, workflow, workflowFolder } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { and, eq, isNull, max } from 'drizzle-orm' +import { nanoid } from 'nanoid' +import { createApiKey } from '@/lib/api-key/auth' +import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' +import { generateRequestId } from '@/lib/core/utils/request' +import { 
buildDefaultWorkflowArtifacts } from '@/lib/workflows/defaults' +import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow' +import { + getExecutionState, + getLatestExecutionState, +} from '@/lib/workflows/executor/execution-state' +import { saveWorkflowToNormalizedTables } from '@/lib/workflows/persistence/utils' +import { ensureWorkflowAccess, ensureWorkspaceAccess, getDefaultWorkspaceId } from '../access' +import type { + CreateFolderParams, + CreateWorkflowParams, + GenerateApiKeyParams, + MoveFolderParams, + MoveWorkflowParams, + RenameWorkflowParams, + RunBlockParams, + RunFromBlockParams, + RunWorkflowParams, + RunWorkflowUntilBlockParams, + SetGlobalWorkflowVariablesParams, + VariableOperation, +} from '../param-types' + +const logger = createLogger('WorkflowMutations') + +export async function executeCreateWorkflow( + params: CreateWorkflowParams, + context: ExecutionContext +): Promise { + try { + const name = typeof params?.name === 'string' ? params.name.trim() : '' + if (!name) { + return { success: false, error: 'name is required' } + } + if (name.length > 200) { + return { success: false, error: 'Workflow name must be 200 characters or less' } + } + const description = typeof params?.description === 'string' ? params.description : null + if (description && description.length > 2000) { + return { success: false, error: 'Description must be 2000 characters or less' } + } + + const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId)) + const folderId = params?.folderId || null + + await ensureWorkspaceAccess(workspaceId, context.userId, true) + + const workflowId = crypto.randomUUID() + const now = new Date() + + const folderCondition = folderId ? 
eq(workflow.folderId, folderId) : isNull(workflow.folderId) + const [maxResult] = await db + .select({ maxOrder: max(workflow.sortOrder) }) + .from(workflow) + .where(and(eq(workflow.workspaceId, workspaceId), folderCondition)) + const sortOrder = (maxResult?.maxOrder ?? 0) + 1 + + await db.insert(workflow).values({ + id: workflowId, + userId: context.userId, + workspaceId, + folderId, + sortOrder, + name, + description, + color: '#3972F6', + lastSynced: now, + createdAt: now, + updatedAt: now, + isDeployed: false, + runCount: 0, + variables: {}, + }) + + const { workflowState } = buildDefaultWorkflowArtifacts() + const saveResult = await saveWorkflowToNormalizedTables(workflowId, workflowState) + if (!saveResult.success) { + throw new Error(saveResult.error || 'Failed to save workflow state') + } + + return { + success: true, + output: { + workflowId, + workflowName: name, + workspaceId, + folderId, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeCreateFolder( + params: CreateFolderParams, + context: ExecutionContext +): Promise { + try { + const name = typeof params?.name === 'string' ? params.name.trim() : '' + if (!name) { + return { success: false, error: 'name is required' } + } + if (name.length > 200) { + return { success: false, error: 'Folder name must be 200 characters or less' } + } + + const workspaceId = params?.workspaceId || (await getDefaultWorkspaceId(context.userId)) + const parentId = params?.parentId || null + + await ensureWorkspaceAccess(workspaceId, context.userId, true) + + const [maxResult] = await db + .select({ maxOrder: max(workflowFolder.sortOrder) }) + .from(workflowFolder) + .where( + and( + eq(workflowFolder.workspaceId, workspaceId), + parentId ? eq(workflowFolder.parentId, parentId) : isNull(workflowFolder.parentId) + ) + ) + const sortOrder = (maxResult?.maxOrder ?? 
0) + 1 + + const folderId = crypto.randomUUID() + await db.insert(workflowFolder).values({ + id: folderId, + userId: context.userId, + workspaceId, + parentId, + name, + sortOrder, + createdAt: new Date(), + updatedAt: new Date(), + }) + + return { success: true, output: { folderId, name, workspaceId, parentId } } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeRunWorkflow( + params: RunWorkflowParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + + const useDraftState = !params.useDeployedState + + const result = await executeWorkflow( + { + id: workflowRecord.id, + userId: workflowRecord.userId, + workspaceId: workflowRecord.workspaceId, + variables: workflowRecord.variables || {}, + }, + generateRequestId(), + params.workflow_input || params.input || undefined, + context.userId, + { enabled: true, useDraftState } + ) + + return { + success: result.success, + output: { + executionId: result.metadata?.executionId, + success: result.success, + output: result.output, + logs: result.logs, + }, + error: result.success ? undefined : result.error || 'Workflow execution failed', + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeSetGlobalWorkflowVariables( + params: SetGlobalWorkflowVariablesParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const operations: VariableOperation[] = Array.isArray(params.operations) + ? 
params.operations + : [] + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + + interface WorkflowVariable { + id: string + workflowId?: string + name: string + type: string + value?: unknown + } + const currentVarsRecord = (workflowRecord.variables as Record) || {} + const byName: Record = {} + Object.values(currentVarsRecord).forEach((v) => { + if (v && typeof v === 'object' && 'id' in v && 'name' in v) { + const variable = v as WorkflowVariable + byName[String(variable.name)] = variable + } + }) + + for (const op of operations) { + const key = String(op?.name || '') + if (!key) continue + const nextType = op?.type || byName[key]?.type || 'plain' + const coerceValue = (value: unknown, type: string): unknown => { + if (value === undefined) return value + if (type === 'number') { + const n = Number(value) + return Number.isNaN(n) ? value : n + } + if (type === 'boolean') { + const v = String(value).trim().toLowerCase() + if (v === 'true') return true + if (v === 'false') return false + return value + } + if (type === 'array' || type === 'object') { + try { + const parsed = JSON.parse(String(value)) + if (type === 'array' && Array.isArray(parsed)) return parsed + if (type === 'object' && parsed && typeof parsed === 'object' && !Array.isArray(parsed)) + return parsed + } catch (error) { + logger.warn('Failed to parse JSON value for variable coercion', { + error: error instanceof Error ? 
error.message : String(error), + }) + } + return value + } + return value + } + + if (op.operation === 'delete') { + delete byName[key] + continue + } + const typedValue = coerceValue(op.value, nextType) + if (op.operation === 'add') { + byName[key] = { + id: crypto.randomUUID(), + workflowId, + name: key, + type: nextType, + value: typedValue, + } + continue + } + if (op.operation === 'edit') { + if (!byName[key]) { + byName[key] = { + id: crypto.randomUUID(), + workflowId, + name: key, + type: nextType, + value: typedValue, + } + } else { + byName[key] = { + ...byName[key], + type: nextType, + value: typedValue, + } + } + } + } + + const nextVarsRecord = Object.fromEntries(Object.values(byName).map((v) => [String(v.id), v])) + + await db + .update(workflow) + .set({ variables: nextVarsRecord, updatedAt: new Date() }) + .where(eq(workflow.id, workflowId)) + + return { success: true, output: { updated: Object.values(byName).length } } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeRenameWorkflow( + params: RenameWorkflowParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + const name = typeof params.name === 'string' ? params.name.trim() : '' + if (!name) { + return { success: false, error: 'name is required' } + } + if (name.length > 200) { + return { success: false, error: 'Workflow name must be 200 characters or less' } + } + + await ensureWorkflowAccess(workflowId, context.userId) + + await db + .update(workflow) + .set({ name, updatedAt: new Date() }) + .where(eq(workflow.id, workflowId)) + + return { success: true, output: { workflowId, name } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeMoveWorkflow( + params: MoveWorkflowParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + await ensureWorkflowAccess(workflowId, context.userId) + + const folderId = params.folderId || null + + await db + .update(workflow) + .set({ folderId, updatedAt: new Date() }) + .where(eq(workflow.id, workflowId)) + + return { success: true, output: { workflowId, folderId } } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeMoveFolder( + params: MoveFolderParams, + context: ExecutionContext +): Promise { + try { + const folderId = params.folderId + if (!folderId) { + return { success: false, error: 'folderId is required' } + } + + const parentId = params.parentId || null + + if (parentId === folderId) { + return { success: false, error: 'A folder cannot be moved into itself' } + } + + await db + .update(workflowFolder) + .set({ parentId, updatedAt: new Date() }) + .where(eq(workflowFolder.id, folderId)) + + return { success: true, output: { folderId, parentId } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeRunWorkflowUntilBlock( + params: RunWorkflowUntilBlockParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + if (!params.stopAfterBlockId) { + return { success: false, error: 'stopAfterBlockId is required' } + } + + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + + const useDraftState = !params.useDeployedState + + const result = await executeWorkflow( + { + id: workflowRecord.id, + userId: workflowRecord.userId, + workspaceId: workflowRecord.workspaceId, + variables: workflowRecord.variables || {}, + }, + generateRequestId(), + params.workflow_input || params.input || undefined, + context.userId, + { enabled: true, useDraftState, stopAfterBlockId: params.stopAfterBlockId } + ) + + return { + success: result.success, + output: { + executionId: result.metadata?.executionId, + success: result.success, + stoppedAfterBlockId: params.stopAfterBlockId, + output: result.output, + logs: result.logs, + }, + error: result.success ? undefined : result.error || 'Workflow execution failed', + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeGenerateApiKey( + params: GenerateApiKeyParams, + context: ExecutionContext +): Promise { + try { + const name = typeof params.name === 'string' ? 
params.name.trim() : '' + if (!name) { + return { success: false, error: 'name is required' } + } + if (name.length > 200) { + return { success: false, error: 'API key name must be 200 characters or less' } + } + + const workspaceId = params.workspaceId || (await getDefaultWorkspaceId(context.userId)) + await ensureWorkspaceAccess(workspaceId, context.userId, true) + + const existingKey = await db + .select({ id: apiKey.id }) + .from(apiKey) + .where( + and( + eq(apiKey.workspaceId, workspaceId), + eq(apiKey.name, name), + eq(apiKey.type, 'workspace') + ) + ) + .limit(1) + + if (existingKey.length > 0) { + return { + success: false, + error: `A workspace API key named "${name}" already exists. Choose a different name.`, + } + } + + const { key: plainKey, encryptedKey } = await createApiKey(true) + if (!encryptedKey) { + return { success: false, error: 'Failed to encrypt API key for storage' } + } + + const [newKey] = await db + .insert(apiKey) + .values({ + id: nanoid(), + workspaceId, + userId: context.userId, + createdBy: context.userId, + name, + key: encryptedKey, + type: 'workspace', + createdAt: new Date(), + updatedAt: new Date(), + }) + .returning({ id: apiKey.id, name: apiKey.name, createdAt: apiKey.createdAt }) + + return { + success: true, + output: { + id: newKey.id, + name: newKey.name, + key: plainKey, + workspaceId, + message: + 'API key created successfully. Copy this key now — it will not be shown again. Use this key in the x-api-key header when calling workflow API endpoints.', + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeRunFromBlock( + params: RunFromBlockParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + if (!params.startBlockId) { + return { success: false, error: 'startBlockId is required' } + } + + const snapshot = params.executionId + ? await getExecutionState(params.executionId) + : await getLatestExecutionState(workflowId) + + if (!snapshot) { + return { + success: false, + error: params.executionId + ? `No execution state found for execution ${params.executionId}. Run the full workflow first.` + : `No execution state found for workflow ${workflowId}. Run the full workflow first to create a snapshot.`, + } + } + + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const useDraftState = !params.useDeployedState + + const result = await executeWorkflow( + { + id: workflowRecord.id, + userId: workflowRecord.userId, + workspaceId: workflowRecord.workspaceId, + variables: workflowRecord.variables || {}, + }, + generateRequestId(), + params.workflow_input || params.input || undefined, + context.userId, + { + enabled: true, + useDraftState, + runFromBlock: { startBlockId: params.startBlockId, sourceSnapshot: snapshot }, + } + ) + + return { + success: result.success, + output: { + executionId: result.metadata?.executionId, + success: result.success, + startBlockId: params.startBlockId, + output: result.output, + logs: result.logs, + }, + error: result.success ? undefined : result.error || 'Workflow execution failed', + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeRunBlock( + params: RunBlockParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + if (!params.blockId) { + return { success: false, error: 'blockId is required' } + } + + const snapshot = params.executionId + ? await getExecutionState(params.executionId) + : await getLatestExecutionState(workflowId) + + if (!snapshot) { + return { + success: false, + error: params.executionId + ? `No execution state found for execution ${params.executionId}. Run the full workflow first.` + : `No execution state found for workflow ${workflowId}. Run the full workflow first to create a snapshot.`, + } + } + + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + const useDraftState = !params.useDeployedState + + const result = await executeWorkflow( + { + id: workflowRecord.id, + userId: workflowRecord.userId, + workspaceId: workflowRecord.workspaceId, + variables: workflowRecord.variables || {}, + }, + generateRequestId(), + params.workflow_input || params.input || undefined, + context.userId, + { + enabled: true, + useDraftState, + runFromBlock: { startBlockId: params.blockId, sourceSnapshot: snapshot }, + stopAfterBlockId: params.blockId, + } + ) + + return { + success: result.success, + output: { + executionId: result.metadata?.executionId, + success: result.success, + blockId: params.blockId, + output: result.output, + logs: result.logs, + }, + error: result.success ? undefined : result.error || 'Workflow execution failed', + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} diff --git a/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts new file mode 100644 index 000000000..ea8ce3187 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/tool-executor/workflow-tools/queries.ts @@ -0,0 +1,615 @@ +import { db } from '@sim/db' +import { customTools, permissions, workflow, workflowFolder, workspace } from '@sim/db/schema' +import { and, asc, desc, eq, isNull, or } from 'drizzle-orm' +import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types' +import { + formatNormalizedWorkflowForCopilot, + normalizeWorkflowName, +} from '@/lib/copilot/tools/shared/workflow-utils' +import { mcpService } from '@/lib/mcp/service' +import { listWorkspaceFiles } from '@/lib/uploads/contexts/workspace' +import { getBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs' +import { BlockPathCalculator } from '@/lib/workflows/blocks/block-path-calculator' +import { + loadDeployedWorkflowState, + loadWorkflowFromNormalizedTables, +} from '@/lib/workflows/persistence/utils' +import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers' +import { normalizeName } from '@/executor/constants' +import type { Loop, Parallel } from '@/stores/workflows/workflow/types' +import { + ensureWorkflowAccess, + ensureWorkspaceAccess, + getAccessibleWorkflowsForUser, + getDefaultWorkspaceId, +} from '../access' +import type { + GetBlockOutputsParams, + GetBlockUpstreamReferencesParams, + GetDeployedWorkflowStateParams, + GetUserWorkflowParams, + GetWorkflowDataParams, + GetWorkflowFromNameParams, + ListFoldersParams, + ListUserWorkflowsParams, +} from '../param-types' + +export async function executeGetUserWorkflow( + params: GetUserWorkflowParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + 
return { success: false, error: 'workflowId is required' } + } + + const { workflow: workflowRecord, workspaceId } = await ensureWorkflowAccess( + workflowId, + context.userId + ) + + const normalized = await loadWorkflowFromNormalizedTables(workflowId) + const userWorkflow = formatNormalizedWorkflowForCopilot(normalized) + if (!userWorkflow) { + return { success: false, error: 'Workflow has no normalized data' } + } + + return { + success: true, + output: { + workflowId, + workflowName: workflowRecord.name || '', + workspaceId, + userWorkflow, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeGetWorkflowFromName( + params: GetWorkflowFromNameParams, + context: ExecutionContext +): Promise { + try { + const workflowName = typeof params.workflow_name === 'string' ? params.workflow_name.trim() : '' + if (!workflowName) { + return { success: false, error: 'workflow_name is required' } + } + + const workflows = await getAccessibleWorkflowsForUser(context.userId) + + const targetName = normalizeWorkflowName(workflowName) + const match = workflows.find((w) => normalizeWorkflowName(w.name) === targetName) + if (!match) { + return { success: false, error: `Workflow not found: ${workflowName}` } + } + + const normalized = await loadWorkflowFromNormalizedTables(match.id) + const userWorkflow = formatNormalizedWorkflowForCopilot(normalized) + if (!userWorkflow) { + return { success: false, error: 'Workflow has no normalized data' } + } + + return { + success: true, + output: { + workflowId: match.id, + workflowName: match.name || '', + workspaceId: match.workspaceId, + userWorkflow, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeListUserWorkflows( + params: ListUserWorkflowsParams, + context: ExecutionContext +): Promise { + try { + const workspaceId = params?.workspaceId as string | undefined + const folderId = params?.folderId as string | undefined + + const workflows = await getAccessibleWorkflowsForUser(context.userId, { workspaceId, folderId }) + + const workflowList = workflows.map((w) => ({ + workflowId: w.id, + workflowName: w.name || '', + workspaceId: w.workspaceId, + folderId: w.folderId, + })) + + return { success: true, output: { workflows: workflowList } } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeListUserWorkspaces( + context: ExecutionContext +): Promise { + try { + const workspaces = await db + .select({ + workspaceId: workspace.id, + workspaceName: workspace.name, + ownerId: workspace.ownerId, + permissionType: permissions.permissionType, + }) + .from(permissions) + .innerJoin(workspace, eq(permissions.entityId, workspace.id)) + .where(and(eq(permissions.userId, context.userId), eq(permissions.entityType, 'workspace'))) + .orderBy(desc(workspace.createdAt)) + + const output = workspaces.map((row) => ({ + workspaceId: row.workspaceId, + workspaceName: row.workspaceName, + role: row.ownerId === context.userId ? 'owner' : row.permissionType, + })) + + return { success: true, output: { workspaces: output } } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeListFolders( + params: ListFoldersParams, + context: ExecutionContext +): Promise { + try { + const workspaceId = + (params?.workspaceId as string | undefined) || (await getDefaultWorkspaceId(context.userId)) + + await ensureWorkspaceAccess(workspaceId, context.userId, false) + + const folders = await db + .select({ + folderId: workflowFolder.id, + folderName: workflowFolder.name, + parentId: workflowFolder.parentId, + sortOrder: workflowFolder.sortOrder, + }) + .from(workflowFolder) + .where(eq(workflowFolder.workspaceId, workspaceId)) + .orderBy(asc(workflowFolder.sortOrder), asc(workflowFolder.createdAt)) + + return { + success: true, + output: { + workspaceId, + folders, + }, + } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeGetWorkflowData( + params: GetWorkflowDataParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + const dataType = params.data_type || params.dataType || '' + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + if (!dataType) { + return { success: false, error: 'data_type is required' } + } + + const { workflow: workflowRecord, workspaceId } = await ensureWorkflowAccess( + workflowId, + context.userId + ) + + if (dataType === 'global_variables') { + const variablesRecord = (workflowRecord.variables as Record) || {} + const variables = Object.values(variablesRecord).map((v) => { + const variable = v as Record | null + return { + id: String(variable?.id || ''), + name: String(variable?.name || ''), + value: variable?.value, + } + }) + return { success: true, output: { variables } } + } + + if (dataType === 'custom_tools') { + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + const conditions = [ + eq(customTools.workspaceId, 
workspaceId), + and(eq(customTools.userId, context.userId), isNull(customTools.workspaceId)), + ] + const toolsRows = await db + .select() + .from(customTools) + .where(or(...conditions)) + .orderBy(desc(customTools.createdAt)) + + const customToolsData = toolsRows.map((tool) => { + const schema = tool.schema as Record | null + const fn = (schema?.function ?? {}) as Record + return { + id: String(tool.id || ''), + title: String(tool.title || ''), + functionName: String(fn.name || ''), + description: String(fn.description || ''), + parameters: fn.parameters, + } + }) + + return { success: true, output: { customTools: customToolsData } } + } + + if (dataType === 'mcp_tools') { + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + const tools = await mcpService.discoverTools(context.userId, workspaceId, false) + const mcpTools = tools.map((tool) => ({ + name: String(tool.name || ''), + serverId: String(tool.serverId || ''), + serverName: String(tool.serverName || ''), + description: String(tool.description || ''), + inputSchema: tool.inputSchema, + })) + return { success: true, output: { mcpTools } } + } + + if (dataType === 'files') { + if (!workspaceId) { + return { success: false, error: 'workspaceId is required' } + } + const files = await listWorkspaceFiles(workspaceId) + const fileResults = files.map((file) => ({ + id: String(file.id || ''), + name: String(file.name || ''), + key: String(file.key || ''), + path: String(file.path || ''), + size: Number(file.size || 0), + type: String(file.type || ''), + uploadedAt: String(file.uploadedAt || ''), + })) + return { success: true, output: { files: fileResults } } + } + + return { success: false, error: `Unknown data_type: ${dataType}` } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} + +export async function executeGetBlockOutputs( + params: GetBlockOutputsParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + await ensureWorkflowAccess(workflowId, context.userId) + + const normalized = await loadWorkflowFromNormalizedTables(workflowId) + if (!normalized) { + return { success: false, error: 'Workflow has no normalized data' } + } + + const blocks = normalized.blocks || {} + const loops = normalized.loops || {} + const parallels = normalized.parallels || {} + const blockIds = + Array.isArray(params.blockIds) && params.blockIds.length > 0 + ? params.blockIds + : Object.keys(blocks) + + const results: Array<{ + blockId: string + blockName: string + blockType: string + outputs: string[] + insideSubflowOutputs?: string[] + outsideSubflowOutputs?: string[] + triggerMode?: boolean + }> = [] + + for (const blockId of blockIds) { + const block = blocks[blockId] + if (!block?.type) continue + const blockName = block.name || block.type + + if (block.type === 'loop' || block.type === 'parallel') { + const insidePaths = getSubflowInsidePaths(block.type, blockId, loops, parallels) + results.push({ + blockId, + blockName, + blockType: block.type, + outputs: [], + insideSubflowOutputs: formatOutputsWithPrefix(insidePaths, blockName), + outsideSubflowOutputs: formatOutputsWithPrefix(['results'], blockName), + triggerMode: block.triggerMode, + }) + continue + } + + const outputs = getBlockOutputPaths(block.type, block.subBlocks, block.triggerMode) + results.push({ + blockId, + blockName, + blockType: block.type, + outputs: formatOutputsWithPrefix(outputs, blockName), + triggerMode: block.triggerMode, + }) + } + + const variables = await getWorkflowVariablesForTool(workflowId) + + const payload = { blocks: results, variables } + return { success: true, output: payload } + } 
catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +export async function executeGetBlockUpstreamReferences( + params: GetBlockUpstreamReferencesParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + if (!Array.isArray(params.blockIds) || params.blockIds.length === 0) { + return { success: false, error: 'blockIds array is required' } + } + await ensureWorkflowAccess(workflowId, context.userId) + + const normalized = await loadWorkflowFromNormalizedTables(workflowId) + if (!normalized) { + return { success: false, error: 'Workflow has no normalized data' } + } + + const blocks = normalized.blocks || {} + const edges = normalized.edges || [] + const loops = normalized.loops || {} + const parallels = normalized.parallels || {} + + const graphEdges = edges.map((edge) => ({ source: edge.source, target: edge.target })) + const variableOutputs = await getWorkflowVariablesForTool(workflowId) + + interface AccessibleBlockEntry { + blockId: string + blockName: string + blockType: string + outputs: string[] + triggerMode?: boolean + accessContext?: 'inside' | 'outside' + } + + interface UpstreamReferenceResult { + blockId: string + blockName: string + blockType: string + accessibleBlocks: AccessibleBlockEntry[] + insideSubflows: Array<{ blockId: string; blockName: string; blockType: string }> + variables: Array<{ id: string; name: string; type: string; tag: string }> + } + + const results: UpstreamReferenceResult[] = [] + + for (const blockId of params.blockIds) { + const targetBlock = blocks[blockId] + if (!targetBlock) continue + + const insideSubflows: Array<{ blockId: string; blockName: string; blockType: string }> = [] + const containingLoopIds = new Set() + const containingParallelIds = new Set() + + Object.values(loops).forEach((loop) => { + if 
(loop?.nodes?.includes(blockId)) { + containingLoopIds.add(loop.id) + const loopBlock = blocks[loop.id] + if (loopBlock) { + insideSubflows.push({ + blockId: loop.id, + blockName: loopBlock.name || loopBlock.type, + blockType: 'loop', + }) + } + } + }) + + Object.values(parallels).forEach((parallel) => { + if (parallel?.nodes?.includes(blockId)) { + containingParallelIds.add(parallel.id) + const parallelBlock = blocks[parallel.id] + if (parallelBlock) { + insideSubflows.push({ + blockId: parallel.id, + blockName: parallelBlock.name || parallelBlock.type, + blockType: 'parallel', + }) + } + } + }) + + const ancestorIds = BlockPathCalculator.findAllPathNodes(graphEdges, blockId) + const accessibleIds = new Set(ancestorIds) + accessibleIds.add(blockId) + + const starterBlock = Object.values(blocks).find((b) => isInputDefinitionTrigger(b.type)) + if (starterBlock && ancestorIds.includes(starterBlock.id)) { + accessibleIds.add(starterBlock.id) + } + + containingLoopIds.forEach((loopId) => { + accessibleIds.add(loopId) + loops[loopId]?.nodes?.forEach((nodeId: string) => accessibleIds.add(nodeId)) + }) + + containingParallelIds.forEach((parallelId) => { + accessibleIds.add(parallelId) + parallels[parallelId]?.nodes?.forEach((nodeId: string) => accessibleIds.add(nodeId)) + }) + + const accessibleBlocks: AccessibleBlockEntry[] = [] + + for (const accessibleBlockId of accessibleIds) { + const block = blocks[accessibleBlockId] + if (!block?.type) continue + const canSelfReference = block.type === 'approval' || block.type === 'human_in_the_loop' + if (accessibleBlockId === blockId && !canSelfReference) continue + + const blockName = block.name || block.type + let accessContext: 'inside' | 'outside' | undefined + let outputPaths: string[] + + if (block.type === 'loop' || block.type === 'parallel') { + const isInside = + (block.type === 'loop' && containingLoopIds.has(accessibleBlockId)) || + (block.type === 'parallel' && containingParallelIds.has(accessibleBlockId)) + 
accessContext = isInside ? 'inside' : 'outside' + outputPaths = isInside + ? getSubflowInsidePaths(block.type, accessibleBlockId, loops, parallels) + : ['results'] + } else { + outputPaths = getBlockOutputPaths(block.type, block.subBlocks, block.triggerMode) + } + + const formattedOutputs = formatOutputsWithPrefix(outputPaths, blockName) + const entry: AccessibleBlockEntry = { + blockId: accessibleBlockId, + blockName, + blockType: block.type, + outputs: formattedOutputs, + ...(block.triggerMode ? { triggerMode: true } : {}), + ...(accessContext ? { accessContext } : {}), + } + accessibleBlocks.push(entry) + } + + results.push({ + blockId, + blockName: targetBlock.name || targetBlock.type, + blockType: targetBlock.type, + accessibleBlocks, + insideSubflows, + variables: variableOutputs, + }) + } + + const payload = { results } + return { success: true, output: payload } + } catch (error) { + return { success: false, error: error instanceof Error ? error.message : String(error) } + } +} + +async function getWorkflowVariablesForTool( + workflowId: string +): Promise> { + const [workflowRecord] = await db + .select({ variables: workflow.variables }) + .from(workflow) + .where(eq(workflow.id, workflowId)) + .limit(1) + + const variablesRecord = (workflowRecord?.variables as Record) || {} + return Object.values(variablesRecord) + .filter((v): v is Record => { + if (!v || typeof v !== 'object') return false + const variable = v as Record + return !!variable.name && String(variable.name).trim() !== '' + }) + .map((v) => ({ + id: String(v.id || ''), + name: String(v.name || ''), + type: String(v.type || 'plain'), + tag: `variable.${normalizeName(String(v.name || ''))}`, + })) +} + +function getSubflowInsidePaths( + blockType: 'loop' | 'parallel', + blockId: string, + loops: Record, + parallels: Record +): string[] { + const paths = ['index'] + if (blockType === 'loop') { + const loopType = loops[blockId]?.loopType || 'for' + if (loopType === 'forEach') { + 
paths.push('currentItem', 'items') + } + } else { + const parallelType = parallels[blockId]?.parallelType || 'count' + if (parallelType === 'collection') { + paths.push('currentItem', 'items') + } + } + return paths +} + +function formatOutputsWithPrefix(paths: string[], blockName: string): string[] { + const normalizedName = normalizeName(blockName) + return paths.map((path) => `${normalizedName}.${path}`) +} + +export async function executeGetDeployedWorkflowState( + params: GetDeployedWorkflowStateParams, + context: ExecutionContext +): Promise { + try { + const workflowId = params.workflowId || context.workflowId + if (!workflowId) { + return { success: false, error: 'workflowId is required' } + } + + const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId) + + try { + const deployedState = await loadDeployedWorkflowState(workflowId) + const formatted = formatNormalizedWorkflowForCopilot({ + blocks: deployedState.blocks, + edges: deployedState.edges, + loops: deployedState.loops as Record, + parallels: deployedState.parallels as Record, + }) + + return { + success: true, + output: { + workflowId, + workflowName: workflowRecord.name || '', + isDeployed: true, + deploymentVersionId: deployedState.deploymentVersionId, + deployedState: formatted, + }, + } + } catch { + return { + success: true, + output: { + workflowId, + workflowName: workflowRecord.name || '', + isDeployed: false, + message: 'Workflow has not been deployed yet.', + }, + } + } + } catch (error) { + return { success: false, error: error instanceof Error ? 
error.message : String(error) } + } +} diff --git a/apps/sim/lib/copilot/orchestrator/types.ts b/apps/sim/lib/copilot/orchestrator/types.ts new file mode 100644 index 000000000..eebc806a7 --- /dev/null +++ b/apps/sim/lib/copilot/orchestrator/types.ts @@ -0,0 +1,150 @@ +import type { CopilotProviderConfig } from '@/lib/copilot/types' + +export type SSEEventType = + | 'chat_id' + | 'title_updated' + | 'content' + | 'reasoning' + | 'tool_call' + | 'tool_generating' + | 'tool_result' + | 'tool_error' + | 'subagent_start' + | 'subagent_end' + | 'structured_result' + | 'subagent_result' + | 'done' + | 'error' + | 'start' + +export interface SSEEvent { + type: SSEEventType + data?: Record + subagent?: string + toolCallId?: string + toolName?: string + success?: boolean + result?: unknown + /** Set on chat_id events */ + chatId?: string + /** Set on title_updated events */ + title?: string + /** Set on error events */ + error?: string + /** Set on content/reasoning events */ + content?: string + /** Set on reasoning events */ + phase?: string + /** Set on tool_result events */ + failedDependency?: boolean +} + +export type ToolCallStatus = 'pending' | 'executing' | 'success' | 'error' | 'skipped' | 'rejected' + +export interface ToolCallState { + id: string + name: string + status: ToolCallStatus + params?: Record + result?: ToolCallResult + error?: string + startTime?: number + endTime?: number +} + +export interface ToolCallResult { + success: boolean + output?: T + error?: string +} + +export type ContentBlockType = 'text' | 'thinking' | 'tool_call' | 'subagent_text' + +export interface ContentBlock { + type: ContentBlockType + content?: string + toolCall?: ToolCallState + timestamp: number +} + +export interface StreamingContext { + chatId?: string + conversationId?: string + messageId: string + accumulatedContent: string + contentBlocks: ContentBlock[] + toolCalls: Map + currentThinkingBlock: ContentBlock | null + isInThinkingBlock: boolean + 
subAgentParentToolCallId?: string + subAgentContent: Record + subAgentToolCalls: Record + pendingContent: string + streamComplete: boolean + wasAborted: boolean + errors: string[] +} + +export interface FileAttachment { + id: string + key: string + name: string + mimeType: string + size: number +} + +export interface OrchestratorRequest { + message: string + workflowId: string + userId: string + chatId?: string + mode?: 'agent' | 'ask' | 'plan' + model?: string + conversationId?: string + contexts?: Array<{ type: string; content: string }> + fileAttachments?: FileAttachment[] + commands?: string[] + provider?: CopilotProviderConfig + streamToolCalls?: boolean + version?: string + prefetch?: boolean + userName?: string +} + +export interface OrchestratorOptions { + autoExecuteTools?: boolean + timeout?: number + onEvent?: (event: SSEEvent) => void | Promise + onComplete?: (result: OrchestratorResult) => void | Promise + onError?: (error: Error) => void | Promise + abortSignal?: AbortSignal + interactive?: boolean +} + +export interface OrchestratorResult { + success: boolean + content: string + contentBlocks: ContentBlock[] + toolCalls: ToolCallSummary[] + chatId?: string + conversationId?: string + error?: string + errors?: string[] +} + +export interface ToolCallSummary { + id: string + name: string + status: ToolCallStatus + params?: Record + result?: unknown + error?: string + durationMs?: number +} + +export interface ExecutionContext { + userId: string + workflowId: string + workspaceId?: string + decryptedEnvVars?: Record +} diff --git a/apps/sim/lib/copilot/process-contents.ts b/apps/sim/lib/copilot/process-contents.ts index 13a0015f0..9e1eeb079 100644 --- a/apps/sim/lib/copilot/process-contents.ts +++ b/apps/sim/lib/copilot/process-contents.ts @@ -44,29 +44,20 @@ export async function processContexts( ctx.kind ) } - if (ctx.kind === 'knowledge' && (ctx as any).knowledgeId) { - return await processKnowledgeFromDb( - (ctx as any).knowledgeId, - ctx.label ? 
`@${ctx.label}` : '@' - ) + if (ctx.kind === 'knowledge' && ctx.knowledgeId) { + return await processKnowledgeFromDb(ctx.knowledgeId, ctx.label ? `@${ctx.label}` : '@') } - if (ctx.kind === 'blocks' && (ctx as any).blockId) { - return await processBlockMetadata((ctx as any).blockId, ctx.label ? `@${ctx.label}` : '@') + if (ctx.kind === 'blocks' && ctx.blockIds?.length > 0) { + return await processBlockMetadata(ctx.blockIds[0], ctx.label ? `@${ctx.label}` : '@') } - if (ctx.kind === 'templates' && (ctx as any).templateId) { - return await processTemplateFromDb( - (ctx as any).templateId, - ctx.label ? `@${ctx.label}` : '@' - ) + if (ctx.kind === 'templates' && ctx.templateId) { + return await processTemplateFromDb(ctx.templateId, ctx.label ? `@${ctx.label}` : '@') } - if (ctx.kind === 'logs' && (ctx as any).executionId) { - return await processExecutionLogFromDb( - (ctx as any).executionId, - ctx.label ? `@${ctx.label}` : '@' - ) + if (ctx.kind === 'logs' && ctx.executionId) { + return await processExecutionLogFromDb(ctx.executionId, ctx.label ? `@${ctx.label}` : '@') } - if (ctx.kind === 'workflow_block' && ctx.workflowId && (ctx as any).blockId) { - return await processWorkflowBlockFromDb(ctx.workflowId, (ctx as any).blockId, ctx.label) + if (ctx.kind === 'workflow_block' && ctx.workflowId && ctx.blockId) { + return await processWorkflowBlockFromDb(ctx.workflowId, ctx.blockId, ctx.label) } // Other kinds can be added here: workflow, blocks, logs, knowledge, templates, docs return null @@ -99,33 +90,24 @@ export async function processContextsServer( ctx.kind ) } - if (ctx.kind === 'knowledge' && (ctx as any).knowledgeId) { - return await processKnowledgeFromDb( - (ctx as any).knowledgeId, - ctx.label ? `@${ctx.label}` : '@' - ) + if (ctx.kind === 'knowledge' && ctx.knowledgeId) { + return await processKnowledgeFromDb(ctx.knowledgeId, ctx.label ? 
`@${ctx.label}` : '@') } - if (ctx.kind === 'blocks' && (ctx as any).blockId) { + if (ctx.kind === 'blocks' && ctx.blockIds?.length > 0) { return await processBlockMetadata( - (ctx as any).blockId, + ctx.blockIds[0], ctx.label ? `@${ctx.label}` : '@', userId ) } - if (ctx.kind === 'templates' && (ctx as any).templateId) { - return await processTemplateFromDb( - (ctx as any).templateId, - ctx.label ? `@${ctx.label}` : '@' - ) + if (ctx.kind === 'templates' && ctx.templateId) { + return await processTemplateFromDb(ctx.templateId, ctx.label ? `@${ctx.label}` : '@') } - if (ctx.kind === 'logs' && (ctx as any).executionId) { - return await processExecutionLogFromDb( - (ctx as any).executionId, - ctx.label ? `@${ctx.label}` : '@' - ) + if (ctx.kind === 'logs' && ctx.executionId) { + return await processExecutionLogFromDb(ctx.executionId, ctx.label ? `@${ctx.label}` : '@') } - if (ctx.kind === 'workflow_block' && ctx.workflowId && (ctx as any).blockId) { - return await processWorkflowBlockFromDb(ctx.workflowId, (ctx as any).blockId, ctx.label) + if (ctx.kind === 'workflow_block' && ctx.workflowId && ctx.blockId) { + return await processWorkflowBlockFromDb(ctx.workflowId, ctx.blockId, ctx.label) } if (ctx.kind === 'docs') { try { diff --git a/apps/sim/lib/copilot/store-utils.ts b/apps/sim/lib/copilot/store-utils.ts new file mode 100644 index 000000000..267d368c2 --- /dev/null +++ b/apps/sim/lib/copilot/store-utils.ts @@ -0,0 +1,201 @@ +import { createLogger } from '@sim/logger' +import { Loader2 } from 'lucide-react' +import { + ClientToolCallState, + type ClientToolDisplay, + TOOL_DISPLAY_REGISTRY, +} from '@/lib/copilot/tools/client/tool-display-registry' +import type { CopilotStore } from '@/stores/panel/copilot/types' + +const logger = createLogger('CopilotStoreUtils') + +type StoreSet = ( + partial: Partial | ((state: CopilotStore) => Partial) +) => void + +/** Respond tools are internal to copilot subagents and should never be shown in the UI */ +const 
HIDDEN_TOOL_SUFFIX = '_respond' + +export function resolveToolDisplay( + toolName: string | undefined, + state: ClientToolCallState, + _toolCallId?: string, + params?: Record +): ClientToolDisplay | undefined { + if (!toolName) return undefined + if (toolName.endsWith(HIDDEN_TOOL_SUFFIX)) return undefined + const entry = TOOL_DISPLAY_REGISTRY[toolName] + if (!entry) return humanizedFallback(toolName, state) + + if (entry.uiConfig?.dynamicText && params) { + const dynamicText = entry.uiConfig.dynamicText(params, state) + const stateDisplay = entry.displayNames[state] + if (dynamicText && stateDisplay?.icon) { + return { text: dynamicText, icon: stateDisplay.icon } + } + } + + const display = entry.displayNames[state] + if (display?.text || display?.icon) return display + + const fallbackOrder = [ + ClientToolCallState.generating, + ClientToolCallState.executing, + ClientToolCallState.success, + ] + for (const fallbackState of fallbackOrder) { + const fallback = entry.displayNames[fallbackState] + if (fallback?.text || fallback?.icon) return fallback + } + + return humanizedFallback(toolName, state) +} + +export function humanizedFallback( + toolName: string, + state: ClientToolCallState +): ClientToolDisplay | undefined { + const formattedName = toolName.replace(/_/g, ' ').replace(/\b\w/g, (c) => c.toUpperCase()) + const stateVerb = + state === ClientToolCallState.success + ? 'Executed' + : state === ClientToolCallState.error + ? 'Failed' + : state === ClientToolCallState.rejected || state === ClientToolCallState.aborted + ? 
'Skipped' + : 'Executing' + return { text: `${stateVerb} ${formattedName}`, icon: Loader2 } +} + +export function isRejectedState(state: string): boolean { + return state === 'rejected' +} + +export function isReviewState(state: string): boolean { + return state === 'review' +} + +export function isBackgroundState(state: string): boolean { + return state === 'background' +} + +export function isTerminalState(state: string): boolean { + return ( + state === ClientToolCallState.success || + state === ClientToolCallState.error || + state === ClientToolCallState.rejected || + state === ClientToolCallState.aborted || + isReviewState(state) || + isBackgroundState(state) + ) +} + +/** + * Resolves the appropriate terminal state for a non-terminal tool call. + * 'executing' → 'success': the server was running it, assume it completed. + * Everything else → 'aborted': never reached execution. + */ +function resolveAbortState(currentState: string): ClientToolCallState { + return currentState === ClientToolCallState.executing + ? 
ClientToolCallState.success + : ClientToolCallState.aborted +} + +export function abortAllInProgressTools(set: StoreSet, get: () => CopilotStore) { + try { + const { toolCallsById, messages } = get() + const updatedMap = { ...toolCallsById } + const resolvedIds = new Map() + let hasUpdates = false + for (const [id, tc] of Object.entries(toolCallsById)) { + const st = tc.state + const isTerminal = + st === ClientToolCallState.success || + st === ClientToolCallState.error || + st === ClientToolCallState.rejected || + st === ClientToolCallState.aborted + if (!isTerminal || isReviewState(st)) { + const resolved = resolveAbortState(st) + resolvedIds.set(id, resolved) + updatedMap[id] = { + ...tc, + state: resolved, + subAgentStreaming: false, + display: resolveToolDisplay(tc.name, resolved, id, tc.params), + } + hasUpdates = true + } else if (tc.subAgentStreaming) { + updatedMap[id] = { + ...tc, + subAgentStreaming: false, + } + hasUpdates = true + } + } + if (resolvedIds.size > 0 || hasUpdates) { + set({ toolCallsById: updatedMap }) + set((s: CopilotStore) => { + const msgs = [...s.messages] + for (let mi = msgs.length - 1; mi >= 0; mi--) { + const m = msgs[mi] + if (m.role !== 'assistant' || !Array.isArray(m.contentBlocks)) continue + let changed = false + const blocks = m.contentBlocks.map((b: any) => { + if (b?.type === 'tool_call' && b.toolCall?.id && resolvedIds.has(b.toolCall.id)) { + changed = true + const prev = b.toolCall + const resolved = resolvedIds.get(b.toolCall.id)! + return { + ...b, + toolCall: { + ...prev, + state: resolved, + display: resolveToolDisplay(prev?.name, resolved, prev?.id, prev?.params), + }, + } + } + return b + }) + if (changed) { + msgs[mi] = { ...m, contentBlocks: blocks } + break + } + } + return { messages: msgs } + }) + } + } catch (error) { + logger.warn('Failed to abort in-progress tools', { + error: error instanceof Error ? 
error.message : String(error), + }) + } +} + +export function cleanupActiveState( + set: (partial: Record) => void, + get: () => Record +): void { + abortAllInProgressTools(set as unknown as StoreSet, get as unknown as () => CopilotStore) + try { + const { useWorkflowDiffStore } = require('@/stores/workflow-diff/store') as { + useWorkflowDiffStore: { + getState: () => { clearDiff: (options?: { restoreBaseline?: boolean }) => void } + } + } + useWorkflowDiffStore.getState().clearDiff({ restoreBaseline: false }) + } catch (error) { + logger.warn('Failed to clear diff during cleanup', { + error: error instanceof Error ? error.message : String(error), + }) + } +} + +export function stripTodoTags(text: string): string { + if (!text) return text + return text + .replace(/[\s\S]*?<\/marktodo>/g, '') + .replace(/[\s\S]*?<\/checkofftodo>/g, '') + .replace(/[\s\S]*?<\/design_workflow>/g, '') + .replace(/[ \t]+\n/g, '\n') + .replace(/\n{2,}/g, '\n') +} diff --git a/apps/sim/lib/copilot/tools/client/base-subagent-tool.ts b/apps/sim/lib/copilot/tools/client/base-subagent-tool.ts deleted file mode 100644 index 7a843dd88..000000000 --- a/apps/sim/lib/copilot/tools/client/base-subagent-tool.ts +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Base class for subagent tools. - * - * Subagent tools spawn a server-side subagent that does the actual work. - * The tool auto-executes and the subagent's output is streamed back - * as nested content under the tool call. - * - * Examples: edit, plan, debug, evaluate, research, etc. 
- */ -import type { LucideIcon } from 'lucide-react' -import { BaseClientTool, type BaseClientToolMetadata, ClientToolCallState } from './base-tool' -import type { SubagentConfig, ToolUIConfig } from './ui-config' -import { registerToolUIConfig } from './ui-config' - -/** - * Configuration for creating a subagent tool - */ -export interface SubagentToolConfig { - /** Unique tool ID */ - id: string - /** Display names per state */ - displayNames: { - streaming: { text: string; icon: LucideIcon } - success: { text: string; icon: LucideIcon } - error: { text: string; icon: LucideIcon } - } - /** Subagent UI configuration */ - subagent: SubagentConfig - /** - * Optional: Whether this is a "special" tool (gets gradient styling). - * Default: false - */ - isSpecial?: boolean -} - -/** - * Create metadata for a subagent tool from config - */ -function createSubagentMetadata(config: SubagentToolConfig): BaseClientToolMetadata { - const { displayNames, subagent, isSpecial } = config - const { streaming, success, error } = displayNames - - const uiConfig: ToolUIConfig = { - isSpecial: isSpecial ?? false, - subagent, - } - - return { - displayNames: { - [ClientToolCallState.generating]: streaming, - [ClientToolCallState.pending]: streaming, - [ClientToolCallState.executing]: streaming, - [ClientToolCallState.success]: success, - [ClientToolCallState.error]: error, - [ClientToolCallState.rejected]: { - text: `${config.id.charAt(0).toUpperCase() + config.id.slice(1)} skipped`, - icon: error.icon, - }, - [ClientToolCallState.aborted]: { - text: `${config.id.charAt(0).toUpperCase() + config.id.slice(1)} aborted`, - icon: error.icon, - }, - }, - uiConfig, - } -} - -/** - * Base class for subagent tools. - * Extends BaseClientTool with subagent-specific behavior. - */ -export abstract class BaseSubagentTool extends BaseClientTool { - /** - * Subagent configuration. - * Override in subclasses to customize behavior. 
- */ - static readonly subagentConfig: SubagentToolConfig - - constructor(toolCallId: string, config: SubagentToolConfig) { - super(toolCallId, config.id, createSubagentMetadata(config)) - // Register UI config for this tool - registerToolUIConfig(config.id, this.metadata.uiConfig!) - } - - /** - * Execute the subagent tool. - * Immediately transitions to executing state - the actual work - * is done server-side by the subagent. - */ - async execute(_args?: Record): Promise { - this.setState(ClientToolCallState.executing) - // The tool result will come from the server via tool_result event - // when the subagent completes its work - } -} - -/** - * Factory function to create a subagent tool class. - * Use this for simple subagent tools that don't need custom behavior. - */ -export function createSubagentToolClass(config: SubagentToolConfig) { - // Register UI config at class creation time - const uiConfig: ToolUIConfig = { - isSpecial: config.isSpecial ?? false, - subagent: config.subagent, - } - registerToolUIConfig(config.id, uiConfig) - - return class extends BaseClientTool { - static readonly id = config.id - - constructor(toolCallId: string) { - super(toolCallId, config.id, createSubagentMetadata(config)) - } - - async execute(_args?: Record): Promise { - this.setState(ClientToolCallState.executing) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/base-tool.ts b/apps/sim/lib/copilot/tools/client/base-tool.ts index 8d7d396f9..32060e87a 100644 --- a/apps/sim/lib/copilot/tools/client/base-tool.ts +++ b/apps/sim/lib/copilot/tools/client/base-tool.ts @@ -1,15 +1,5 @@ -// Lazy require in setState to avoid circular init issues -import { createLogger } from '@sim/logger' import type { LucideIcon } from 'lucide-react' -import type { ToolUIConfig } from './ui-config' -const baseToolLogger = createLogger('BaseClientTool') - -const DEFAULT_TOOL_TIMEOUT_MS = 5 * 60 * 1000 - -export const WORKFLOW_EXECUTION_TIMEOUT_MS = 10 * 60 * 1000 - -// Client tool call states 
used by the new runtime export enum ClientToolCallState { generating = 'generating', pending = 'pending', @@ -22,252 +12,32 @@ export enum ClientToolCallState { background = 'background', } -// Display configuration for a given state export interface ClientToolDisplay { text: string icon: LucideIcon } -/** - * Function to generate dynamic display text based on tool parameters and state - * @param params - The tool call parameters - * @param state - The current tool call state - * @returns The dynamic text to display, or undefined to use the default text - */ +export interface BaseClientToolMetadata { + displayNames: Partial> + uiConfig?: Record + getDynamicText?: ( + params: Record, + state: ClientToolCallState + ) => string | undefined +} + export type DynamicTextFormatter = ( - params: Record, + params: Record, state: ClientToolCallState ) => string | undefined -export interface BaseClientToolMetadata { - displayNames: Partial> - interrupt?: { - accept: ClientToolDisplay - reject: ClientToolDisplay - } - /** - * Optional function to generate dynamic display text based on parameters - * If provided, this will override the default text in displayNames - */ - getDynamicText?: DynamicTextFormatter - /** - * UI configuration for how this tool renders in the tool-call component. - * This replaces hardcoded logic in tool-call.tsx with declarative config. 
- */ - uiConfig?: ToolUIConfig -} - -export class BaseClientTool { - readonly toolCallId: string - readonly name: string - protected state: ClientToolCallState - protected metadata: BaseClientToolMetadata - protected isMarkedComplete = false - protected timeoutMs: number = DEFAULT_TOOL_TIMEOUT_MS - - constructor(toolCallId: string, name: string, metadata: BaseClientToolMetadata) { - this.toolCallId = toolCallId - this.name = name - this.metadata = metadata - this.state = ClientToolCallState.generating - } - - /** - * Set a custom timeout for this tool (in milliseconds) - */ - setTimeoutMs(ms: number): void { - this.timeoutMs = ms - } - - /** - * Check if this tool has been marked complete - */ - hasBeenMarkedComplete(): boolean { - return this.isMarkedComplete - } - - /** - * Ensure the tool is marked complete. If not already marked, marks it with error. - * This should be called in finally blocks to prevent leaked tool calls. - */ - async ensureMarkedComplete( - fallbackMessage = 'Tool execution did not complete properly' - ): Promise { - if (!this.isMarkedComplete) { - baseToolLogger.warn('Tool was not marked complete, marking with error', { - toolCallId: this.toolCallId, - toolName: this.name, - state: this.state, - }) - await this.markToolComplete(500, fallbackMessage) - this.setState(ClientToolCallState.error) - } - } - - /** - * Execute with timeout protection. Wraps the execution in a timeout and ensures - * markToolComplete is always called. - */ - async executeWithTimeout(executeFn: () => Promise, timeoutMs?: number): Promise { - const timeout = timeoutMs ?? this.timeoutMs - let timeoutId: NodeJS.Timeout | null = null - - try { - await Promise.race([ - executeFn(), - new Promise((_, reject) => { - timeoutId = setTimeout(() => { - reject(new Error(`Tool execution timed out after ${timeout / 1000} seconds`)) - }, timeout) - }), - ]) - } catch (error) { - const message = error instanceof Error ? 
error.message : String(error) - baseToolLogger.error('Tool execution failed or timed out', { - toolCallId: this.toolCallId, - toolName: this.name, - error: message, - }) - // Only mark complete if not already marked - if (!this.isMarkedComplete) { - await this.markToolComplete(500, message) - this.setState(ClientToolCallState.error) - } - } finally { - if (timeoutId) clearTimeout(timeoutId) - // Ensure tool is always marked complete - await this.ensureMarkedComplete() - } - } - - // Intentionally left empty - specific tools can override - // eslint-disable-next-line @typescript-eslint/no-unused-vars - async execute(_args?: Record): Promise { - return - } - - /** - * Mark a tool as complete on the server (proxies to server-side route). - * Once called, the tool is considered complete and won't be marked again. - */ - async markToolComplete(status: number, message?: any, data?: any): Promise { - // Prevent double-marking - if (this.isMarkedComplete) { - baseToolLogger.warn('markToolComplete called but tool already marked complete', { - toolCallId: this.toolCallId, - toolName: this.name, - existingState: this.state, - attemptedStatus: status, - }) - return true - } - - this.isMarkedComplete = true - - try { - baseToolLogger.info('markToolComplete called', { - toolCallId: this.toolCallId, - toolName: this.name, - state: this.state, - status, - hasMessage: message !== undefined, - hasData: data !== undefined, - }) - } catch {} - - try { - const res = await fetch('/api/copilot/tools/mark-complete', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - id: this.toolCallId, - name: this.name, - status, - message, - data, - }), - }) - - if (!res.ok) { - // Try to surface server error - let errorText = `Failed to mark tool complete (status ${res.status})` - try { - const { error } = await res.json() - if (error) errorText = String(error) - } catch {} - throw new Error(errorText) - } - - const json = (await res.json()) as { 
success?: boolean } - return json?.success === true - } catch (e) { - // Default failure path - but tool is still marked complete locally - baseToolLogger.error('Failed to mark tool complete on server', { - toolCallId: this.toolCallId, - error: e instanceof Error ? e.message : String(e), - }) - return false - } - } - - // Accept (continue) for interrupt flows: move pending -> executing - async handleAccept(): Promise { - this.setState(ClientToolCallState.executing) - } - - // Reject (skip) for interrupt flows: mark complete with a standard skip message - async handleReject(): Promise { - await this.markToolComplete(200, 'Tool execution was skipped by the user') - this.setState(ClientToolCallState.rejected) - } - - // Return the display configuration for the current state - getDisplayState(): ClientToolDisplay | undefined { - return this.metadata.displayNames[this.state] - } - - // Return interrupt display config (labels/icons) if defined - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - return this.metadata.interrupt - } - - // Transition to a new state (also sync to Copilot store) - setState(next: ClientToolCallState, options?: { result?: any }): void { - const prev = this.state - this.state = next - - // Notify store via manager to avoid import cycles - try { - const { syncToolState } = require('@/lib/copilot/tools/client/manager') - syncToolState(this.toolCallId, next, options) - } catch {} - - // Log transition after syncing - try { - baseToolLogger.info('setState transition', { - toolCallId: this.toolCallId, - toolName: this.name, - prev, - next, - hasResult: options?.result !== undefined, - }) - } catch {} - } - - // Expose current state - getState(): ClientToolCallState { - return this.state - } - - hasInterrupt(): boolean { - return !!this.metadata.interrupt - } - - /** - * Get UI configuration for this tool. - * Used by tool-call component to determine rendering behavior. 
- */ - getUIConfig(): ToolUIConfig | undefined { - return this.metadata.uiConfig - } +export const WORKFLOW_EXECUTION_TIMEOUT_MS = 10 * 60 * 1000 + +/** Event detail for OAuth connect events dispatched by the copilot. */ +export interface OAuthConnectEventDetail { + providerName: string + serviceId: string + providerId: string + requiredScopes: string[] + newScopes?: string[] } diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-block-config.ts b/apps/sim/lib/copilot/tools/client/blocks/get-block-config.ts deleted file mode 100644 index a76971df0..000000000 --- a/apps/sim/lib/copilot/tools/client/blocks/get-block-config.ts +++ /dev/null @@ -1,100 +0,0 @@ -import { createLogger } from '@sim/logger' -import { FileCode, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { - ExecuteResponseSuccessSchema, - GetBlockConfigInput, - GetBlockConfigResult, -} from '@/lib/copilot/tools/shared/schemas' -import { getLatestBlock } from '@/blocks/registry' - -interface GetBlockConfigArgs { - blockType: string - operation?: string - trigger?: boolean -} - -export class GetBlockConfigClientTool extends BaseClientTool { - static readonly id = 'get_block_config' - - constructor(toolCallId: string) { - super(toolCallId, GetBlockConfigClientTool.id, GetBlockConfigClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting block config', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting block config', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Getting block config', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Retrieved block config', icon: FileCode }, - [ClientToolCallState.error]: { text: 'Failed to get block config', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted getting block 
config', icon: XCircle }, - [ClientToolCallState.rejected]: { - text: 'Skipped getting block config', - icon: MinusCircle, - }, - }, - getDynamicText: (params, state) => { - if (params?.blockType && typeof params.blockType === 'string') { - const blockConfig = getLatestBlock(params.blockType) - const blockName = (blockConfig?.name ?? params.blockType.replace(/_/g, ' ')).toLowerCase() - const opSuffix = params.operation ? ` (${params.operation})` : '' - - switch (state) { - case ClientToolCallState.success: - return `Retrieved ${blockName}${opSuffix} config` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Retrieving ${blockName}${opSuffix} config` - case ClientToolCallState.error: - return `Failed to retrieve ${blockName}${opSuffix} config` - case ClientToolCallState.aborted: - return `Aborted retrieving ${blockName}${opSuffix} config` - case ClientToolCallState.rejected: - return `Skipped retrieving ${blockName}${opSuffix} config` - } - } - return undefined - }, - } - - async execute(args?: GetBlockConfigArgs): Promise { - const logger = createLogger('GetBlockConfigClientTool') - try { - this.setState(ClientToolCallState.executing) - - const { blockType, operation, trigger } = GetBlockConfigInput.parse(args || {}) - - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - toolName: 'get_block_config', - payload: { blockType, operation, trigger }, - }), - }) - if (!res.ok) { - const errorText = await res.text().catch(() => '') - throw new Error(errorText || `Server error (${res.status})`) - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - const result = GetBlockConfigResult.parse(parsed.result) - - const inputCount = Object.keys(result.inputs).length - const outputCount = Object.keys(result.outputs).length - await 
this.markToolComplete(200, { inputs: inputCount, outputs: outputCount }, result) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? error.message : String(error) - logger.error('Execute failed', { message }) - await this.markToolComplete(500, message) - this.setState(ClientToolCallState.error) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-block-options.ts b/apps/sim/lib/copilot/tools/client/blocks/get-block-options.ts deleted file mode 100644 index 06efb6ffc..000000000 --- a/apps/sim/lib/copilot/tools/client/blocks/get-block-options.ts +++ /dev/null @@ -1,110 +0,0 @@ -import { createLogger } from '@sim/logger' -import { ListFilter, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { - ExecuteResponseSuccessSchema, - GetBlockOptionsInput, - GetBlockOptionsResult, -} from '@/lib/copilot/tools/shared/schemas' -import { getLatestBlock } from '@/blocks/registry' - -interface GetBlockOptionsArgs { - blockId: string -} - -export class GetBlockOptionsClientTool extends BaseClientTool { - static readonly id = 'get_block_options' - - constructor(toolCallId: string) { - super(toolCallId, GetBlockOptionsClientTool.id, GetBlockOptionsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting block operations', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting block operations', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Getting block operations', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Retrieved block operations', icon: ListFilter }, - [ClientToolCallState.error]: { text: 'Failed to get block operations', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted getting block operations', icon: XCircle 
}, - [ClientToolCallState.rejected]: { - text: 'Skipped getting block operations', - icon: MinusCircle, - }, - }, - getDynamicText: (params, state) => { - const blockId = - (params as any)?.blockId || - (params as any)?.blockType || - (params as any)?.block_id || - (params as any)?.block_type - if (typeof blockId === 'string') { - const blockConfig = getLatestBlock(blockId) - const blockName = (blockConfig?.name ?? blockId.replace(/_/g, ' ')).toLowerCase() - - switch (state) { - case ClientToolCallState.success: - return `Retrieved ${blockName} operations` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Retrieving ${blockName} operations` - case ClientToolCallState.error: - return `Failed to retrieve ${blockName} operations` - case ClientToolCallState.aborted: - return `Aborted retrieving ${blockName} operations` - case ClientToolCallState.rejected: - return `Skipped retrieving ${blockName} operations` - } - } - return undefined - }, - } - - async execute(args?: GetBlockOptionsArgs): Promise { - const logger = createLogger('GetBlockOptionsClientTool') - try { - this.setState(ClientToolCallState.executing) - - // Handle both camelCase and snake_case parameter names, plus blockType as an alias - const normalizedArgs = args - ? 
{ - blockId: - args.blockId || - (args as any).block_id || - (args as any).blockType || - (args as any).block_type, - } - : {} - - logger.info('execute called', { originalArgs: args, normalizedArgs }) - - const { blockId } = GetBlockOptionsInput.parse(normalizedArgs) - - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'get_block_options', payload: { blockId } }), - }) - if (!res.ok) { - const errorText = await res.text().catch(() => '') - throw new Error(errorText || `Server error (${res.status})`) - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - const result = GetBlockOptionsResult.parse(parsed.result) - - await this.markToolComplete(200, { operations: result.operations.length }, result) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? error.message : String(error) - logger.error('Execute failed', { message }) - await this.markToolComplete(500, message) - this.setState(ClientToolCallState.error) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-blocks-and-tools.ts b/apps/sim/lib/copilot/tools/client/blocks/get-blocks-and-tools.ts deleted file mode 100644 index d57cb1d24..000000000 --- a/apps/sim/lib/copilot/tools/client/blocks/get-blocks-and-tools.ts +++ /dev/null @@ -1,59 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Blocks, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { - ExecuteResponseSuccessSchema, - GetBlocksAndToolsResult, -} from '@/lib/copilot/tools/shared/schemas' - -export class GetBlocksAndToolsClientTool extends BaseClientTool { - static readonly id = 'get_blocks_and_tools' - - constructor(toolCallId: string) { - super(toolCallId, 
GetBlocksAndToolsClientTool.id, GetBlocksAndToolsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Exploring available options', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Exploring available options', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Exploring available options', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Explored available options', icon: Blocks }, - [ClientToolCallState.error]: { text: 'Failed to explore options', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted exploring options', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped exploring options', icon: MinusCircle }, - }, - interrupt: undefined, - } - - async execute(): Promise { - const logger = createLogger('GetBlocksAndToolsClientTool') - try { - this.setState(ClientToolCallState.executing) - - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'get_blocks_and_tools', payload: {} }), - }) - if (!res.ok) { - const errorText = await res.text().catch(() => '') - throw new Error(errorText || `Server error (${res.status})`) - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - const result = GetBlocksAndToolsResult.parse(parsed.result) - - await this.markToolComplete(200, 'Successfully retrieved blocks and tools', result) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? 
error.message : String(error) - await this.markToolComplete(500, message) - this.setState(ClientToolCallState.error) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-blocks-metadata.ts b/apps/sim/lib/copilot/tools/client/blocks/get-blocks-metadata.ts deleted file mode 100644 index 8fd88b1a3..000000000 --- a/apps/sim/lib/copilot/tools/client/blocks/get-blocks-metadata.ts +++ /dev/null @@ -1,95 +0,0 @@ -import { createLogger } from '@sim/logger' -import { ListFilter, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { - ExecuteResponseSuccessSchema, - GetBlocksMetadataInput, - GetBlocksMetadataResult, -} from '@/lib/copilot/tools/shared/schemas' - -interface GetBlocksMetadataArgs { - blockIds: string[] -} - -export class GetBlocksMetadataClientTool extends BaseClientTool { - static readonly id = 'get_blocks_metadata' - - constructor(toolCallId: string) { - super(toolCallId, GetBlocksMetadataClientTool.id, GetBlocksMetadataClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Searching block choices', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Searching block choices', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Searching block choices', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Searched block choices', icon: ListFilter }, - [ClientToolCallState.error]: { text: 'Failed to search block choices', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted searching block choices', icon: XCircle }, - [ClientToolCallState.rejected]: { - text: 'Skipped searching block choices', - icon: MinusCircle, - }, - }, - getDynamicText: (params, state) => { - if (params?.blockIds && Array.isArray(params.blockIds) && params.blockIds.length > 0) { - const blockList = params.blockIds - 
.slice(0, 3) - .map((blockId) => blockId.replace(/_/g, ' ')) - .join(', ') - const more = params.blockIds.length > 3 ? '...' : '' - const blocks = `${blockList}${more}` - - switch (state) { - case ClientToolCallState.success: - return `Searched ${blocks}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Searching ${blocks}` - case ClientToolCallState.error: - return `Failed to search ${blocks}` - case ClientToolCallState.aborted: - return `Aborted searching ${blocks}` - case ClientToolCallState.rejected: - return `Skipped searching ${blocks}` - } - } - return undefined - }, - } - - async execute(args?: GetBlocksMetadataArgs): Promise { - const logger = createLogger('GetBlocksMetadataClientTool') - try { - this.setState(ClientToolCallState.executing) - - const { blockIds } = GetBlocksMetadataInput.parse(args || {}) - - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'get_blocks_metadata', payload: { blockIds } }), - }) - if (!res.ok) { - const errorText = await res.text().catch(() => '') - throw new Error(errorText || `Server error (${res.status})`) - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - const result = GetBlocksMetadataResult.parse(parsed.result) - - await this.markToolComplete(200, { retrieved: Object.keys(result.metadata).length }, result) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? 
error.message : String(error) - logger.error('Execute failed', { message }) - await this.markToolComplete(500, message) - this.setState(ClientToolCallState.error) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/blocks/get-trigger-blocks.ts b/apps/sim/lib/copilot/tools/client/blocks/get-trigger-blocks.ts deleted file mode 100644 index c9fa0f78a..000000000 --- a/apps/sim/lib/copilot/tools/client/blocks/get-trigger-blocks.ts +++ /dev/null @@ -1,64 +0,0 @@ -import { createLogger } from '@sim/logger' -import { ListFilter, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { - ExecuteResponseSuccessSchema, - GetTriggerBlocksResult, -} from '@/lib/copilot/tools/shared/schemas' - -export class GetTriggerBlocksClientTool extends BaseClientTool { - static readonly id = 'get_trigger_blocks' - - constructor(toolCallId: string) { - super(toolCallId, GetTriggerBlocksClientTool.id, GetTriggerBlocksClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Finding trigger blocks', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Finding trigger blocks', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Finding trigger blocks', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Found trigger blocks', icon: ListFilter }, - [ClientToolCallState.error]: { text: 'Failed to find trigger blocks', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted finding trigger blocks', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped finding trigger blocks', icon: MinusCircle }, - }, - interrupt: undefined, - } - - async execute(): Promise { - const logger = createLogger('GetTriggerBlocksClientTool') - try { - this.setState(ClientToolCallState.executing) - - const res = await 
fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'get_trigger_blocks', payload: {} }), - }) - if (!res.ok) { - const errorText = await res.text().catch(() => '') - try { - const errorJson = JSON.parse(errorText) - throw new Error(errorJson.error || errorText || `Server error (${res.status})`) - } catch { - throw new Error(errorText || `Server error (${res.status})`) - } - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - const result = GetTriggerBlocksResult.parse(parsed.result) - - await this.markToolComplete(200, 'Successfully retrieved trigger blocks', result) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? error.message : String(error) - await this.markToolComplete(500, message) - this.setState(ClientToolCallState.error) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/examples/get-examples-rag.ts b/apps/sim/lib/copilot/tools/client/examples/get-examples-rag.ts deleted file mode 100644 index 258330e0e..000000000 --- a/apps/sim/lib/copilot/tools/client/examples/get-examples-rag.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { Loader2, MinusCircle, Search, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class GetExamplesRagClientTool extends BaseClientTool { - static readonly id = 'get_examples_rag' - - constructor(toolCallId: string) { - super(toolCallId, GetExamplesRagClientTool.id, GetExamplesRagClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Fetching examples', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Fetching examples', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Fetching examples', icon: 
Loader2 }, - [ClientToolCallState.success]: { text: 'Fetched examples', icon: Search }, - [ClientToolCallState.error]: { text: 'Failed to fetch examples', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted getting examples', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped getting examples', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.query && typeof params.query === 'string') { - const query = params.query - - switch (state) { - case ClientToolCallState.success: - return `Found examples for ${query}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Searching examples for ${query}` - case ClientToolCallState.error: - return `Failed to find examples for ${query}` - case ClientToolCallState.aborted: - return `Aborted searching examples for ${query}` - case ClientToolCallState.rejected: - return `Skipped searching examples for ${query}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/examples/get-operations-examples.ts b/apps/sim/lib/copilot/tools/client/examples/get-operations-examples.ts deleted file mode 100644 index 4a14b71ef..000000000 --- a/apps/sim/lib/copilot/tools/client/examples/get-operations-examples.ts +++ /dev/null @@ -1,58 +0,0 @@ -import { Loader2, MinusCircle, XCircle, Zap } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class GetOperationsExamplesClientTool extends BaseClientTool { - static readonly id = 'get_operations_examples' - - constructor(toolCallId: string) { - super(toolCallId, GetOperationsExamplesClientTool.id, GetOperationsExamplesClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 
'Designing workflow component', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Designing workflow component', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Designing workflow component', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Designed workflow component', icon: Zap }, - [ClientToolCallState.error]: { text: 'Failed to design workflow component', icon: XCircle }, - [ClientToolCallState.aborted]: { - text: 'Aborted designing workflow component', - icon: MinusCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped designing workflow component', - icon: MinusCircle, - }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.query && typeof params.query === 'string') { - const query = params.query - - switch (state) { - case ClientToolCallState.success: - return `Designed ${query}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Designing ${query}` - case ClientToolCallState.error: - return `Failed to design ${query}` - case ClientToolCallState.aborted: - return `Aborted designing ${query}` - case ClientToolCallState.rejected: - return `Skipped designing ${query}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/examples/get-trigger-examples.ts b/apps/sim/lib/copilot/tools/client/examples/get-trigger-examples.ts deleted file mode 100644 index f24ea4801..000000000 --- a/apps/sim/lib/copilot/tools/client/examples/get-trigger-examples.ts +++ /dev/null @@ -1,31 +0,0 @@ -import { Loader2, MinusCircle, XCircle, Zap } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class GetTriggerExamplesClientTool extends BaseClientTool { - static readonly id = 'get_trigger_examples' - - constructor(toolCallId: string) { - super(toolCallId, 
GetTriggerExamplesClientTool.id, GetTriggerExamplesClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Selecting a trigger', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Selecting a trigger', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Selecting a trigger', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Selected a trigger', icon: Zap }, - [ClientToolCallState.error]: { text: 'Failed to select a trigger', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted selecting a trigger', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped selecting a trigger', icon: MinusCircle }, - }, - interrupt: undefined, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/examples/summarize.ts b/apps/sim/lib/copilot/tools/client/examples/summarize.ts deleted file mode 100644 index 240be300b..000000000 --- a/apps/sim/lib/copilot/tools/client/examples/summarize.ts +++ /dev/null @@ -1,37 +0,0 @@ -import { Loader2, MinusCircle, PencilLine, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class SummarizeClientTool extends BaseClientTool { - static readonly id = 'summarize_conversation' - - constructor(toolCallId: string) { - super(toolCallId, SummarizeClientTool.id, SummarizeClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Summarizing conversation', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Summarizing conversation', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Summarizing conversation', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Summarized conversation', icon: PencilLine }, - [ClientToolCallState.error]: { text: 
'Failed to summarize conversation', icon: XCircle }, - [ClientToolCallState.aborted]: { - text: 'Aborted summarizing conversation', - icon: MinusCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped summarizing conversation', - icon: MinusCircle, - }, - }, - interrupt: undefined, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/init-tool-configs.ts b/apps/sim/lib/copilot/tools/client/init-tool-configs.ts deleted file mode 100644 index 336fdbb0c..000000000 --- a/apps/sim/lib/copilot/tools/client/init-tool-configs.ts +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Initialize all tool UI configurations. - * - * This module imports all client tools to trigger their UI config registration. - * Import this module early in the app to ensure all tool configs are available. - */ - -// Other tools (subagents) -import './other/auth' -import './other/custom-tool' -import './other/debug' -import './other/deploy' -import './other/edit' -import './other/evaluate' -import './other/info' -import './other/knowledge' -import './other/make-api-request' -import './other/plan' -import './other/research' -import './other/sleep' -import './other/superagent' -import './other/test' -import './other/tour' -import './other/workflow' - -// Workflow tools -import './workflow/deploy-api' -import './workflow/deploy-chat' -import './workflow/deploy-mcp' -import './workflow/edit-workflow' -import './workflow/redeploy' -import './workflow/run-workflow' -import './workflow/set-global-workflow-variables' - -// User tools -import './user/set-environment-variables' diff --git a/apps/sim/lib/copilot/tools/client/knowledge/knowledge-base.ts b/apps/sim/lib/copilot/tools/client/knowledge/knowledge-base.ts deleted file mode 100644 index 89f60b155..000000000 --- a/apps/sim/lib/copilot/tools/client/knowledge/knowledge-base.ts +++ /dev/null @@ -1,143 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Database, Loader2, MinusCircle, PlusCircle, 
XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { - ExecuteResponseSuccessSchema, - type KnowledgeBaseArgs, -} from '@/lib/copilot/tools/shared/schemas' -import { useCopilotStore } from '@/stores/panel/copilot/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -/** - * Client tool for knowledge base operations - */ -export class KnowledgeBaseClientTool extends BaseClientTool { - static readonly id = 'knowledge_base' - - constructor(toolCallId: string) { - super(toolCallId, KnowledgeBaseClientTool.id, KnowledgeBaseClientTool.metadata) - } - - /** - * Only show interrupt for create operation - */ - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - const toolCallsById = useCopilotStore.getState().toolCallsById - const toolCall = toolCallsById[this.toolCallId] - const params = toolCall?.params as KnowledgeBaseArgs | undefined - - // Only require confirmation for create operation - if (params?.operation === 'create') { - const name = params?.args?.name || 'new knowledge base' - return { - accept: { text: `Create "${name}"`, icon: PlusCircle }, - reject: { text: 'Skip', icon: XCircle }, - } - } - - // No interrupt for list, get, query - auto-execute - return undefined - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Accessing knowledge base', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Accessing knowledge base', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Accessing knowledge base', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Accessed knowledge base', icon: Database }, - [ClientToolCallState.error]: { text: 'Failed to access knowledge base', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted knowledge base access', icon: MinusCircle }, - 
[ClientToolCallState.rejected]: { text: 'Skipped knowledge base access', icon: MinusCircle }, - }, - getDynamicText: (params: Record, state: ClientToolCallState) => { - const operation = params?.operation as string | undefined - const name = params?.args?.name as string | undefined - - const opVerbs: Record = { - create: { - active: 'Creating knowledge base', - past: 'Created knowledge base', - pending: name ? `Create knowledge base "${name}"?` : 'Create knowledge base?', - }, - list: { active: 'Listing knowledge bases', past: 'Listed knowledge bases' }, - get: { active: 'Getting knowledge base', past: 'Retrieved knowledge base' }, - query: { active: 'Querying knowledge base', past: 'Queried knowledge base' }, - } - const defaultVerb: { active: string; past: string; pending?: string } = { - active: 'Accessing knowledge base', - past: 'Accessed knowledge base', - } - const verb = operation ? opVerbs[operation] || defaultVerb : defaultVerb - - if (state === ClientToolCallState.success) { - return verb.past - } - if (state === ClientToolCallState.pending && verb.pending) { - return verb.pending - } - if ( - state === ClientToolCallState.generating || - state === ClientToolCallState.pending || - state === ClientToolCallState.executing - ) { - return verb.active - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: KnowledgeBaseArgs): Promise { - await this.execute(args) - } - - async execute(args?: KnowledgeBaseArgs): Promise { - const logger = createLogger('KnowledgeBaseClientTool') - try { - this.setState(ClientToolCallState.executing) - - // Get the workspace ID from the workflow registry hydration state - const { hydration } = useWorkflowRegistry.getState() - const workspaceId = hydration.workspaceId - - // Build payload with workspace ID included in args - const payload: KnowledgeBaseArgs = { - ...(args || { operation: 'list' }), - args: { - 
...(args?.args || {}), - workspaceId: workspaceId || undefined, - }, - } - - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'knowledge_base', payload }), - }) - - if (!res.ok) { - const txt = await res.text().catch(() => '') - throw new Error(txt || `Server error (${res.status})`) - } - - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Knowledge base operation completed', parsed.result) - this.setState(ClientToolCallState.success) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to access knowledge base') - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/manager.ts b/apps/sim/lib/copilot/tools/client/manager.ts deleted file mode 100644 index bb83771d1..000000000 --- a/apps/sim/lib/copilot/tools/client/manager.ts +++ /dev/null @@ -1,24 +0,0 @@ -const instances: Record = {} - -let syncStateFn: ((toolCallId: string, nextState: any, options?: { result?: any }) => void) | null = - null - -export function registerClientTool(toolCallId: string, instance: any) { - instances[toolCallId] = instance -} - -export function getClientTool(toolCallId: string): any | undefined { - return instances[toolCallId] -} - -export function registerToolStateSync( - fn: (toolCallId: string, nextState: any, options?: { result?: any }) => void -) { - syncStateFn = fn -} - -export function syncToolState(toolCallId: string, nextState: any, options?: { result?: any }) { - try { - syncStateFn?.(toolCallId, nextState, options) - } catch {} -} diff --git a/apps/sim/lib/copilot/tools/client/navigation/navigate-ui.ts b/apps/sim/lib/copilot/tools/client/navigation/navigate-ui.ts deleted file mode 100644 index 
5b9d30c06..000000000 --- a/apps/sim/lib/copilot/tools/client/navigation/navigate-ui.ts +++ /dev/null @@ -1,241 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Navigation, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { useCopilotStore } from '@/stores/panel/copilot/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -type NavigationDestination = 'workflow' | 'logs' | 'templates' | 'vector_db' | 'settings' - -interface NavigateUIArgs { - destination: NavigationDestination - workflowName?: string -} - -export class NavigateUIClientTool extends BaseClientTool { - static readonly id = 'navigate_ui' - - constructor(toolCallId: string) { - super(toolCallId, NavigateUIClientTool.id, NavigateUIClientTool.metadata) - } - - /** - * Override to provide dynamic button text based on destination - */ - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - const toolCallsById = useCopilotStore.getState().toolCallsById - const toolCall = toolCallsById[this.toolCallId] - const params = toolCall?.params as NavigateUIArgs | undefined - - const destination = params?.destination - const workflowName = params?.workflowName - - let buttonText = 'Navigate' - - if (destination === 'workflow' && workflowName) { - buttonText = 'Open workflow' - } else if (destination === 'logs') { - buttonText = 'Open logs' - } else if (destination === 'templates') { - buttonText = 'Open templates' - } else if (destination === 'vector_db') { - buttonText = 'Open vector DB' - } else if (destination === 'settings') { - buttonText = 'Open settings' - } - - return { - accept: { text: buttonText, icon: Navigation }, - reject: { text: 'Skip', icon: XCircle }, - } - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to open', - icon: 
Loader2, - }, - [ClientToolCallState.pending]: { text: 'Open?', icon: Navigation }, - [ClientToolCallState.executing]: { text: 'Opening', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Opened', icon: Navigation }, - [ClientToolCallState.error]: { text: 'Failed to open', icon: X }, - [ClientToolCallState.aborted]: { - text: 'Aborted opening', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped opening', - icon: XCircle, - }, - }, - interrupt: { - accept: { text: 'Open', icon: Navigation }, - reject: { text: 'Skip', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const destination = params?.destination as NavigationDestination | undefined - const workflowName = params?.workflowName - - const action = 'open' - const actionCapitalized = 'Open' - const actionPast = 'opened' - const actionIng = 'opening' - let target = '' - - if (destination === 'workflow' && workflowName) { - target = ` workflow "${workflowName}"` - } else if (destination === 'workflow') { - target = ' workflows' - } else if (destination === 'logs') { - target = ' logs' - } else if (destination === 'templates') { - target = ' templates' - } else if (destination === 'vector_db') { - target = ' vector database' - } else if (destination === 'settings') { - target = ' settings' - } - - const fullAction = `${action}${target}` - const fullActionCapitalized = `${actionCapitalized}${target}` - const fullActionPast = `${actionPast}${target}` - const fullActionIng = `${actionIng}${target}` - - switch (state) { - case ClientToolCallState.success: - return fullActionPast.charAt(0).toUpperCase() + fullActionPast.slice(1) - case ClientToolCallState.executing: - return fullActionIng.charAt(0).toUpperCase() + fullActionIng.slice(1) - case ClientToolCallState.generating: - return `Preparing to ${fullAction}` - case ClientToolCallState.pending: - return `${fullActionCapitalized}?` - case ClientToolCallState.error: - return `Failed to ${fullAction}` - case 
ClientToolCallState.aborted: - return `Aborted ${fullAction}` - case ClientToolCallState.rejected: - return `Skipped ${fullAction}` - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: NavigateUIArgs): Promise { - const logger = createLogger('NavigateUIClientTool') - try { - this.setState(ClientToolCallState.executing) - - // Get params from copilot store if not provided directly - let destination = args?.destination - let workflowName = args?.workflowName - - if (!destination) { - const toolCallsById = useCopilotStore.getState().toolCallsById - const toolCall = toolCallsById[this.toolCallId] - const params = toolCall?.params as NavigateUIArgs | undefined - destination = params?.destination - workflowName = params?.workflowName - } - - if (!destination) { - throw new Error('No destination provided') - } - - let navigationUrl = '' - let successMessage = '' - - // Get current workspace ID from URL - const workspaceId = window.location.pathname.split('/')[2] - - switch (destination) { - case 'workflow': - if (workflowName) { - // Find workflow by name - const { workflows } = useWorkflowRegistry.getState() - const workflow = Object.values(workflows).find( - (w) => w.name.toLowerCase() === workflowName.toLowerCase() - ) - - if (!workflow) { - throw new Error(`Workflow "${workflowName}" not found`) - } - - navigationUrl = `/workspace/${workspaceId}/w/${workflow.id}` - successMessage = `Navigated to workflow "${workflowName}"` - } else { - navigationUrl = `/workspace/${workspaceId}/w` - successMessage = 'Navigated to workflows' - } - break - - case 'logs': - navigationUrl = `/workspace/${workspaceId}/logs` - successMessage = 'Navigated to logs' - break - - case 'templates': - navigationUrl = `/workspace/${workspaceId}/templates` - successMessage = 'Navigated to templates' - break - - case 'vector_db': - navigationUrl = 
`/workspace/${workspaceId}/vector-db` - successMessage = 'Navigated to vector database' - break - - case 'settings': - window.dispatchEvent(new CustomEvent('open-settings', { detail: { tab: 'general' } })) - successMessage = 'Opened settings' - break - - default: - throw new Error(`Unknown destination: ${destination}`) - } - - // Navigate if URL was set - if (navigationUrl) { - window.location.href = navigationUrl - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, successMessage, { - destination, - workflowName, - navigated: true, - }) - } catch (e: any) { - logger.error('Navigation failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - - // Get destination info for better error message - const toolCallsById = useCopilotStore.getState().toolCallsById - const toolCall = toolCallsById[this.toolCallId] - const params = toolCall?.params as NavigateUIArgs | undefined - const dest = params?.destination - const wfName = params?.workflowName - - let errorMessage = e?.message || 'Failed to navigate' - if (dest === 'workflow' && wfName) { - errorMessage = `Failed to navigate to workflow "${wfName}": ${e?.message || 'Unknown error'}` - } else if (dest) { - errorMessage = `Failed to navigate to ${dest}: ${e?.message || 'Unknown error'}` - } - - await this.markToolComplete(500, errorMessage) - } - } - - async execute(args?: NavigateUIArgs): Promise { - await this.handleAccept(args) - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/auth.ts b/apps/sim/lib/copilot/tools/client/other/auth.ts deleted file mode 100644 index b73a3f003..000000000 --- a/apps/sim/lib/copilot/tools/client/other/auth.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { KeyRound, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface AuthArgs { - 
instruction: string -} - -/** - * Auth tool that spawns a subagent to handle authentication setup. - * This tool auto-executes and the actual work is done by the auth subagent. - * The subagent's output is streamed as nested content under this tool call. - */ -export class AuthClientTool extends BaseClientTool { - static readonly id = 'auth' - - constructor(toolCallId: string) { - super(toolCallId, AuthClientTool.id, AuthClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Authenticating', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Authenticating', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Authenticating', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Authenticated', icon: KeyRound }, - [ClientToolCallState.error]: { text: 'Failed to authenticate', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped auth', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted auth', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Authenticating', - completedLabel: 'Authenticated', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the auth tool. - * This just marks the tool as executing - the actual auth work is done server-side - * by the auth subagent, and its output is streamed as subagent events. - */ - async execute(_args?: AuthArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(AuthClientTool.id, AuthClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/checkoff-todo.ts b/apps/sim/lib/copilot/tools/client/other/checkoff-todo.ts deleted file mode 100644 index 2a925d82d..000000000 --- a/apps/sim/lib/copilot/tools/client/other/checkoff-todo.ts +++ /dev/null @@ -1,61 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Check, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -interface CheckoffTodoArgs { - id?: string - todoId?: string -} - -export class CheckoffTodoClientTool extends BaseClientTool { - static readonly id = 'checkoff_todo' - - constructor(toolCallId: string) { - super(toolCallId, CheckoffTodoClientTool.id, CheckoffTodoClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Marking todo', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Marking todo', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Marked todo complete', icon: Check }, - [ClientToolCallState.error]: { text: 'Failed to mark todo', icon: XCircle }, - }, - } - - async execute(args?: CheckoffTodoArgs): Promise { - const logger = createLogger('CheckoffTodoClientTool') - try { - this.setState(ClientToolCallState.executing) - - const todoId = args?.id || args?.todoId - if (!todoId) { - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, 'Missing todo id') - return - } - - try { - const { useCopilotStore } = await import('@/stores/panel/copilot/store') - const store = useCopilotStore.getState() - if (store.updatePlanTodoStatus) { - store.updatePlanTodoStatus(todoId, 'completed') - } - } catch (e) { - logger.warn('Failed to update todo status in store', { message: (e as any)?.message }) - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Todo checked off', { todoId }) - 
this.setState(ClientToolCallState.success) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to check off todo') - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/crawl-website.ts b/apps/sim/lib/copilot/tools/client/other/crawl-website.ts deleted file mode 100644 index 37c220d36..000000000 --- a/apps/sim/lib/copilot/tools/client/other/crawl-website.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { Globe, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class CrawlWebsiteClientTool extends BaseClientTool { - static readonly id = 'crawl_website' - - constructor(toolCallId: string) { - super(toolCallId, CrawlWebsiteClientTool.id, CrawlWebsiteClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Crawling website', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Crawling website', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Crawling website', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Crawled website', icon: Globe }, - [ClientToolCallState.error]: { text: 'Failed to crawl website', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted crawling website', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped crawling website', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.url && typeof params.url === 'string') { - const url = params.url - - switch (state) { - case ClientToolCallState.success: - return `Crawled ${url}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Crawling ${url}` - case 
ClientToolCallState.error: - return `Failed to crawl ${url}` - case ClientToolCallState.aborted: - return `Aborted crawling ${url}` - case ClientToolCallState.rejected: - return `Skipped crawling ${url}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/custom-tool.ts b/apps/sim/lib/copilot/tools/client/other/custom-tool.ts deleted file mode 100644 index eab2818a8..000000000 --- a/apps/sim/lib/copilot/tools/client/other/custom-tool.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { Loader2, Wrench, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface CustomToolArgs { - instruction: string -} - -/** - * Custom tool that spawns a subagent to manage custom tools. - * This tool auto-executes and the actual work is done by the custom_tool subagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class CustomToolClientTool extends BaseClientTool { - static readonly id = 'custom_tool' - - constructor(toolCallId: string) { - super(toolCallId, CustomToolClientTool.id, CustomToolClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Managing custom tool', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Managing custom tool', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Managing custom tool', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Managed custom tool', icon: Wrench }, - [ClientToolCallState.error]: { text: 'Failed custom tool', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped custom tool', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted custom tool', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Managing custom tool', - completedLabel: 'Custom tool managed', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the custom_tool tool. - * This just marks the tool as executing - the actual custom tool work is done server-side - * by the custom_tool subagent, and its output is streamed as subagent events. - */ - async execute(_args?: CustomToolArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(CustomToolClientTool.id, CustomToolClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/debug.ts b/apps/sim/lib/copilot/tools/client/other/debug.ts deleted file mode 100644 index 6be16d886..000000000 --- a/apps/sim/lib/copilot/tools/client/other/debug.ts +++ /dev/null @@ -1,60 +0,0 @@ -import { Bug, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface DebugArgs { - error_description: string - context?: string -} - -/** - * Debug tool that spawns a subagent to diagnose workflow issues. - * This tool auto-executes and the actual work is done by the debug subagent. - * The subagent's output is streamed as nested content under this tool call. - */ -export class DebugClientTool extends BaseClientTool { - static readonly id = 'debug' - - constructor(toolCallId: string) { - super(toolCallId, DebugClientTool.id, DebugClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Debugged', icon: Bug }, - [ClientToolCallState.error]: { text: 'Failed to debug', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped debug', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted debug', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Debugging', - completedLabel: 'Debugged', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the debug tool. - * This just marks the tool as executing - the actual debug work is done server-side - * by the debug subagent, and its output is streamed as subagent events. 
- */ - async execute(_args?: DebugArgs): Promise { - // Immediately transition to executing state - no user confirmation needed - this.setState(ClientToolCallState.executing) - // The tool result will come from the server via tool_result event - // when the debug subagent completes its work - } -} - -// Register UI config at module load -registerToolUIConfig(DebugClientTool.id, DebugClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/other/deploy.ts b/apps/sim/lib/copilot/tools/client/other/deploy.ts deleted file mode 100644 index 80e8f8bc6..000000000 --- a/apps/sim/lib/copilot/tools/client/other/deploy.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { Loader2, Rocket, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface DeployArgs { - instruction: string -} - -/** - * Deploy tool that spawns a subagent to handle deployment. - * This tool auto-executes and the actual work is done by the deploy subagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class DeployClientTool extends BaseClientTool { - static readonly id = 'deploy' - - constructor(toolCallId: string) { - super(toolCallId, DeployClientTool.id, DeployClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Deploying', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Deploying', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Deploying', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Deployed', icon: Rocket }, - [ClientToolCallState.error]: { text: 'Failed to deploy', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped deploy', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted deploy', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Deploying', - completedLabel: 'Deployed', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the deploy tool. - * This just marks the tool as executing - the actual deploy work is done server-side - * by the deploy subagent, and its output is streamed as subagent events. - */ - async execute(_args?: DeployArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(DeployClientTool.id, DeployClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/edit.ts b/apps/sim/lib/copilot/tools/client/other/edit.ts deleted file mode 100644 index 85e67a927..000000000 --- a/apps/sim/lib/copilot/tools/client/other/edit.ts +++ /dev/null @@ -1,61 +0,0 @@ -import { Loader2, Pencil, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface EditArgs { - instruction: string -} - -/** - * Edit tool that spawns a subagent to apply code/workflow edits. - * This tool auto-executes and the actual work is done by the edit subagent. - * The subagent's output is streamed as nested content under this tool call. - */ -export class EditClientTool extends BaseClientTool { - static readonly id = 'edit' - - constructor(toolCallId: string) { - super(toolCallId, EditClientTool.id, EditClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Editing', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Editing', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Editing', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Edited', icon: Pencil }, - [ClientToolCallState.error]: { text: 'Failed to apply edit', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped edit', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted edit', icon: XCircle }, - }, - uiConfig: { - isSpecial: true, - subagent: { - streamingLabel: 'Editing', - completedLabel: 'Edited', - shouldCollapse: false, // Edit subagent stays expanded - outputArtifacts: ['edit_summary'], - hideThinkingText: true, // We show WorkflowEditSummary instead - }, - }, - } - - /** - * Execute the edit tool. 
- * This just marks the tool as executing - the actual edit work is done server-side - * by the edit subagent, and its output is streamed as subagent events. - */ - async execute(_args?: EditArgs): Promise { - // Immediately transition to executing state - no user confirmation needed - this.setState(ClientToolCallState.executing) - // The tool result will come from the server via tool_result event - // when the edit subagent completes its work - } -} - -// Register UI config at module load -registerToolUIConfig(EditClientTool.id, EditClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/other/evaluate.ts b/apps/sim/lib/copilot/tools/client/other/evaluate.ts deleted file mode 100644 index eaf7f542a..000000000 --- a/apps/sim/lib/copilot/tools/client/other/evaluate.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { ClipboardCheck, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface EvaluateArgs { - instruction: string -} - -/** - * Evaluate tool that spawns a subagent to evaluate workflows or outputs. - * This tool auto-executes and the actual work is done by the evaluate subagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class EvaluateClientTool extends BaseClientTool { - static readonly id = 'evaluate' - - constructor(toolCallId: string) { - super(toolCallId, EvaluateClientTool.id, EvaluateClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Evaluating', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Evaluating', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Evaluating', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Evaluated', icon: ClipboardCheck }, - [ClientToolCallState.error]: { text: 'Failed to evaluate', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped evaluation', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted evaluation', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Evaluating', - completedLabel: 'Evaluated', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the evaluate tool. - * This just marks the tool as executing - the actual evaluation work is done server-side - * by the evaluate subagent, and its output is streamed as subagent events. - */ - async execute(_args?: EvaluateArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(EvaluateClientTool.id, EvaluateClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/get-page-contents.ts b/apps/sim/lib/copilot/tools/client/other/get-page-contents.ts deleted file mode 100644 index 5b30c9111..000000000 --- a/apps/sim/lib/copilot/tools/client/other/get-page-contents.ts +++ /dev/null @@ -1,53 +0,0 @@ -import { FileText, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class GetPageContentsClientTool extends BaseClientTool { - static readonly id = 'get_page_contents' - - constructor(toolCallId: string) { - super(toolCallId, GetPageContentsClientTool.id, GetPageContentsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting page contents', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting page contents', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Getting page contents', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Retrieved page contents', icon: FileText }, - [ClientToolCallState.error]: { text: 'Failed to get page contents', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted getting page contents', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped getting page contents', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.urls && Array.isArray(params.urls) && params.urls.length > 0) { - const firstUrl = String(params.urls[0]) - const count = params.urls.length - - switch (state) { - case ClientToolCallState.success: - return count > 1 ? `Retrieved ${count} pages` : `Retrieved ${firstUrl}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return count > 1 ? 
`Getting ${count} pages` : `Getting ${firstUrl}` - case ClientToolCallState.error: - return count > 1 ? `Failed to get ${count} pages` : `Failed to get ${firstUrl}` - case ClientToolCallState.aborted: - return count > 1 ? `Aborted getting ${count} pages` : `Aborted getting ${firstUrl}` - case ClientToolCallState.rejected: - return count > 1 ? `Skipped getting ${count} pages` : `Skipped getting ${firstUrl}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/info.ts b/apps/sim/lib/copilot/tools/client/other/info.ts deleted file mode 100644 index e4253a22c..000000000 --- a/apps/sim/lib/copilot/tools/client/other/info.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { Info, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface InfoArgs { - instruction: string -} - -/** - * Info tool that spawns a subagent to retrieve information. - * This tool auto-executes and the actual work is done by the info subagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class InfoClientTool extends BaseClientTool { - static readonly id = 'info' - - constructor(toolCallId: string) { - super(toolCallId, InfoClientTool.id, InfoClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting info', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting info', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Getting info', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Retrieved info', icon: Info }, - [ClientToolCallState.error]: { text: 'Failed to get info', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped info', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted info', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Getting info', - completedLabel: 'Info retrieved', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the info tool. - * This just marks the tool as executing - the actual info work is done server-side - * by the info subagent, and its output is streamed as subagent events. - */ - async execute(_args?: InfoArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(InfoClientTool.id, InfoClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/knowledge.ts b/apps/sim/lib/copilot/tools/client/other/knowledge.ts deleted file mode 100644 index 25c853c71..000000000 --- a/apps/sim/lib/copilot/tools/client/other/knowledge.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { BookOpen, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface KnowledgeArgs { - instruction: string -} - -/** - * Knowledge tool that spawns a subagent to manage knowledge bases. - * This tool auto-executes and the actual work is done by the knowledge subagent. - * The subagent's output is streamed as nested content under this tool call. - */ -export class KnowledgeClientTool extends BaseClientTool { - static readonly id = 'knowledge' - - constructor(toolCallId: string) { - super(toolCallId, KnowledgeClientTool.id, KnowledgeClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Managing knowledge', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Managing knowledge', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Managing knowledge', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Managed knowledge', icon: BookOpen }, - [ClientToolCallState.error]: { text: 'Failed to manage knowledge', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped knowledge', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted knowledge', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Managing knowledge', - completedLabel: 'Knowledge managed', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the knowledge tool. 
- * This just marks the tool as executing - the actual knowledge search work is done server-side - * by the knowledge subagent, and its output is streamed as subagent events. - */ - async execute(_args?: KnowledgeArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(KnowledgeClientTool.id, KnowledgeClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/other/make-api-request.ts b/apps/sim/lib/copilot/tools/client/other/make-api-request.ts deleted file mode 100644 index 051622c05..000000000 --- a/apps/sim/lib/copilot/tools/client/other/make-api-request.ts +++ /dev/null @@ -1,127 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Globe2, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas' - -interface MakeApiRequestArgs { - url: string - method: 'GET' | 'POST' | 'PUT' - queryParams?: Record - headers?: Record - body?: any -} - -export class MakeApiRequestClientTool extends BaseClientTool { - static readonly id = 'make_api_request' - - constructor(toolCallId: string) { - super(toolCallId, MakeApiRequestClientTool.id, MakeApiRequestClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Preparing API request', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Review API request', icon: Globe2 }, - [ClientToolCallState.executing]: { text: 'Executing API request', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Completed API request', icon: Globe2 }, - [ClientToolCallState.error]: { text: 'Failed to execute API request', icon: XCircle }, - 
[ClientToolCallState.rejected]: { text: 'Skipped API request', icon: MinusCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted API request', icon: XCircle }, - }, - interrupt: { - accept: { text: 'Execute', icon: Globe2 }, - reject: { text: 'Skip', icon: MinusCircle }, - }, - uiConfig: { - interrupt: { - accept: { text: 'Execute', icon: Globe2 }, - reject: { text: 'Skip', icon: MinusCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - paramsTable: { - columns: [ - { key: 'method', label: 'Method', width: '26%', editable: true, mono: true }, - { key: 'url', label: 'Endpoint', width: '74%', editable: true, mono: true }, - ], - extractRows: (params) => { - return [['request', (params.method || 'GET').toUpperCase(), params.url || '']] - }, - }, - }, - getDynamicText: (params, state) => { - if (params?.url && typeof params.url === 'string') { - const method = params.method || 'GET' - let url = params.url - - // Extract domain from URL for cleaner display - try { - const urlObj = new URL(url) - url = urlObj.hostname + urlObj.pathname - } catch { - // Use URL as-is if parsing fails - } - - switch (state) { - case ClientToolCallState.success: - return `${method} ${url} complete` - case ClientToolCallState.executing: - return `${method} ${url}` - case ClientToolCallState.generating: - return `Preparing ${method} ${url}` - case ClientToolCallState.pending: - return `Review ${method} ${url}` - case ClientToolCallState.error: - return `Failed ${method} ${url}` - case ClientToolCallState.rejected: - return `Skipped ${method} ${url}` - case ClientToolCallState.aborted: - return `Aborted ${method} ${url}` - } - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: MakeApiRequestArgs): Promise { - const logger = createLogger('MakeApiRequestClientTool') - try { - this.setState(ClientToolCallState.executing) - const res = await 
fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'make_api_request', payload: args || {} }), - }) - if (!res.ok) { - const txt = await res.text().catch(() => '') - throw new Error(txt || `Server error (${res.status})`) - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'API request executed', parsed.result) - this.setState(ClientToolCallState.success) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'API request failed') - } - } - - async execute(args?: MakeApiRequestArgs): Promise { - await this.handleAccept(args) - } -} - -// Register UI config at module load -registerToolUIConfig(MakeApiRequestClientTool.id, MakeApiRequestClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/mark-todo-in-progress.ts b/apps/sim/lib/copilot/tools/client/other/mark-todo-in-progress.ts deleted file mode 100644 index fbed86ea8..000000000 --- a/apps/sim/lib/copilot/tools/client/other/mark-todo-in-progress.ts +++ /dev/null @@ -1,64 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -interface MarkTodoInProgressArgs { - id?: string - todoId?: string -} - -export class MarkTodoInProgressClientTool extends BaseClientTool { - static readonly id = 'mark_todo_in_progress' - - constructor(toolCallId: string) { - super(toolCallId, MarkTodoInProgressClientTool.id, MarkTodoInProgressClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Marking todo in progress', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Marking todo in progress', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Marking todo in progress', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Marked todo in progress', icon: Loader2 }, - [ClientToolCallState.error]: { text: 'Failed to mark in progress', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted marking in progress', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped marking in progress', icon: MinusCircle }, - }, - } - - async execute(args?: MarkTodoInProgressArgs): Promise { - const logger = createLogger('MarkTodoInProgressClientTool') - try { - this.setState(ClientToolCallState.executing) - - const todoId = args?.id || args?.todoId - if (!todoId) { - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, 'Missing todo id') - return - } - - try { - const { useCopilotStore } = await import('@/stores/panel/copilot/store') - 
const store = useCopilotStore.getState() - if (store.updatePlanTodoStatus) { - store.updatePlanTodoStatus(todoId, 'executing') - } - } catch (e) { - logger.warn('Failed to update todo status in store', { message: (e as any)?.message }) - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Todo marked in progress', { todoId }) - this.setState(ClientToolCallState.success) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to mark todo in progress') - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/oauth-request-access.ts b/apps/sim/lib/copilot/tools/client/other/oauth-request-access.ts deleted file mode 100644 index 725f73bc7..000000000 --- a/apps/sim/lib/copilot/tools/client/other/oauth-request-access.ts +++ /dev/null @@ -1,174 +0,0 @@ -import { createLogger } from '@sim/logger' -import { CheckCircle, Loader2, MinusCircle, PlugZap, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { OAUTH_PROVIDERS, type OAuthServiceConfig } from '@/lib/oauth' - -const logger = createLogger('OAuthRequestAccessClientTool') - -interface OAuthRequestAccessArgs { - providerName?: string -} - -interface ResolvedServiceInfo { - serviceId: string - providerId: string - service: OAuthServiceConfig -} - -/** - * Finds the service configuration from a provider name. - * The providerName should match the exact `name` field returned by get_credentials tool's notConnected services. 
- */ -function findServiceByName(providerName: string): ResolvedServiceInfo | null { - const normalizedName = providerName.toLowerCase().trim() - - // First pass: exact match (case-insensitive) - for (const [, providerConfig] of Object.entries(OAUTH_PROVIDERS)) { - for (const [serviceId, service] of Object.entries(providerConfig.services)) { - if (service.name.toLowerCase() === normalizedName) { - return { serviceId, providerId: service.providerId, service } - } - } - } - - // Second pass: partial match as fallback for flexibility - for (const [, providerConfig] of Object.entries(OAUTH_PROVIDERS)) { - for (const [serviceId, service] of Object.entries(providerConfig.services)) { - if ( - service.name.toLowerCase().includes(normalizedName) || - normalizedName.includes(service.name.toLowerCase()) - ) { - return { serviceId, providerId: service.providerId, service } - } - } - } - - return null -} - -export interface OAuthConnectEventDetail { - providerName: string - serviceId: string - providerId: string - requiredScopes: string[] - newScopes?: string[] -} - -export class OAuthRequestAccessClientTool extends BaseClientTool { - static readonly id = 'oauth_request_access' - - private providerName?: string - - constructor(toolCallId: string) { - super(toolCallId, OAuthRequestAccessClientTool.id, OAuthRequestAccessClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Requesting integration access', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Requesting integration access', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Requesting integration access', icon: Loader2 }, - [ClientToolCallState.rejected]: { text: 'Skipped integration access', icon: MinusCircle }, - [ClientToolCallState.success]: { text: 'Requested integration access', icon: CheckCircle }, - [ClientToolCallState.error]: { text: 'Failed to request integration access', icon: X }, - 
[ClientToolCallState.aborted]: { text: 'Aborted integration access request', icon: XCircle }, - }, - interrupt: { - accept: { text: 'Connect', icon: PlugZap }, - reject: { text: 'Skip', icon: MinusCircle }, - }, - getDynamicText: (params, state) => { - if (params.providerName) { - const name = params.providerName - switch (state) { - case ClientToolCallState.generating: - case ClientToolCallState.pending: - case ClientToolCallState.executing: - return `Requesting ${name} access` - case ClientToolCallState.rejected: - return `Skipped ${name} access` - case ClientToolCallState.success: - return `Requested ${name} access` - case ClientToolCallState.error: - return `Failed to request ${name} access` - case ClientToolCallState.aborted: - return `Aborted ${name} access request` - } - } - return undefined - }, - } - - async handleAccept(args?: OAuthRequestAccessArgs): Promise { - try { - if (args?.providerName) { - this.providerName = args.providerName - } - - if (!this.providerName) { - logger.error('No provider name provided') - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, 'No provider name specified') - return - } - - // Find the service by name - const serviceInfo = findServiceByName(this.providerName) - if (!serviceInfo) { - logger.error('Could not find OAuth service for provider', { - providerName: this.providerName, - }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, `Unknown provider: ${this.providerName}`) - return - } - - const { serviceId, providerId, service } = serviceInfo - - logger.info('Opening OAuth connect modal', { - providerName: this.providerName, - serviceId, - providerId, - }) - - // Move to executing state - this.setState(ClientToolCallState.executing) - - // Dispatch event to open the OAuth modal (same pattern as open-settings) - window.dispatchEvent( - new CustomEvent('open-oauth-connect', { - detail: { - providerName: this.providerName, - serviceId, - providerId, - requiredScopes: 
service.scopes || [], - }, - }) - ) - - // Mark as success - the user opened the prompt, but connection is not guaranteed - this.setState(ClientToolCallState.success) - await this.markToolComplete( - 200, - `The user opened the ${this.providerName} connection prompt and may have connected. Check the connected integrations to verify the connection status.` - ) - } catch (e) { - logger.error('Failed to open OAuth connect modal', { error: e }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, 'Failed to open OAuth connection dialog') - } - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async execute(args?: OAuthRequestAccessArgs): Promise { - await this.handleAccept(args) - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/plan.ts b/apps/sim/lib/copilot/tools/client/other/plan.ts deleted file mode 100644 index 63eaad7b4..000000000 --- a/apps/sim/lib/copilot/tools/client/other/plan.ts +++ /dev/null @@ -1,59 +0,0 @@ -import { ListTodo, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface PlanArgs { - request: string -} - -/** - * Plan tool that spawns a subagent to plan an approach. - * This tool auto-executes and the actual work is done by the plan subagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class PlanClientTool extends BaseClientTool { - static readonly id = 'plan' - - constructor(toolCallId: string) { - super(toolCallId, PlanClientTool.id, PlanClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Planning', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Planning', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Planning', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Planned', icon: ListTodo }, - [ClientToolCallState.error]: { text: 'Failed to plan', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped plan', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted plan', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Planning', - completedLabel: 'Planned', - shouldCollapse: true, - outputArtifacts: ['plan'], - }, - }, - } - - /** - * Execute the plan tool. - * This just marks the tool as executing - the actual planning work is done server-side - * by the plan subagent, and its output is streamed as subagent events. - */ - async execute(_args?: PlanArgs): Promise { - // Immediately transition to executing state - no user confirmation needed - this.setState(ClientToolCallState.executing) - // The tool result will come from the server via tool_result event - // when the plan subagent completes its work - } -} - -// Register UI config at module load -registerToolUIConfig(PlanClientTool.id, PlanClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/remember-debug.ts b/apps/sim/lib/copilot/tools/client/other/remember-debug.ts deleted file mode 100644 index 822ceda07..000000000 --- a/apps/sim/lib/copilot/tools/client/other/remember-debug.ts +++ /dev/null @@ -1,76 +0,0 @@ -import { CheckCircle2, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class RememberDebugClientTool extends BaseClientTool { - static readonly id = 'remember_debug' - - constructor(toolCallId: string) { - super(toolCallId, RememberDebugClientTool.id, RememberDebugClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Validating fix', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Validating fix', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Validating fix', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Validated fix', icon: CheckCircle2 }, - [ClientToolCallState.error]: { text: 'Failed to validate', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted validation', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped validation', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - const operation = params?.operation - - if (operation === 'add' || operation === 'edit') { - // For add/edit, show from problem or solution - const text = params?.problem || params?.solution - if (text && typeof text === 'string') { - switch (state) { - case ClientToolCallState.success: - return `Validated fix ${text}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Validating fix ${text}` - case ClientToolCallState.error: - return `Failed to validate fix ${text}` - case ClientToolCallState.aborted: - 
return `Aborted validating fix ${text}` - case ClientToolCallState.rejected: - return `Skipped validating fix ${text}` - } - } - } else if (operation === 'delete') { - // For delete, show from problem or solution (or id as fallback) - const text = params?.problem || params?.solution || params?.id - if (text && typeof text === 'string') { - switch (state) { - case ClientToolCallState.success: - return `Adjusted fix ${text}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Adjusting fix ${text}` - case ClientToolCallState.error: - return `Failed to adjust fix ${text}` - case ClientToolCallState.aborted: - return `Aborted adjusting fix ${text}` - case ClientToolCallState.rejected: - return `Skipped adjusting fix ${text}` - } - } - } - - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/research.ts b/apps/sim/lib/copilot/tools/client/other/research.ts deleted file mode 100644 index 0a10e8989..000000000 --- a/apps/sim/lib/copilot/tools/client/other/research.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { Loader2, Search, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface ResearchArgs { - instruction: string -} - -/** - * Research tool that spawns a subagent to research information. - * This tool auto-executes and the actual work is done by the research subagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class ResearchClientTool extends BaseClientTool { - static readonly id = 'research' - - constructor(toolCallId: string) { - super(toolCallId, ResearchClientTool.id, ResearchClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Researching', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Researching', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Researching', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Researched', icon: Search }, - [ClientToolCallState.error]: { text: 'Failed to research', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped research', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted research', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Researching', - completedLabel: 'Researched', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the research tool. - * This just marks the tool as executing - the actual research work is done server-side - * by the research subagent, and its output is streamed as subagent events. - */ - async execute(_args?: ResearchArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(ResearchClientTool.id, ResearchClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/scrape-page.ts b/apps/sim/lib/copilot/tools/client/other/scrape-page.ts deleted file mode 100644 index 5979c9f0c..000000000 --- a/apps/sim/lib/copilot/tools/client/other/scrape-page.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { Globe, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class ScrapePageClientTool extends BaseClientTool { - static readonly id = 'scrape_page' - - constructor(toolCallId: string) { - super(toolCallId, ScrapePageClientTool.id, ScrapePageClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Scraping page', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Scraping page', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Scraping page', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Scraped page', icon: Globe }, - [ClientToolCallState.error]: { text: 'Failed to scrape page', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted scraping page', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped scraping page', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.url && typeof params.url === 'string') { - const url = params.url - - switch (state) { - case ClientToolCallState.success: - return `Scraped ${url}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Scraping ${url}` - case ClientToolCallState.error: - return `Failed to scrape ${url}` - case ClientToolCallState.aborted: - return `Aborted scraping ${url}` - case ClientToolCallState.rejected: - return `Skipped scraping ${url}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git 
a/apps/sim/lib/copilot/tools/client/other/search-documentation.ts b/apps/sim/lib/copilot/tools/client/other/search-documentation.ts deleted file mode 100644 index cf784d3f2..000000000 --- a/apps/sim/lib/copilot/tools/client/other/search-documentation.ts +++ /dev/null @@ -1,80 +0,0 @@ -import { createLogger } from '@sim/logger' -import { BookOpen, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas' - -interface SearchDocumentationArgs { - query: string - topK?: number - threshold?: number -} - -export class SearchDocumentationClientTool extends BaseClientTool { - static readonly id = 'search_documentation' - - constructor(toolCallId: string) { - super(toolCallId, SearchDocumentationClientTool.id, SearchDocumentationClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Searching documentation', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Searching documentation', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Searching documentation', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Completed documentation search', icon: BookOpen }, - [ClientToolCallState.error]: { text: 'Failed to search docs', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted documentation search', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped documentation search', icon: MinusCircle }, - }, - getDynamicText: (params, state) => { - if (params?.query && typeof params.query === 'string') { - const query = params.query - - switch (state) { - case ClientToolCallState.success: - return `Searched docs for ${query}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - 
return `Searching docs for ${query}` - case ClientToolCallState.error: - return `Failed to search docs for ${query}` - case ClientToolCallState.aborted: - return `Aborted searching docs for ${query}` - case ClientToolCallState.rejected: - return `Skipped searching docs for ${query}` - } - } - return undefined - }, - } - - async execute(args?: SearchDocumentationArgs): Promise { - const logger = createLogger('SearchDocumentationClientTool') - try { - this.setState(ClientToolCallState.executing) - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'search_documentation', payload: args || {} }), - }) - if (!res.ok) { - const txt = await res.text().catch(() => '') - throw new Error(txt || `Server error (${res.status})`) - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Documentation search complete', parsed.result) - this.setState(ClientToolCallState.success) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Documentation search failed') - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/search-errors.ts b/apps/sim/lib/copilot/tools/client/other/search-errors.ts deleted file mode 100644 index d0eb6cc35..000000000 --- a/apps/sim/lib/copilot/tools/client/other/search-errors.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { Bug, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class SearchErrorsClientTool extends BaseClientTool { - static readonly id = 'search_errors' - - constructor(toolCallId: string) { - super(toolCallId, SearchErrorsClientTool.id, 
SearchErrorsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Debugging', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Debugged', icon: Bug }, - [ClientToolCallState.error]: { text: 'Failed to debug', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted debugging', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped debugging', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.query && typeof params.query === 'string') { - const query = params.query - - switch (state) { - case ClientToolCallState.success: - return `Debugged ${query}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Debugging ${query}` - case ClientToolCallState.error: - return `Failed to debug ${query}` - case ClientToolCallState.aborted: - return `Aborted debugging ${query}` - case ClientToolCallState.rejected: - return `Skipped debugging ${query}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/search-library-docs.ts b/apps/sim/lib/copilot/tools/client/other/search-library-docs.ts deleted file mode 100644 index 7dcff295b..000000000 --- a/apps/sim/lib/copilot/tools/client/other/search-library-docs.ts +++ /dev/null @@ -1,50 +0,0 @@ -import { BookOpen, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class SearchLibraryDocsClientTool extends BaseClientTool { - static readonly id = 'search_library_docs' - - constructor(toolCallId: string) { - super(toolCallId, 
SearchLibraryDocsClientTool.id, SearchLibraryDocsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Reading docs', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Reading docs', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Reading docs', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Read docs', icon: BookOpen }, - [ClientToolCallState.error]: { text: 'Failed to read docs', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted reading docs', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped reading docs', icon: MinusCircle }, - }, - getDynamicText: (params, state) => { - const libraryName = params?.library_name - if (libraryName && typeof libraryName === 'string') { - switch (state) { - case ClientToolCallState.success: - return `Read ${libraryName} docs` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Reading ${libraryName} docs` - case ClientToolCallState.error: - return `Failed to read ${libraryName} docs` - case ClientToolCallState.aborted: - return `Aborted reading ${libraryName} docs` - case ClientToolCallState.rejected: - return `Skipped reading ${libraryName} docs` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/search-online.ts b/apps/sim/lib/copilot/tools/client/other/search-online.ts deleted file mode 100644 index 083658468..000000000 --- a/apps/sim/lib/copilot/tools/client/other/search-online.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { Globe, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -export class SearchOnlineClientTool extends BaseClientTool { - static readonly id = 'search_online' - - 
constructor(toolCallId: string) { - super(toolCallId, SearchOnlineClientTool.id, SearchOnlineClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Searching online', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Searching online', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Searching online', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Completed online search', icon: Globe }, - [ClientToolCallState.error]: { text: 'Failed to search online', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped online search', icon: MinusCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted online search', icon: XCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.query && typeof params.query === 'string') { - const query = params.query - - switch (state) { - case ClientToolCallState.success: - return `Searched online for ${query}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Searching online for ${query}` - case ClientToolCallState.error: - return `Failed to search online for ${query}` - case ClientToolCallState.aborted: - return `Aborted searching online for ${query}` - case ClientToolCallState.rejected: - return `Skipped searching online for ${query}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/search-patterns.ts b/apps/sim/lib/copilot/tools/client/other/search-patterns.ts deleted file mode 100644 index e16785a70..000000000 --- a/apps/sim/lib/copilot/tools/client/other/search-patterns.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { Loader2, MinusCircle, Search, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - 
-export class SearchPatternsClientTool extends BaseClientTool { - static readonly id = 'search_patterns' - - constructor(toolCallId: string) { - super(toolCallId, SearchPatternsClientTool.id, SearchPatternsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Searching workflow patterns', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Searching workflow patterns', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Searching workflow patterns', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Found workflow patterns', icon: Search }, - [ClientToolCallState.error]: { text: 'Failed to search patterns', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted pattern search', icon: MinusCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped pattern search', icon: MinusCircle }, - }, - interrupt: undefined, - getDynamicText: (params, state) => { - if (params?.queries && Array.isArray(params.queries) && params.queries.length > 0) { - const firstQuery = String(params.queries[0]) - - switch (state) { - case ClientToolCallState.success: - return `Searched ${firstQuery}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Searching ${firstQuery}` - case ClientToolCallState.error: - return `Failed to search ${firstQuery}` - case ClientToolCallState.aborted: - return `Aborted searching ${firstQuery}` - case ClientToolCallState.rejected: - return `Skipped searching ${firstQuery}` - } - } - return undefined - }, - } - - async execute(): Promise { - return - } -} diff --git a/apps/sim/lib/copilot/tools/client/other/sleep.ts b/apps/sim/lib/copilot/tools/client/other/sleep.ts deleted file mode 100644 index 91949ea81..000000000 --- a/apps/sim/lib/copilot/tools/client/other/sleep.ts +++ /dev/null @@ -1,157 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, 
MinusCircle, Moon, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -/** Maximum sleep duration in seconds (3 minutes) */ -const MAX_SLEEP_SECONDS = 180 - -/** Track sleep start times for calculating elapsed time on wake */ -const sleepStartTimes: Record = {} - -interface SleepArgs { - seconds?: number -} - -/** - * Format seconds into a human-readable duration string - */ -function formatDuration(seconds: number): string { - if (seconds >= 60) { - return `${Math.round(seconds / 60)} minute${seconds >= 120 ? 's' : ''}` - } - return `${seconds} second${seconds !== 1 ? 's' : ''}` -} - -export class SleepClientTool extends BaseClientTool { - static readonly id = 'sleep' - - constructor(toolCallId: string) { - super(toolCallId, SleepClientTool.id, SleepClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Preparing to sleep', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Sleeping', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Sleeping', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Finished sleeping', icon: Moon }, - [ClientToolCallState.error]: { text: 'Interrupted sleep', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped sleep', icon: MinusCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted sleep', icon: MinusCircle }, - [ClientToolCallState.background]: { text: 'Resumed', icon: Moon }, - }, - uiConfig: { - secondaryAction: { - text: 'Wake', - title: 'Wake', - variant: 'tertiary', - showInStates: [ClientToolCallState.executing], - targetState: ClientToolCallState.background, - }, - }, - // No interrupt - auto-execute immediately - getDynamicText: (params, state) => { - const seconds = params?.seconds - if (typeof seconds === 
'number' && seconds > 0) { - const displayTime = formatDuration(seconds) - switch (state) { - case ClientToolCallState.success: - return `Slept for ${displayTime}` - case ClientToolCallState.executing: - case ClientToolCallState.pending: - return `Sleeping for ${displayTime}` - case ClientToolCallState.generating: - return `Preparing to sleep for ${displayTime}` - case ClientToolCallState.error: - return `Failed to sleep for ${displayTime}` - case ClientToolCallState.rejected: - return `Skipped sleeping for ${displayTime}` - case ClientToolCallState.aborted: - return `Aborted sleeping for ${displayTime}` - case ClientToolCallState.background: { - // Calculate elapsed time from when sleep started - const elapsedSeconds = params?._elapsedSeconds - if (typeof elapsedSeconds === 'number' && elapsedSeconds > 0) { - return `Resumed after ${formatDuration(Math.round(elapsedSeconds))}` - } - return 'Resumed early' - } - } - } - return undefined - }, - } - - /** - * Get elapsed seconds since sleep started - */ - getElapsedSeconds(): number { - const startTime = sleepStartTimes[this.toolCallId] - if (!startTime) return 0 - return (Date.now() - startTime) / 1000 - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: SleepArgs): Promise { - const logger = createLogger('SleepClientTool') - - // Use a timeout slightly longer than max sleep (3 minutes + buffer) - const timeoutMs = (MAX_SLEEP_SECONDS + 30) * 1000 - - await this.executeWithTimeout(async () => { - const params = args || {} - logger.debug('handleAccept() called', { - toolCallId: this.toolCallId, - state: this.getState(), - hasArgs: !!args, - seconds: params.seconds, - }) - - // Validate and clamp seconds - let seconds = typeof params.seconds === 'number' ? 
params.seconds : 0 - if (seconds < 0) seconds = 0 - if (seconds > MAX_SLEEP_SECONDS) seconds = MAX_SLEEP_SECONDS - - logger.debug('Starting sleep', { seconds }) - - // Track start time for elapsed calculation - sleepStartTimes[this.toolCallId] = Date.now() - - this.setState(ClientToolCallState.executing) - - try { - // Sleep for the specified duration - await new Promise((resolve) => setTimeout(resolve, seconds * 1000)) - - logger.debug('Sleep completed successfully') - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, `Slept for ${seconds} seconds`) - } catch (error) { - const message = error instanceof Error ? error.message : String(error) - logger.error('Sleep failed', { error: message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, message) - } finally { - // Clean up start time tracking - delete sleepStartTimes[this.toolCallId] - } - }, timeoutMs) - } - - async execute(args?: SleepArgs): Promise { - // Auto-execute without confirmation - go straight to executing - await this.handleAccept(args) - } -} - -// Register UI config at module load -registerToolUIConfig(SleepClientTool.id, SleepClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/other/superagent.ts b/apps/sim/lib/copilot/tools/client/other/superagent.ts deleted file mode 100644 index 99ec1fbfe..000000000 --- a/apps/sim/lib/copilot/tools/client/other/superagent.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { Loader2, Sparkles, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface SuperagentArgs { - instruction: string -} - -/** - * Superagent tool that spawns a powerful subagent for complex tasks. - * This tool auto-executes and the actual work is done by the superagent. 
- * The subagent's output is streamed as nested content under this tool call. - */ -export class SuperagentClientTool extends BaseClientTool { - static readonly id = 'superagent' - - constructor(toolCallId: string) { - super(toolCallId, SuperagentClientTool.id, SuperagentClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Superagent working', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Superagent working', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Superagent working', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Superagent completed', icon: Sparkles }, - [ClientToolCallState.error]: { text: 'Superagent failed', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Superagent skipped', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Superagent aborted', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Superagent working', - completedLabel: 'Superagent completed', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the superagent tool. - * This just marks the tool as executing - the actual work is done server-side - * by the superagent, and its output is streamed as subagent events. - */ - async execute(_args?: SuperagentArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(SuperagentClientTool.id, SuperagentClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/other/test.ts b/apps/sim/lib/copilot/tools/client/other/test.ts deleted file mode 100644 index 3aa698aad..000000000 --- a/apps/sim/lib/copilot/tools/client/other/test.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { FlaskConical, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface TestArgs { - instruction: string -} - -/** - * Test tool that spawns a subagent to run tests. - * This tool auto-executes and the actual work is done by the test subagent. - * The subagent's output is streamed as nested content under this tool call. - */ -export class TestClientTool extends BaseClientTool { - static readonly id = 'test' - - constructor(toolCallId: string) { - super(toolCallId, TestClientTool.id, TestClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Testing', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Testing', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Testing', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Tested', icon: FlaskConical }, - [ClientToolCallState.error]: { text: 'Failed to test', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped test', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted test', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Testing', - completedLabel: 'Tested', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the test tool. - * This just marks the tool as executing - the actual test work is done server-side - * by the test subagent, and its output is streamed as subagent events. 
- */ - async execute(_args?: TestArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(TestClientTool.id, TestClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/other/tour.ts b/apps/sim/lib/copilot/tools/client/other/tour.ts deleted file mode 100644 index 8faca5587..000000000 --- a/apps/sim/lib/copilot/tools/client/other/tour.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { Compass, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface TourArgs { - instruction: string -} - -/** - * Tour tool that spawns a subagent to guide the user. - * This tool auto-executes and the actual work is done by the tour subagent. - * The subagent's output is streamed as nested content under this tool call. - */ -export class TourClientTool extends BaseClientTool { - static readonly id = 'tour' - - constructor(toolCallId: string) { - super(toolCallId, TourClientTool.id, TourClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Touring', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Touring', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Touring', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Completed tour', icon: Compass }, - [ClientToolCallState.error]: { text: 'Failed tour', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped tour', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted tour', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Touring', - completedLabel: 'Tour complete', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the tour tool. 
- * This just marks the tool as executing - the actual tour work is done server-side - * by the tour subagent, and its output is streamed as subagent events. - */ - async execute(_args?: TourArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(TourClientTool.id, TourClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/other/workflow.ts b/apps/sim/lib/copilot/tools/client/other/workflow.ts deleted file mode 100644 index 5b99e73e9..000000000 --- a/apps/sim/lib/copilot/tools/client/other/workflow.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { GitBranch, Loader2, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' - -interface WorkflowArgs { - instruction: string -} - -/** - * Workflow tool that spawns a subagent to manage workflows. - * This tool auto-executes and the actual work is done by the workflow subagent. - * The subagent's output is streamed as nested content under this tool call. 
- */ -export class WorkflowClientTool extends BaseClientTool { - static readonly id = 'workflow' - - constructor(toolCallId: string) { - super(toolCallId, WorkflowClientTool.id, WorkflowClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Managing workflow', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Managing workflow', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Managing workflow', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Managed workflow', icon: GitBranch }, - [ClientToolCallState.error]: { text: 'Failed to manage workflow', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped workflow', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted workflow', icon: XCircle }, - }, - uiConfig: { - subagent: { - streamingLabel: 'Managing workflow', - completedLabel: 'Workflow managed', - shouldCollapse: true, - outputArtifacts: [], - }, - }, - } - - /** - * Execute the workflow tool. - * This just marks the tool as executing - the actual workflow work is done server-side - * by the workflow subagent, and its output is streamed as subagent events. - */ - async execute(_args?: WorkflowArgs): Promise { - this.setState(ClientToolCallState.executing) - } -} - -// Register UI config at module load -registerToolUIConfig(WorkflowClientTool.id, WorkflowClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/registry.ts b/apps/sim/lib/copilot/tools/client/registry.ts deleted file mode 100644 index 7dfb757aa..000000000 --- a/apps/sim/lib/copilot/tools/client/registry.ts +++ /dev/null @@ -1,34 +0,0 @@ -import { createLogger } from '@sim/logger' -import type { ClientToolDefinition, ToolExecutionContext } from '@/lib/copilot/tools/client/types' - -const logger = createLogger('ClientToolRegistry') - -const tools: Record> = {} - -export function registerTool(def: ClientToolDefinition) { - tools[def.name] = def -} - -export function getTool(name: string): ClientToolDefinition | undefined { - return tools[name] -} - -export function createExecutionContext(params: { - toolCallId: string - toolName: string -}): ToolExecutionContext { - const { toolCallId, toolName } = params - return { - toolCallId, - toolName, - log: (level, message, extra) => { - try { - logger[level](message, { toolCallId, toolName, ...(extra || {}) }) - } catch {} - }, - } -} - -export function getRegisteredTools(): Record> { - return { ...tools } -} diff --git a/apps/sim/lib/copilot/tools/client/tool-display-registry.ts b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts new file mode 100644 index 000000000..137ab0821 --- /dev/null +++ b/apps/sim/lib/copilot/tools/client/tool-display-registry.ts @@ -0,0 +1,2604 @@ +import type { LucideIcon } from 'lucide-react' +import { + Blocks, + BookOpen, + Bug, + Check, + CheckCircle, + CheckCircle2, + ClipboardCheck, + Compass, + Database, + FileCode, + FileText, + FlaskConical, + GitBranch, + Globe, + Globe2, + Grid2x2, + Grid2x2Check, + Grid2x2X, + Info, + Key, + KeyRound, + ListChecks, + ListFilter, + ListTodo, + Loader2, + MessageSquare, + MinusCircle, + Moon, + Navigation, + Pencil, + PencilLine, + Play, + PlugZap, + Plus, + Rocket, + Search, + Server, + Settings2, + Sparkles, + Tag, + TerminalSquare, + WorkflowIcon, + Wrench, + X, + XCircle, + Zap, +} from 'lucide-react' +import { getLatestBlock } from 
'@/blocks/registry' +import { getCustomTool } from '@/hooks/queries/custom-tools' +import { useWorkflowRegistry } from '@/stores/workflows/registry/store' +import { useWorkflowStore } from '@/stores/workflows/workflow/store' + +/** Resolve a block ID to its human-readable name from the workflow store. */ +function resolveBlockName(blockId: string | undefined): string | undefined { + if (!blockId) return undefined + try { + const blocks = useWorkflowStore.getState().blocks + return blocks[blockId]?.name || undefined + } catch { + return undefined + } +} + +export enum ClientToolCallState { + generating = 'generating', + pending = 'pending', + executing = 'executing', + aborted = 'aborted', + rejected = 'rejected', + success = 'success', + error = 'error', + review = 'review', + background = 'background', +} + +export interface ClientToolDisplay { + text: string + icon: LucideIcon +} + +export type DynamicTextFormatter = ( + params: Record, + state: ClientToolCallState +) => string | undefined + +export interface ToolUIConfig { + isSpecial?: boolean + subagent?: boolean + interrupt?: boolean + customRenderer?: string + paramsTable?: any + dynamicText?: DynamicTextFormatter + secondaryAction?: any + alwaysExpanded?: boolean + subagentLabels?: { + streaming: string + completed: string + } +} + +interface ToolMetadata { + displayNames: Partial> + interrupt?: { + accept: ClientToolDisplay + reject: ClientToolDisplay + } + getDynamicText?: DynamicTextFormatter + uiConfig?: { + isSpecial?: boolean + subagent?: { + streamingLabel?: string + completedLabel?: string + shouldCollapse?: boolean + outputArtifacts?: string[] + hideThinkingText?: boolean + } + interrupt?: any + customRenderer?: string + paramsTable?: any + secondaryAction?: any + alwaysExpanded?: boolean + } +} + +interface ToolDisplayEntry { + displayNames: Partial> + uiConfig?: ToolUIConfig +} + +type WorkflowDataType = 'global_variables' | 'custom_tools' | 'mcp_tools' | 'files' + +type NavigationDestination = 
'workflow' | 'logs' | 'templates' | 'vector_db' | 'settings' + +function formatDuration(seconds: number): string { + if (seconds < 60) return `${Math.round(seconds)}s` + const mins = Math.floor(seconds / 60) + const secs = Math.round(seconds % 60) + if (mins < 60) return secs > 0 ? `${mins}m ${secs}s` : `${mins}m` + const hours = Math.floor(mins / 60) + const remMins = mins % 60 + if (remMins > 0) return `${hours}h ${remMins}m` + return `${hours}h` +} + +function toUiConfig(metadata?: ToolMetadata): ToolUIConfig | undefined { + const legacy = metadata?.uiConfig + const subagent = legacy?.subagent + const dynamicText = metadata?.getDynamicText + // Check both nested uiConfig.interrupt AND top-level interrupt + const hasInterrupt = !!legacy?.interrupt || !!metadata?.interrupt + if (!legacy && !dynamicText && !hasInterrupt) return undefined + + const config: ToolUIConfig = { + isSpecial: legacy?.isSpecial === true, + subagent: !!legacy?.subagent, + interrupt: hasInterrupt, + customRenderer: legacy?.customRenderer, + paramsTable: legacy?.paramsTable, + dynamicText, + secondaryAction: legacy?.secondaryAction, + alwaysExpanded: legacy?.alwaysExpanded, + } + + if (subagent?.streamingLabel || subagent?.completedLabel) { + config.subagentLabels = { + streaming: subagent.streamingLabel || '', + completed: subagent.completedLabel || '', + } + } + + return config +} + +function toToolDisplayEntry(metadata?: ToolMetadata): ToolDisplayEntry { + return { + displayNames: metadata?.displayNames || {}, + uiConfig: toUiConfig(metadata), + } +} + +const META_auth: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Authenticating', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Authenticating', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Authenticating', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Authenticated', icon: KeyRound }, + [ClientToolCallState.error]: { text: 'Failed to authenticate', icon: XCircle }, + 
[ClientToolCallState.rejected]: { text: 'Skipped auth', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted auth', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Authenticating', + completedLabel: 'Authenticated', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + +const META_check_deployment_status: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Checking deployment status', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Checking deployment status', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Checking deployment status', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Checked deployment status', icon: Rocket }, + [ClientToolCallState.error]: { text: 'Failed to check deployment status', icon: X }, + [ClientToolCallState.aborted]: { + text: 'Aborted checking deployment status', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped checking deployment status', + icon: XCircle, + }, + }, + interrupt: undefined, +} + +const META_checkoff_todo: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Marking todo', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Marking todo', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Marked todo complete', icon: Check }, + [ClientToolCallState.error]: { text: 'Failed to mark todo', icon: XCircle }, + }, +} + +const META_crawl_website: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Crawling website', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Crawling website', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Crawling website', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Crawled website', icon: Globe }, + [ClientToolCallState.error]: { text: 'Failed to crawl website', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted crawling website', icon: 
MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped crawling website', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.url && typeof params.url === 'string') { + const url = params.url + + switch (state) { + case ClientToolCallState.success: + return `Crawled ${url}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Crawling ${url}` + case ClientToolCallState.error: + return `Failed to crawl ${url}` + case ClientToolCallState.aborted: + return `Aborted crawling ${url}` + case ClientToolCallState.rejected: + return `Skipped crawling ${url}` + } + } + return undefined + }, +} + +const META_create_workspace_mcp_server: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to create MCP server', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Create MCP server?', icon: Server }, + [ClientToolCallState.executing]: { text: 'Creating MCP server', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Created MCP server', icon: Server }, + [ClientToolCallState.error]: { text: 'Failed to create MCP server', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted creating MCP server', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped creating MCP server', icon: XCircle }, + }, + interrupt: { + accept: { text: 'Create', icon: Plus }, + reject: { text: 'Skip', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const name = params?.name || 'MCP server' + switch (state) { + case ClientToolCallState.success: + return `Created MCP server "${name}"` + case ClientToolCallState.executing: + return `Creating MCP server "${name}"` + case ClientToolCallState.generating: + return `Preparing to create "${name}"` + case ClientToolCallState.pending: + return `Create MCP server "${name}"?` + case ClientToolCallState.error: + return `Failed to create 
"${name}"` + } + return undefined + }, +} + +const META_custom_tool: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Managing custom tool', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Managing custom tool', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Managing custom tool', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Managed custom tool', icon: Wrench }, + [ClientToolCallState.error]: { text: 'Failed custom tool', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped custom tool', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted custom tool', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Managing custom tool', + completedLabel: 'Custom tool managed', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + +const META_build: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Building', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Building', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Building', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Built', icon: Wrench }, + [ClientToolCallState.error]: { text: 'Failed to build', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped build', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted build', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Building', + completedLabel: 'Built', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + +const META_debug: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Debugged', icon: Bug }, + [ClientToolCallState.error]: { text: 'Failed to debug', icon: XCircle }, + 
[ClientToolCallState.rejected]: { text: 'Skipped debug', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted debug', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Debugging', + completedLabel: 'Debugged', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + +const META_discovery: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Discovering', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Discovering', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Discovering', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Discovered', icon: Search }, + [ClientToolCallState.error]: { text: 'Failed to discover', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped discovery', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted discovery', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Discovering', + completedLabel: 'Discovered', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + +const META_deploy: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Deploying', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Deploying', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Deploying', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Deployed', icon: Rocket }, + [ClientToolCallState.error]: { text: 'Failed to deploy', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped deploy', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted deploy', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Deploying', + completedLabel: 'Deployed', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + +const META_deploy_api: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to deploy API', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Deploy as API?', 
icon: Rocket }, + [ClientToolCallState.executing]: { text: 'Deploying API', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Deployed API', icon: Rocket }, + [ClientToolCallState.error]: { text: 'Failed to deploy API', icon: XCircle }, + [ClientToolCallState.aborted]: { + text: 'Aborted deploying API', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped deploying API', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Deploy', icon: Rocket }, + reject: { text: 'Skip', icon: XCircle }, + }, + uiConfig: { + isSpecial: true, + interrupt: { + accept: { text: 'Deploy', icon: Rocket }, + reject: { text: 'Skip', icon: XCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + }, + getDynamicText: (params, state) => { + const action = params?.action === 'undeploy' ? 'undeploy' : 'deploy' + + const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId + const isAlreadyDeployed = workflowId + ? useWorkflowRegistry.getState().getWorkflowDeploymentStatus(workflowId)?.isDeployed + : false + + let actionText = action + let actionTextIng = action === 'undeploy' ? 'undeploying' : 'deploying' + const actionTextPast = action === 'undeploy' ? 
'undeployed' : 'deployed' + + if (action === 'deploy' && isAlreadyDeployed) { + actionText = 'redeploy' + actionTextIng = 'redeploying' + } + + const actionCapitalized = actionText.charAt(0).toUpperCase() + actionText.slice(1) + + switch (state) { + case ClientToolCallState.success: + return `API ${actionTextPast}` + case ClientToolCallState.executing: + return `${actionCapitalized}ing API` + case ClientToolCallState.generating: + return `Preparing to ${actionText} API` + case ClientToolCallState.pending: + return `${actionCapitalized} API?` + case ClientToolCallState.error: + return `Failed to ${actionText} API` + case ClientToolCallState.aborted: + return `Aborted ${actionTextIng} API` + case ClientToolCallState.rejected: + return `Skipped ${actionTextIng} API` + } + return undefined + }, +} + +const META_deploy_chat: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to deploy chat', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Deploy as chat?', icon: MessageSquare }, + [ClientToolCallState.executing]: { text: 'Deploying chat', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Deployed chat', icon: MessageSquare }, + [ClientToolCallState.error]: { text: 'Failed to deploy chat', icon: XCircle }, + [ClientToolCallState.aborted]: { + text: 'Aborted deploying chat', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped deploying chat', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Deploy Chat', icon: MessageSquare }, + reject: { text: 'Skip', icon: XCircle }, + }, + uiConfig: { + isSpecial: true, + interrupt: { + accept: { text: 'Deploy Chat', icon: MessageSquare }, + reject: { text: 'Skip', icon: XCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + }, + getDynamicText: (params, state) => { + const action = params?.action === 'undeploy' ? 'undeploy' : 'deploy' + + switch (state) { + case ClientToolCallState.success: + return action === 'undeploy' ? 
'Chat undeployed' : 'Chat deployed' + case ClientToolCallState.executing: + return action === 'undeploy' ? 'Undeploying chat' : 'Deploying chat' + case ClientToolCallState.generating: + return `Preparing to ${action} chat` + case ClientToolCallState.pending: + return action === 'undeploy' ? 'Undeploy chat?' : 'Deploy as chat?' + case ClientToolCallState.error: + return `Failed to ${action} chat` + case ClientToolCallState.aborted: + return action === 'undeploy' ? 'Aborted undeploying chat' : 'Aborted deploying chat' + case ClientToolCallState.rejected: + return action === 'undeploy' ? 'Skipped undeploying chat' : 'Skipped deploying chat' + } + return undefined + }, +} + +const META_deploy_mcp: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to deploy to MCP', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Deploy to MCP server?', icon: Server }, + [ClientToolCallState.executing]: { text: 'Deploying to MCP', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Deployed to MCP', icon: Server }, + [ClientToolCallState.error]: { text: 'Failed to deploy to MCP', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted MCP deployment', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped MCP deployment', icon: XCircle }, + }, + interrupt: { + accept: { text: 'Deploy', icon: Server }, + reject: { text: 'Skip', icon: XCircle }, + }, + uiConfig: { + isSpecial: true, + interrupt: { + accept: { text: 'Deploy', icon: Server }, + reject: { text: 'Skip', icon: XCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + }, + getDynamicText: (params, state) => { + const toolName = params?.toolName || 'workflow' + switch (state) { + case ClientToolCallState.success: + return `Deployed "${toolName}" to MCP` + case ClientToolCallState.executing: + return `Deploying "${toolName}" to MCP` + case ClientToolCallState.generating: + return `Preparing to deploy to MCP` + case 
ClientToolCallState.pending: + return `Deploy "${toolName}" to MCP?` + case ClientToolCallState.error: + return `Failed to deploy to MCP` + } + return undefined + }, +} + +const META_edit: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Editing', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Editing', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Editing', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Edited', icon: Pencil }, + [ClientToolCallState.error]: { text: 'Failed to apply edit', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped edit', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted edit', icon: XCircle }, + }, + uiConfig: { + isSpecial: true, + subagent: { + streamingLabel: 'Editing', + completedLabel: 'Edited', + shouldCollapse: false, // Edit subagent stays expanded + outputArtifacts: ['edit_summary'], + hideThinkingText: true, // We show WorkflowEditSummary instead + }, + }, +} + +const META_edit_workflow: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Editing your workflow', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Editing your workflow', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Edited your workflow', icon: Grid2x2Check }, + [ClientToolCallState.error]: { text: 'Failed to edit your workflow', icon: XCircle }, + [ClientToolCallState.review]: { text: 'Review your workflow changes', icon: Grid2x2 }, + [ClientToolCallState.rejected]: { text: 'Rejected workflow changes', icon: Grid2x2X }, + [ClientToolCallState.aborted]: { text: 'Aborted editing your workflow', icon: MinusCircle }, + [ClientToolCallState.pending]: { text: 'Editing your workflow', icon: Loader2 }, + }, + uiConfig: { + isSpecial: true, + customRenderer: 'edit_summary', + }, +} + +const META_evaluate: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Evaluating', icon: Loader2 }, + 
[ClientToolCallState.pending]: { text: 'Evaluating', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Evaluating', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Evaluated', icon: ClipboardCheck }, + [ClientToolCallState.error]: { text: 'Failed to evaluate', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped evaluation', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted evaluation', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Evaluating', + completedLabel: 'Evaluated', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + +const META_get_block_config: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Getting block config', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Getting block config', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Getting block config', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Retrieved block config', icon: FileCode }, + [ClientToolCallState.error]: { text: 'Failed to get block config', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted getting block config', icon: XCircle }, + [ClientToolCallState.rejected]: { + text: 'Skipped getting block config', + icon: MinusCircle, + }, + }, + getDynamicText: (params, state) => { + if (params?.blockType && typeof params.blockType === 'string') { + const blockConfig = getLatestBlock(params.blockType) + const blockName = (blockConfig?.name ?? params.blockType.replace(/_/g, ' ')).toLowerCase() + const opSuffix = params.operation ? 
` (${params.operation})` : '' + + switch (state) { + case ClientToolCallState.success: + return `Retrieved ${blockName}${opSuffix} config` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Retrieving ${blockName}${opSuffix} config` + case ClientToolCallState.error: + return `Failed to retrieve ${blockName}${opSuffix} config` + case ClientToolCallState.aborted: + return `Aborted retrieving ${blockName}${opSuffix} config` + case ClientToolCallState.rejected: + return `Skipped retrieving ${blockName}${opSuffix} config` + } + } + return undefined + }, +} + +const META_get_block_options: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Getting block operations', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Getting block operations', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Getting block operations', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Retrieved block operations', icon: ListFilter }, + [ClientToolCallState.error]: { text: 'Failed to get block operations', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted getting block operations', icon: XCircle }, + [ClientToolCallState.rejected]: { + text: 'Skipped getting block operations', + icon: MinusCircle, + }, + }, + getDynamicText: (params, state) => { + const blockId = + (params as any)?.blockId || + (params as any)?.blockType || + (params as any)?.block_id || + (params as any)?.block_type + if (typeof blockId === 'string') { + const blockConfig = getLatestBlock(blockId) + const blockName = (blockConfig?.name ?? 
blockId.replace(/_/g, ' ')).toLowerCase() + + switch (state) { + case ClientToolCallState.success: + return `Retrieved ${blockName} operations` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Retrieving ${blockName} operations` + case ClientToolCallState.error: + return `Failed to retrieve ${blockName} operations` + case ClientToolCallState.aborted: + return `Aborted retrieving ${blockName} operations` + case ClientToolCallState.rejected: + return `Skipped retrieving ${blockName} operations` + } + } + return undefined + }, +} + +const META_get_block_outputs: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Getting block outputs', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Getting block outputs', icon: Tag }, + [ClientToolCallState.executing]: { text: 'Getting block outputs', icon: Loader2 }, + [ClientToolCallState.aborted]: { text: 'Aborted getting outputs', icon: XCircle }, + [ClientToolCallState.success]: { text: 'Retrieved block outputs', icon: Tag }, + [ClientToolCallState.error]: { text: 'Failed to get outputs', icon: X }, + [ClientToolCallState.rejected]: { text: 'Skipped getting outputs', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const blockIds = params?.blockIds + if (blockIds && Array.isArray(blockIds) && blockIds.length > 0) { + const count = blockIds.length + switch (state) { + case ClientToolCallState.success: + return `Retrieved outputs for ${count} block${count > 1 ? 's' : ''}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Getting outputs for ${count} block${count > 1 ? 's' : ''}` + case ClientToolCallState.error: + return `Failed to get outputs for ${count} block${count > 1 ? 
's' : ''}` + } + } + return undefined + }, +} + +const META_get_block_upstream_references: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Getting upstream references', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Getting upstream references', icon: GitBranch }, + [ClientToolCallState.executing]: { text: 'Getting upstream references', icon: Loader2 }, + [ClientToolCallState.aborted]: { text: 'Aborted getting references', icon: XCircle }, + [ClientToolCallState.success]: { text: 'Retrieved upstream references', icon: GitBranch }, + [ClientToolCallState.error]: { text: 'Failed to get references', icon: X }, + [ClientToolCallState.rejected]: { text: 'Skipped getting references', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const blockIds = params?.blockIds + if (blockIds && Array.isArray(blockIds) && blockIds.length > 0) { + const count = blockIds.length + switch (state) { + case ClientToolCallState.success: + return `Retrieved references for ${count} block${count > 1 ? 's' : ''}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Getting references for ${count} block${count > 1 ? 's' : ''}` + case ClientToolCallState.error: + return `Failed to get references for ${count} block${count > 1 ? 
's' : ''}` + } + } + return undefined + }, +} + +const META_get_blocks_and_tools: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Exploring available options', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Exploring available options', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Exploring available options', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Explored available options', icon: Blocks }, + [ClientToolCallState.error]: { text: 'Failed to explore options', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted exploring options', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped exploring options', icon: MinusCircle }, + }, + interrupt: undefined, +} + +const META_get_blocks_metadata: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Searching block choices', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Searching block choices', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Searching block choices', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Searched block choices', icon: ListFilter }, + [ClientToolCallState.error]: { text: 'Failed to search block choices', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted searching block choices', icon: XCircle }, + [ClientToolCallState.rejected]: { + text: 'Skipped searching block choices', + icon: MinusCircle, + }, + }, + getDynamicText: (params, state) => { + if (params?.blockIds && Array.isArray(params.blockIds) && params.blockIds.length > 0) { + const blockList = params.blockIds + .slice(0, 3) + .map((blockId) => blockId.replace(/_/g, ' ')) + .join(', ') + const more = params.blockIds.length > 3 ? '...' 
: '' + const blocks = `${blockList}${more}` + + switch (state) { + case ClientToolCallState.success: + return `Searched ${blocks}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Searching ${blocks}` + case ClientToolCallState.error: + return `Failed to search ${blocks}` + case ClientToolCallState.aborted: + return `Aborted searching ${blocks}` + case ClientToolCallState.rejected: + return `Skipped searching ${blocks}` + } + } + return undefined + }, +} + +const META_get_credentials: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Fetching connected integrations', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Fetching connected integrations', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Fetching connected integrations', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Fetched connected integrations', icon: Key }, + [ClientToolCallState.error]: { + text: 'Failed to fetch connected integrations', + icon: XCircle, + }, + [ClientToolCallState.aborted]: { + text: 'Aborted fetching connected integrations', + icon: MinusCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped fetching connected integrations', + icon: MinusCircle, + }, + }, +} + +const META_get_examples_rag: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Fetching examples', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Fetching examples', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Fetching examples', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Fetched examples', icon: Search }, + [ClientToolCallState.error]: { text: 'Failed to fetch examples', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted getting examples', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped getting examples', icon: MinusCircle }, + }, + interrupt: undefined, + 
getDynamicText: (params, state) => { + if (params?.query && typeof params.query === 'string') { + const query = params.query + + switch (state) { + case ClientToolCallState.success: + return `Found examples for ${query}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Searching examples for ${query}` + case ClientToolCallState.error: + return `Failed to find examples for ${query}` + case ClientToolCallState.aborted: + return `Aborted searching examples for ${query}` + case ClientToolCallState.rejected: + return `Skipped searching examples for ${query}` + } + } + return undefined + }, +} + +const META_get_operations_examples: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Designing workflow component', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Designing workflow component', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Designing workflow component', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Designed workflow component', icon: Zap }, + [ClientToolCallState.error]: { text: 'Failed to design workflow component', icon: XCircle }, + [ClientToolCallState.aborted]: { + text: 'Aborted designing workflow component', + icon: MinusCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped designing workflow component', + icon: MinusCircle, + }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.query && typeof params.query === 'string') { + const query = params.query + + switch (state) { + case ClientToolCallState.success: + return `Designed ${query}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Designing ${query}` + case ClientToolCallState.error: + return `Failed to design ${query}` + case ClientToolCallState.aborted: + return `Aborted designing ${query}` + case ClientToolCallState.rejected: + return 
`Skipped designing ${query}` + } + } + return undefined + }, +} + +const META_get_platform_actions: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Viewing platform actions', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Viewing platform actions', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Viewing platform actions', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Viewed platform actions', icon: Navigation }, + [ClientToolCallState.error]: { text: 'Failed to view platform actions', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped platform actions', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted platform actions', icon: MinusCircle }, + }, +} + +const META_get_page_contents: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Getting page contents', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Getting page contents', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Getting page contents', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Retrieved page contents', icon: FileText }, + [ClientToolCallState.error]: { text: 'Failed to get page contents', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted getting page contents', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped getting page contents', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.urls && Array.isArray(params.urls) && params.urls.length > 0) { + const firstUrl = String(params.urls[0]) + const count = params.urls.length + + switch (state) { + case ClientToolCallState.success: + return count > 1 ? `Retrieved ${count} pages` : `Retrieved ${firstUrl}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return count > 1 ? 
`Getting ${count} pages` : `Getting ${firstUrl}` + case ClientToolCallState.error: + return count > 1 ? `Failed to get ${count} pages` : `Failed to get ${firstUrl}` + case ClientToolCallState.aborted: + return count > 1 ? `Aborted getting ${count} pages` : `Aborted getting ${firstUrl}` + case ClientToolCallState.rejected: + return count > 1 ? `Skipped getting ${count} pages` : `Skipped getting ${firstUrl}` + } + } + return undefined + }, +} + +const META_get_trigger_blocks: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Finding trigger blocks', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Finding trigger blocks', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Finding trigger blocks', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Found trigger blocks', icon: ListFilter }, + [ClientToolCallState.error]: { text: 'Failed to find trigger blocks', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted finding trigger blocks', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped finding trigger blocks', icon: MinusCircle }, + }, + interrupt: undefined, +} + +const META_get_trigger_examples: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Selecting a trigger', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Selecting a trigger', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Selecting a trigger', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Selected a trigger', icon: Zap }, + [ClientToolCallState.error]: { text: 'Failed to select a trigger', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted selecting a trigger', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped selecting a trigger', icon: MinusCircle }, + }, + interrupt: undefined, +} + +const META_get_user_workflow: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Reading your 
workflow', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Reading your workflow', icon: WorkflowIcon }, + [ClientToolCallState.executing]: { text: 'Reading your workflow', icon: Loader2 }, + [ClientToolCallState.aborted]: { text: 'Aborted reading your workflow', icon: XCircle }, + [ClientToolCallState.success]: { text: 'Read your workflow', icon: WorkflowIcon }, + [ClientToolCallState.error]: { text: 'Failed to read your workflow', icon: X }, + [ClientToolCallState.rejected]: { text: 'Skipped reading your workflow', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId + if (workflowId) { + const workflowName = useWorkflowRegistry.getState().workflows[workflowId]?.name + if (workflowName) { + switch (state) { + case ClientToolCallState.success: + return `Read ${workflowName}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Reading ${workflowName}` + case ClientToolCallState.error: + return `Failed to read ${workflowName}` + case ClientToolCallState.aborted: + return `Aborted reading ${workflowName}` + case ClientToolCallState.rejected: + return `Skipped reading ${workflowName}` + } + } + } + return undefined + }, +} + +const META_get_workflow_console: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Fetching execution logs', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Fetching execution logs', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Fetched execution logs', icon: TerminalSquare }, + [ClientToolCallState.error]: { text: 'Failed to fetch execution logs', icon: XCircle }, + [ClientToolCallState.rejected]: { + text: 'Skipped fetching execution logs', + icon: MinusCircle, + }, + [ClientToolCallState.aborted]: { + text: 'Aborted fetching execution logs', + icon: MinusCircle, + }, + [ClientToolCallState.pending]: { text: 
'Fetching execution logs', icon: Loader2 }, + }, + getDynamicText: (params, state) => { + const limit = params?.limit + if (limit && typeof limit === 'number') { + const logText = limit === 1 ? 'execution log' : 'execution logs' + + switch (state) { + case ClientToolCallState.success: + return `Fetched last ${limit} ${logText}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Fetching last ${limit} ${logText}` + case ClientToolCallState.error: + return `Failed to fetch last ${limit} ${logText}` + case ClientToolCallState.rejected: + return `Skipped fetching last ${limit} ${logText}` + case ClientToolCallState.aborted: + return `Aborted fetching last ${limit} ${logText}` + } + } + return undefined + }, +} + +const META_get_workflow_data: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Fetching workflow data', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Fetching workflow data', icon: Database }, + [ClientToolCallState.executing]: { text: 'Fetching workflow data', icon: Loader2 }, + [ClientToolCallState.aborted]: { text: 'Aborted fetching data', icon: XCircle }, + [ClientToolCallState.success]: { text: 'Retrieved workflow data', icon: Database }, + [ClientToolCallState.error]: { text: 'Failed to fetch data', icon: X }, + [ClientToolCallState.rejected]: { text: 'Skipped fetching data', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const dataType = params?.data_type as WorkflowDataType | undefined + if (!dataType) return undefined + + const typeLabels: Record = { + global_variables: 'variables', + custom_tools: 'custom tools', + mcp_tools: 'MCP tools', + files: 'files', + } + + const label = typeLabels[dataType] || dataType + + switch (state) { + case ClientToolCallState.success: + return `Retrieved ${label}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + return `Fetching ${label}` + case 
ClientToolCallState.pending: + return `Fetch ${label}?` + case ClientToolCallState.error: + return `Failed to fetch ${label}` + case ClientToolCallState.aborted: + return `Aborted fetching ${label}` + case ClientToolCallState.rejected: + return `Skipped fetching ${label}` + } + return undefined + }, +} + +const META_get_workflow_from_name: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Reading workflow', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Reading workflow', icon: FileText }, + [ClientToolCallState.executing]: { text: 'Reading workflow', icon: Loader2 }, + [ClientToolCallState.aborted]: { text: 'Aborted reading workflow', icon: XCircle }, + [ClientToolCallState.success]: { text: 'Read workflow', icon: FileText }, + [ClientToolCallState.error]: { text: 'Failed to read workflow', icon: X }, + [ClientToolCallState.rejected]: { text: 'Skipped reading workflow', icon: XCircle }, + }, + getDynamicText: (params, state) => { + if (params?.workflow_name && typeof params.workflow_name === 'string') { + const workflowName = params.workflow_name + + switch (state) { + case ClientToolCallState.success: + return `Read ${workflowName}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Reading ${workflowName}` + case ClientToolCallState.error: + return `Failed to read ${workflowName}` + case ClientToolCallState.aborted: + return `Aborted reading ${workflowName}` + case ClientToolCallState.rejected: + return `Skipped reading ${workflowName}` + } + } + return undefined + }, +} + +const META_info: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Getting info', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Getting info', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Getting info', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Retrieved info', icon: Info }, + [ClientToolCallState.error]: 
{ text: 'Failed to get info', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped info', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted info', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Getting info', + completedLabel: 'Info retrieved', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + +const META_knowledge: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Managing knowledge', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Managing knowledge', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Managing knowledge', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Managed knowledge', icon: BookOpen }, + [ClientToolCallState.error]: { text: 'Failed to manage knowledge', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped knowledge', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted knowledge', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Managing knowledge', + completedLabel: 'Knowledge managed', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + +const META_knowledge_base: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Accessing knowledge base', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Access knowledge base?', icon: Database }, + [ClientToolCallState.executing]: { text: 'Accessing knowledge base', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Accessed knowledge base', icon: Database }, + [ClientToolCallState.error]: { text: 'Failed to access knowledge base', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted knowledge base access', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped knowledge base access', icon: MinusCircle }, + }, + interrupt: { + accept: { text: 'Allow', icon: Database }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + getDynamicText: (params: 
Record, state: ClientToolCallState) => { + const operation = params?.operation as string | undefined + const name = params?.args?.name as string | undefined + + const opVerbs: Record = { + create: { + active: 'Creating knowledge base', + past: 'Created knowledge base', + pending: name ? `Create knowledge base "${name}"?` : 'Create knowledge base?', + }, + list: { active: 'Listing knowledge bases', past: 'Listed knowledge bases' }, + get: { active: 'Getting knowledge base', past: 'Retrieved knowledge base' }, + query: { active: 'Querying knowledge base', past: 'Queried knowledge base' }, + } + const defaultVerb: { active: string; past: string; pending?: string } = { + active: 'Accessing knowledge base', + past: 'Accessed knowledge base', + } + const verb = operation ? opVerbs[operation] || defaultVerb : defaultVerb + + if (state === ClientToolCallState.success) { + return verb.past + } + if (state === ClientToolCallState.pending && verb.pending) { + return verb.pending + } + if ( + state === ClientToolCallState.generating || + state === ClientToolCallState.pending || + state === ClientToolCallState.executing + ) { + return verb.active + } + return undefined + }, +} + +const META_list_user_workflows: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Listing your workflows', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Listing your workflows', icon: ListChecks }, + [ClientToolCallState.executing]: { text: 'Listing your workflows', icon: Loader2 }, + [ClientToolCallState.aborted]: { text: 'Aborted listing workflows', icon: XCircle }, + [ClientToolCallState.success]: { text: 'Listed your workflows', icon: ListChecks }, + [ClientToolCallState.error]: { text: 'Failed to list workflows', icon: X }, + [ClientToolCallState.rejected]: { text: 'Skipped listing workflows', icon: XCircle }, + }, +} + +const META_list_workspace_mcp_servers: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Getting MCP 
servers', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Getting MCP servers', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Getting MCP servers', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Retrieved MCP servers', icon: Server }, + [ClientToolCallState.error]: { text: 'Failed to get MCP servers', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted getting MCP servers', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped getting MCP servers', icon: XCircle }, + }, + interrupt: undefined, +} + +const META_make_api_request: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing API request', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Review API request', icon: Globe2 }, + [ClientToolCallState.executing]: { text: 'Executing API request', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Completed API request', icon: Globe2 }, + [ClientToolCallState.error]: { text: 'Failed to execute API request', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped API request', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted API request', icon: XCircle }, + }, + interrupt: { + accept: { text: 'Execute', icon: Globe2 }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + uiConfig: { + interrupt: { + accept: { text: 'Execute', icon: Globe2 }, + reject: { text: 'Skip', icon: MinusCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + paramsTable: { + columns: [ + { key: 'method', label: 'Method', width: '26%', editable: true, mono: true }, + { key: 'url', label: 'Endpoint', width: '74%', editable: true, mono: true }, + ], + extractRows: (params: Record): Array<[string, ...any[]]> => { + return [['request', (params.method || 'GET').toUpperCase(), params.url || '']] + }, + }, + }, + getDynamicText: (params, state) => { + if (params?.url && typeof params.url === 'string') { + const method = params.method 
|| 'GET' + let url = params.url + + // Extract domain from URL for cleaner display + try { + const urlObj = new URL(url) + url = urlObj.hostname + urlObj.pathname + } catch { + // Use URL as-is if parsing fails + } + + switch (state) { + case ClientToolCallState.success: + return `${method} ${url} complete` + case ClientToolCallState.executing: + return `${method} ${url}` + case ClientToolCallState.generating: + return `Preparing ${method} ${url}` + case ClientToolCallState.pending: + return `Review ${method} ${url}` + case ClientToolCallState.error: + return `Failed ${method} ${url}` + case ClientToolCallState.rejected: + return `Skipped ${method} ${url}` + case ClientToolCallState.aborted: + return `Aborted ${method} ${url}` + } + } + return undefined + }, +} + +const META_manage_custom_tool: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Managing custom tool', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Manage custom tool?', icon: Plus }, + [ClientToolCallState.executing]: { text: 'Managing custom tool', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Managed custom tool', icon: Check }, + [ClientToolCallState.error]: { text: 'Failed to manage custom tool', icon: X }, + [ClientToolCallState.aborted]: { + text: 'Aborted managing custom tool', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped managing custom tool', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Allow', icon: Check }, + reject: { text: 'Skip', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const operation = params?.operation as 'add' | 'edit' | 'delete' | 'list' | undefined + + if (!operation) return undefined + + let toolName = params?.schema?.function?.name + if (!toolName && params?.toolId) { + try { + const tool = getCustomTool(params.toolId) + toolName = tool?.schema?.function?.name + } catch { + // Ignore errors accessing cache + } + } + + const getActionText = (verb: 
'present' | 'past' | 'gerund') => { + switch (operation) { + case 'add': + return verb === 'present' ? 'Create' : verb === 'past' ? 'Created' : 'Creating' + case 'edit': + return verb === 'present' ? 'Edit' : verb === 'past' ? 'Edited' : 'Editing' + case 'delete': + return verb === 'present' ? 'Delete' : verb === 'past' ? 'Deleted' : 'Deleting' + case 'list': + return verb === 'present' ? 'List' : verb === 'past' ? 'Listed' : 'Listing' + default: + return verb === 'present' ? 'Manage' : verb === 'past' ? 'Managed' : 'Managing' + } + } + + // For add: only show tool name in past tense (success) + // For edit/delete: always show tool name + // For list: never show individual tool name, use plural + const shouldShowToolName = (currentState: ClientToolCallState) => { + if (operation === 'list') return false + if (operation === 'add') { + return currentState === ClientToolCallState.success + } + return true // edit and delete always show tool name + } + + const nameText = + operation === 'list' + ? ' custom tools' + : shouldShowToolName(state) && toolName + ? 
` ${toolName}` + : ' custom tool' + + switch (state) { + case ClientToolCallState.success: + return `${getActionText('past')}${nameText}` + case ClientToolCallState.executing: + return `${getActionText('gerund')}${nameText}` + case ClientToolCallState.generating: + return `${getActionText('gerund')}${nameText}` + case ClientToolCallState.pending: + return `${getActionText('present')}${nameText}?` + case ClientToolCallState.error: + return `Failed to ${getActionText('present')?.toLowerCase()}${nameText}` + case ClientToolCallState.aborted: + return `Aborted ${getActionText('gerund')?.toLowerCase()}${nameText}` + case ClientToolCallState.rejected: + return `Skipped ${getActionText('gerund')?.toLowerCase()}${nameText}` + } + return undefined + }, +} + +const META_manage_mcp_tool: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Managing MCP tool', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Manage MCP tool?', icon: Server }, + [ClientToolCallState.executing]: { text: 'Managing MCP tool', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Managed MCP tool', icon: Check }, + [ClientToolCallState.error]: { text: 'Failed to manage MCP tool', icon: X }, + [ClientToolCallState.aborted]: { + text: 'Aborted managing MCP tool', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped managing MCP tool', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Allow', icon: Check }, + reject: { text: 'Skip', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const operation = params?.operation as 'add' | 'edit' | 'delete' | undefined + + if (!operation) return undefined + + const serverName = params?.config?.name || params?.serverName + + const getActionText = (verb: 'present' | 'past' | 'gerund') => { + switch (operation) { + case 'add': + return verb === 'present' ? 'Add' : verb === 'past' ? 'Added' : 'Adding' + case 'edit': + return verb === 'present' ? 'Edit' : verb === 'past' ? 
'Edited' : 'Editing' + case 'delete': + return verb === 'present' ? 'Delete' : verb === 'past' ? 'Deleted' : 'Deleting' + } + } + + const shouldShowServerName = (currentState: ClientToolCallState) => { + if (operation === 'add') { + return currentState === ClientToolCallState.success + } + return true + } + + const nameText = shouldShowServerName(state) && serverName ? ` ${serverName}` : ' MCP tool' + + switch (state) { + case ClientToolCallState.success: + return `${getActionText('past')}${nameText}` + case ClientToolCallState.executing: + return `${getActionText('gerund')}${nameText}` + case ClientToolCallState.generating: + return `${getActionText('gerund')}${nameText}` + case ClientToolCallState.pending: + return `${getActionText('present')}${nameText}?` + case ClientToolCallState.error: + return `Failed to ${getActionText('present')?.toLowerCase()}${nameText}` + case ClientToolCallState.aborted: + return `Aborted ${getActionText('gerund')?.toLowerCase()}${nameText}` + case ClientToolCallState.rejected: + return `Skipped ${getActionText('gerund')?.toLowerCase()}${nameText}` + } + return undefined + }, +} + +const META_mark_todo_in_progress: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Marking todo in progress', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Marking todo in progress', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Marking todo in progress', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Marked todo in progress', icon: Loader2 }, + [ClientToolCallState.error]: { text: 'Failed to mark in progress', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted marking in progress', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped marking in progress', icon: MinusCircle }, + }, +} + +const META_navigate_ui: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to open', + icon: Loader2, + }, + 
[ClientToolCallState.pending]: { text: 'Open?', icon: Navigation }, + [ClientToolCallState.executing]: { text: 'Opening', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Opened', icon: Navigation }, + [ClientToolCallState.error]: { text: 'Failed to open', icon: X }, + [ClientToolCallState.aborted]: { + text: 'Aborted opening', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped opening', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Open', icon: Navigation }, + reject: { text: 'Skip', icon: XCircle }, + }, + getDynamicText: (params, state) => { + const destination = params?.destination as NavigationDestination | undefined + const workflowName = params?.workflowName + + const action = 'open' + const actionCapitalized = 'Open' + const actionPast = 'opened' + const actionIng = 'opening' + let target = '' + + if (destination === 'workflow' && workflowName) { + target = ` workflow "${workflowName}"` + } else if (destination === 'workflow') { + target = ' workflows' + } else if (destination === 'logs') { + target = ' logs' + } else if (destination === 'templates') { + target = ' templates' + } else if (destination === 'vector_db') { + target = ' vector database' + } else if (destination === 'settings') { + target = ' settings' + } + + const fullAction = `${action}${target}` + const fullActionCapitalized = `${actionCapitalized}${target}` + const fullActionPast = `${actionPast}${target}` + const fullActionIng = `${actionIng}${target}` + + switch (state) { + case ClientToolCallState.success: + return fullActionPast.charAt(0).toUpperCase() + fullActionPast.slice(1) + case ClientToolCallState.executing: + return fullActionIng.charAt(0).toUpperCase() + fullActionIng.slice(1) + case ClientToolCallState.generating: + return `Preparing to ${fullAction}` + case ClientToolCallState.pending: + return `${fullActionCapitalized}?` + case ClientToolCallState.error: + return `Failed to ${fullAction}` + case ClientToolCallState.aborted: + 
return `Aborted ${fullAction}` + case ClientToolCallState.rejected: + return `Skipped ${fullAction}` + } + return undefined + }, +} + +const META_oauth_request_access: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Requesting integration access', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Requesting integration access', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Requesting integration access', icon: Loader2 }, + [ClientToolCallState.rejected]: { text: 'Skipped integration access', icon: MinusCircle }, + [ClientToolCallState.success]: { text: 'Requested integration access', icon: CheckCircle }, + [ClientToolCallState.error]: { text: 'Failed to request integration access', icon: X }, + [ClientToolCallState.aborted]: { text: 'Aborted integration access request', icon: XCircle }, + }, + interrupt: { + accept: { text: 'Connect', icon: PlugZap }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + getDynamicText: (params, state) => { + if (params.providerName) { + const name = params.providerName + switch (state) { + case ClientToolCallState.generating: + case ClientToolCallState.pending: + case ClientToolCallState.executing: + return `Requesting ${name} access` + case ClientToolCallState.rejected: + return `Skipped ${name} access` + case ClientToolCallState.success: + return `Requested ${name} access` + case ClientToolCallState.error: + return `Failed to request ${name} access` + case ClientToolCallState.aborted: + return `Aborted ${name} access request` + } + } + return undefined + }, +} + +const META_plan: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Planning', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Planning', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Planning', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Planned', icon: ListTodo }, + [ClientToolCallState.error]: { text: 'Failed to plan', icon: XCircle }, + 
[ClientToolCallState.rejected]: { text: 'Skipped plan', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted plan', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Planning', + completedLabel: 'Planned', + shouldCollapse: true, + outputArtifacts: ['plan'], + }, + }, +} + +const META_redeploy: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Redeploying workflow', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Redeploy workflow', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Redeploying workflow', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Redeployed workflow', icon: Rocket }, + [ClientToolCallState.error]: { text: 'Failed to redeploy workflow', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted redeploy', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped redeploy', icon: XCircle }, + }, + interrupt: undefined, +} + +const META_remember_debug: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Validating fix', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Validating fix', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Validating fix', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Validated fix', icon: CheckCircle2 }, + [ClientToolCallState.error]: { text: 'Failed to validate', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted validation', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped validation', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + const operation = params?.operation + + if (operation === 'add' || operation === 'edit') { + // For add/edit, show from problem or solution + const text = params?.problem || params?.solution + if (text && typeof text === 'string') { + switch (state) { + case ClientToolCallState.success: + return `Validated fix ${text}` + case 
ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Validating fix ${text}` + case ClientToolCallState.error: + return `Failed to validate fix ${text}` + case ClientToolCallState.aborted: + return `Aborted validating fix ${text}` + case ClientToolCallState.rejected: + return `Skipped validating fix ${text}` + } + } + } else if (operation === 'delete') { + // For delete, show from problem or solution (or id as fallback) + const text = params?.problem || params?.solution || params?.id + if (text && typeof text === 'string') { + switch (state) { + case ClientToolCallState.success: + return `Adjusted fix ${text}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Adjusting fix ${text}` + case ClientToolCallState.error: + return `Failed to adjust fix ${text}` + case ClientToolCallState.aborted: + return `Aborted adjusting fix ${text}` + case ClientToolCallState.rejected: + return `Skipped adjusting fix ${text}` + } + } + } + + return undefined + }, +} + +const META_research: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Researching', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Researching', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Researching', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Researched', icon: Search }, + [ClientToolCallState.error]: { text: 'Failed to research', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped research', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted research', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Researching', + completedLabel: 'Researched', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + +const META_generate_api_key: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing to generate API key', icon: 
Loader2 }, + [ClientToolCallState.pending]: { text: 'Generate API key?', icon: KeyRound }, + [ClientToolCallState.executing]: { text: 'Generating API key', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Generated API key', icon: KeyRound }, + [ClientToolCallState.error]: { text: 'Failed to generate API key', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped generating API key', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted generating API key', icon: XCircle }, + }, + interrupt: { + accept: { text: 'Generate', icon: KeyRound }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + uiConfig: { + interrupt: { + accept: { text: 'Generate', icon: KeyRound }, + reject: { text: 'Skip', icon: MinusCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + }, + getDynamicText: (params, state) => { + const name = params?.name + if (name && typeof name === 'string') { + switch (state) { + case ClientToolCallState.success: + return `Generated API key "${name}"` + case ClientToolCallState.executing: + return `Generating API key "${name}"` + case ClientToolCallState.generating: + return `Preparing to generate "${name}"` + case ClientToolCallState.pending: + return `Generate API key "${name}"?` + case ClientToolCallState.error: + return `Failed to generate "${name}"` + } + } + return undefined + }, +} + +const META_run_block: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing to run block', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Run block?', icon: Play }, + [ClientToolCallState.executing]: { text: 'Running block', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Ran block', icon: Play }, + [ClientToolCallState.error]: { text: 'Failed to run block', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped running block', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted running block', icon: MinusCircle }, + 
[ClientToolCallState.background]: { text: 'Running block in background', icon: Play }, + }, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + uiConfig: { + isSpecial: true, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + secondaryAction: { + text: 'Move to Background', + title: 'Move to Background', + variant: 'tertiary', + showInStates: [ClientToolCallState.executing], + completionMessage: + 'The user has chosen to move the block execution to the background. Check back with them later to know when the block execution is complete', + targetState: ClientToolCallState.background, + }, + }, + getDynamicText: (params, state) => { + const blockId = params?.blockId || params?.block_id + if (blockId && typeof blockId === 'string') { + const name = resolveBlockName(blockId) || blockId + switch (state) { + case ClientToolCallState.success: + return `Ran ${name}` + case ClientToolCallState.executing: + return `Running ${name}` + case ClientToolCallState.generating: + return `Preparing to run ${name}` + case ClientToolCallState.pending: + return `Run ${name}?` + case ClientToolCallState.error: + return `Failed to run ${name}` + case ClientToolCallState.rejected: + return `Skipped running ${name}` + case ClientToolCallState.aborted: + return `Aborted running ${name}` + case ClientToolCallState.background: + return `Running ${name} in background` + } + } + return undefined + }, +} + +const META_run_from_block: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing to run from block', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Run from block?', icon: Play }, + [ClientToolCallState.executing]: { text: 'Running from block', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Ran from block', icon: Play }, + [ClientToolCallState.error]: { text: 'Failed to run 
from block', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped running from block', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted running from block', icon: MinusCircle }, + [ClientToolCallState.background]: { text: 'Running from block in background', icon: Play }, + }, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + uiConfig: { + isSpecial: true, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + secondaryAction: { + text: 'Move to Background', + title: 'Move to Background', + variant: 'tertiary', + showInStates: [ClientToolCallState.executing], + completionMessage: + 'The user has chosen to move the workflow execution to the background. Check back with them later to know when the workflow execution is complete', + targetState: ClientToolCallState.background, + }, + }, + getDynamicText: (params, state) => { + const blockId = params?.startBlockId || params?.start_block_id + if (blockId && typeof blockId === 'string') { + const name = resolveBlockName(blockId) || blockId + switch (state) { + case ClientToolCallState.success: + return `Ran from ${name}` + case ClientToolCallState.executing: + return `Running from ${name}` + case ClientToolCallState.generating: + return `Preparing to run from ${name}` + case ClientToolCallState.pending: + return `Run from ${name}?` + case ClientToolCallState.error: + return `Failed to run from ${name}` + case ClientToolCallState.rejected: + return `Skipped running from ${name}` + case ClientToolCallState.aborted: + return `Aborted running from ${name}` + case ClientToolCallState.background: + return `Running from ${name} in background` + } + } + return undefined + }, +} + +const META_run_workflow_until_block: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing to run until block', icon: 
Loader2 }, + [ClientToolCallState.pending]: { text: 'Run until block?', icon: Play }, + [ClientToolCallState.executing]: { text: 'Running until block', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Ran until block', icon: Play }, + [ClientToolCallState.error]: { text: 'Failed to run until block', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped running until block', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted running until block', icon: MinusCircle }, + [ClientToolCallState.background]: { text: 'Running until block in background', icon: Play }, + }, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + uiConfig: { + isSpecial: true, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + secondaryAction: { + text: 'Move to Background', + title: 'Move to Background', + variant: 'tertiary', + showInStates: [ClientToolCallState.executing], + completionMessage: + 'The user has chosen to move the workflow execution to the background. 
Check back with them later to know when the workflow execution is complete', + targetState: ClientToolCallState.background, + }, + }, + getDynamicText: (params, state) => { + const blockId = params?.stopAfterBlockId || params?.stop_after_block_id + if (blockId && typeof blockId === 'string') { + const name = resolveBlockName(blockId) || blockId + switch (state) { + case ClientToolCallState.success: + return `Ran until ${name}` + case ClientToolCallState.executing: + return `Running until ${name}` + case ClientToolCallState.generating: + return `Preparing to run until ${name}` + case ClientToolCallState.pending: + return `Run until ${name}?` + case ClientToolCallState.error: + return `Failed to run until ${name}` + case ClientToolCallState.rejected: + return `Skipped running until ${name}` + case ClientToolCallState.aborted: + return `Aborted running until ${name}` + case ClientToolCallState.background: + return `Running until ${name} in background` + } + } + return undefined + }, +} + +const META_run_workflow: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing to run your workflow', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Run this workflow?', icon: Play }, + [ClientToolCallState.executing]: { text: 'Running your workflow', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Executed workflow', icon: Play }, + [ClientToolCallState.error]: { text: 'Errored running workflow', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped workflow execution', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted workflow execution', icon: MinusCircle }, + [ClientToolCallState.background]: { text: 'Running in background', icon: Play }, + }, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + }, + uiConfig: { + isSpecial: true, + interrupt: { + accept: { text: 'Run', icon: Play }, + reject: { text: 'Skip', icon: MinusCircle }, + 
showAllowOnce: true, + showAllowAlways: true, + }, + secondaryAction: { + text: 'Move to Background', + title: 'Move to Background', + variant: 'tertiary', + showInStates: [ClientToolCallState.executing], + completionMessage: + 'The user has chosen to move the workflow execution to the background. Check back with them later to know when the workflow execution is complete', + targetState: ClientToolCallState.background, + }, + paramsTable: { + columns: [ + { key: 'input', label: 'Input', width: '36%' }, + { key: 'value', label: 'Value', width: '64%', editable: true, mono: true }, + ], + extractRows: (params: Record): Array<[string, ...any[]]> => { + let inputs = params.input || params.inputs || params.workflow_input + if (typeof inputs === 'string') { + try { + inputs = JSON.parse(inputs) + } catch { + inputs = {} + } + } + if (params.workflow_input && typeof params.workflow_input === 'object') { + inputs = params.workflow_input + } + if (!inputs || typeof inputs !== 'object') { + const { workflowId, workflow_input, ...rest } = params + inputs = rest + } + const safeInputs = inputs && typeof inputs === 'object' ? 
inputs : {} + return Object.entries(safeInputs).map(([key, value]) => [key, key, String(value)]) + }, + }, + }, + getDynamicText: (params, state) => { + const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId + if (workflowId) { + const workflowName = useWorkflowRegistry.getState().workflows[workflowId]?.name + if (workflowName) { + switch (state) { + case ClientToolCallState.success: + return `Ran ${workflowName}` + case ClientToolCallState.executing: + return `Running ${workflowName}` + case ClientToolCallState.generating: + return `Preparing to run ${workflowName}` + case ClientToolCallState.pending: + return `Run ${workflowName}?` + case ClientToolCallState.error: + return `Failed to run ${workflowName}` + case ClientToolCallState.rejected: + return `Skipped running ${workflowName}` + case ClientToolCallState.aborted: + return `Aborted running ${workflowName}` + case ClientToolCallState.background: + return `Running ${workflowName} in background` + } + } + } + return undefined + }, +} + +const META_scrape_page: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Scraping page', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Scraping page', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Scraping page', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Scraped page', icon: Globe }, + [ClientToolCallState.error]: { text: 'Failed to scrape page', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted scraping page', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped scraping page', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.url && typeof params.url === 'string') { + const url = params.url + + switch (state) { + case ClientToolCallState.success: + return `Scraped ${url}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case 
ClientToolCallState.pending: + return `Scraping ${url}` + case ClientToolCallState.error: + return `Failed to scrape ${url}` + case ClientToolCallState.aborted: + return `Aborted scraping ${url}` + case ClientToolCallState.rejected: + return `Skipped scraping ${url}` + } + } + return undefined + }, +} + +const META_search_documentation: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Searching documentation', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Searching documentation', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Searching documentation', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Completed documentation search', icon: BookOpen }, + [ClientToolCallState.error]: { text: 'Failed to search docs', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted documentation search', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped documentation search', icon: MinusCircle }, + }, + getDynamicText: (params, state) => { + if (params?.query && typeof params.query === 'string') { + const query = params.query + + switch (state) { + case ClientToolCallState.success: + return `Searched docs for ${query}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Searching docs for ${query}` + case ClientToolCallState.error: + return `Failed to search docs for ${query}` + case ClientToolCallState.aborted: + return `Aborted searching docs for ${query}` + case ClientToolCallState.rejected: + return `Skipped searching docs for ${query}` + } + } + return undefined + }, +} + +const META_search_errors: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Debugging', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Debugged', icon: Bug 
}, + [ClientToolCallState.error]: { text: 'Failed to debug', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted debugging', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped debugging', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.query && typeof params.query === 'string') { + const query = params.query + + switch (state) { + case ClientToolCallState.success: + return `Debugged ${query}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Debugging ${query}` + case ClientToolCallState.error: + return `Failed to debug ${query}` + case ClientToolCallState.aborted: + return `Aborted debugging ${query}` + case ClientToolCallState.rejected: + return `Skipped debugging ${query}` + } + } + return undefined + }, +} + +const META_search_library_docs: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Reading docs', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Reading docs', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Reading docs', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Read docs', icon: BookOpen }, + [ClientToolCallState.error]: { text: 'Failed to read docs', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted reading docs', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped reading docs', icon: MinusCircle }, + }, + getDynamicText: (params, state) => { + const libraryName = params?.library_name + if (libraryName && typeof libraryName === 'string') { + switch (state) { + case ClientToolCallState.success: + return `Read ${libraryName} docs` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Reading ${libraryName} docs` + case ClientToolCallState.error: + return `Failed to read ${libraryName} docs` + case 
ClientToolCallState.aborted: + return `Aborted reading ${libraryName} docs` + case ClientToolCallState.rejected: + return `Skipped reading ${libraryName} docs` + } + } + return undefined + }, +} + +const META_search_online: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Searching online', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Searching online', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Searching online', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Completed online search', icon: Globe }, + [ClientToolCallState.error]: { text: 'Failed to search online', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped online search', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted online search', icon: XCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.query && typeof params.query === 'string') { + const query = params.query + + switch (state) { + case ClientToolCallState.success: + return `Searched online for ${query}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Searching online for ${query}` + case ClientToolCallState.error: + return `Failed to search online for ${query}` + case ClientToolCallState.aborted: + return `Aborted searching online for ${query}` + case ClientToolCallState.rejected: + return `Skipped searching online for ${query}` + } + } + return undefined + }, +} + +const META_search_patterns: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Searching workflow patterns', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Searching workflow patterns', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Searching workflow patterns', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Found workflow patterns', icon: Search }, + [ClientToolCallState.error]: { text: 'Failed 
to search patterns', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted pattern search', icon: MinusCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped pattern search', icon: MinusCircle }, + }, + interrupt: undefined, + getDynamicText: (params, state) => { + if (params?.queries && Array.isArray(params.queries) && params.queries.length > 0) { + const firstQuery = String(params.queries[0]) + + switch (state) { + case ClientToolCallState.success: + return `Searched ${firstQuery}` + case ClientToolCallState.executing: + case ClientToolCallState.generating: + case ClientToolCallState.pending: + return `Searching ${firstQuery}` + case ClientToolCallState.error: + return `Failed to search ${firstQuery}` + case ClientToolCallState.aborted: + return `Aborted searching ${firstQuery}` + case ClientToolCallState.rejected: + return `Skipped searching ${firstQuery}` + } + } + return undefined + }, +} + +const META_set_environment_variables: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to set environment variables', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Set environment variables?', icon: Settings2 }, + [ClientToolCallState.executing]: { text: 'Setting environment variables', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Set environment variables', icon: Settings2 }, + [ClientToolCallState.error]: { text: 'Failed to set environment variables', icon: X }, + [ClientToolCallState.aborted]: { + text: 'Aborted setting environment variables', + icon: XCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped setting environment variables', + icon: XCircle, + }, + }, + interrupt: { + accept: { text: 'Apply', icon: Settings2 }, + reject: { text: 'Skip', icon: XCircle }, + }, + uiConfig: { + alwaysExpanded: true, + interrupt: { + accept: { text: 'Apply', icon: Settings2 }, + reject: { text: 'Skip', icon: XCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + 
paramsTable: { + columns: [ + { key: 'name', label: 'Variable', width: '36%', editable: true }, + { key: 'value', label: 'Value', width: '64%', editable: true, mono: true }, + ], + extractRows: (params: Record): Array<[string, ...any[]]> => { + const variables = params.variables || {} + const entries = Array.isArray(variables) + ? variables.map((v: any, i: number) => [String(i), v.name || `var_${i}`, v.value || '']) + : Object.entries(variables).map(([key, val]) => { + if (typeof val === 'object' && val !== null && 'value' in (val as any)) { + return [key, key, (val as any).value] + } + return [key, key, val] + }) + return entries as Array<[string, ...any[]]> + }, + }, + }, + getDynamicText: (params, state) => { + if (params?.variables && typeof params.variables === 'object') { + const count = Object.keys(params.variables).length + const varText = count === 1 ? 'variable' : 'variables' + + switch (state) { + case ClientToolCallState.success: + return `Set ${count} ${varText}` + case ClientToolCallState.executing: + return `Setting ${count} ${varText}` + case ClientToolCallState.generating: + return `Preparing to set ${count} ${varText}` + case ClientToolCallState.pending: + return `Set ${count} ${varText}?` + case ClientToolCallState.error: + return `Failed to set ${count} ${varText}` + case ClientToolCallState.aborted: + return `Aborted setting ${count} ${varText}` + case ClientToolCallState.rejected: + return `Skipped setting ${count} ${varText}` + } + } + return undefined + }, +} + +const META_set_global_workflow_variables: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { + text: 'Preparing to set workflow variables', + icon: Loader2, + }, + [ClientToolCallState.pending]: { text: 'Set workflow variables?', icon: Settings2 }, + [ClientToolCallState.executing]: { text: 'Setting workflow variables', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Updated workflow variables', icon: Settings2 }, + [ClientToolCallState.error]: { 
text: 'Failed to set workflow variables', icon: X }, + [ClientToolCallState.aborted]: { text: 'Aborted setting variables', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped setting variables', icon: XCircle }, + }, + interrupt: { + accept: { text: 'Apply', icon: Settings2 }, + reject: { text: 'Skip', icon: XCircle }, + }, + uiConfig: { + interrupt: { + accept: { text: 'Apply', icon: Settings2 }, + reject: { text: 'Skip', icon: XCircle }, + showAllowOnce: true, + showAllowAlways: true, + }, + paramsTable: { + columns: [ + { key: 'name', label: 'Name', width: '40%', editable: true, mono: true }, + { key: 'value', label: 'Value', width: '60%', editable: true, mono: true }, + ], + extractRows: (params: Record): Array<[string, ...any[]]> => { + const operations = params.operations || [] + return operations.map((op: any, idx: number) => [ + String(idx), + op.name || '', + String(op.value ?? ''), + ]) + }, + }, + }, + getDynamicText: (params, state) => { + if (params?.operations && Array.isArray(params.operations)) { + const varNames = params.operations + .slice(0, 2) + .map((op: any) => op.name) + .filter(Boolean) + + if (varNames.length > 0) { + const varList = varNames.join(', ') + const more = params.operations.length > 2 ? '...' 
: '' + const displayText = `${varList}${more}` + + switch (state) { + case ClientToolCallState.success: + return `Set ${displayText}` + case ClientToolCallState.executing: + return `Setting ${displayText}` + case ClientToolCallState.generating: + return `Preparing to set ${displayText}` + case ClientToolCallState.pending: + return `Set ${displayText}?` + case ClientToolCallState.error: + return `Failed to set ${displayText}` + case ClientToolCallState.aborted: + return `Aborted setting ${displayText}` + case ClientToolCallState.rejected: + return `Skipped setting ${displayText}` + } + } + } + return undefined + }, +} + +const META_sleep: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Preparing to sleep', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Sleeping', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Sleeping', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Finished sleeping', icon: Moon }, + [ClientToolCallState.error]: { text: 'Interrupted sleep', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped sleep', icon: MinusCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted sleep', icon: MinusCircle }, + [ClientToolCallState.background]: { text: 'Resumed', icon: Moon }, + }, + uiConfig: { + secondaryAction: { + text: 'Wake', + title: 'Wake', + variant: 'tertiary', + showInStates: [ClientToolCallState.executing], + targetState: ClientToolCallState.background, + }, + }, + // No interrupt - auto-execute immediately + getDynamicText: (params, state) => { + const seconds = params?.seconds + if (typeof seconds === 'number' && seconds > 0) { + const displayTime = formatDuration(seconds) + switch (state) { + case ClientToolCallState.success: + return `Slept for ${displayTime}` + case ClientToolCallState.executing: + case ClientToolCallState.pending: + return `Sleeping for ${displayTime}` + case ClientToolCallState.generating: + return `Preparing to sleep for ${displayTime}` + 
case ClientToolCallState.error: + return `Failed to sleep for ${displayTime}` + case ClientToolCallState.rejected: + return `Skipped sleeping for ${displayTime}` + case ClientToolCallState.aborted: + return `Aborted sleeping for ${displayTime}` + case ClientToolCallState.background: { + // Calculate elapsed time from when sleep started + const elapsedSeconds = params?._elapsedSeconds + if (typeof elapsedSeconds === 'number' && elapsedSeconds > 0) { + return `Resumed after ${formatDuration(Math.round(elapsedSeconds))}` + } + return 'Resumed early' + } + } + } + return undefined + }, +} + +const META_summarize_conversation: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Summarizing conversation', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Summarizing conversation', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Summarizing conversation', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Summarized conversation', icon: PencilLine }, + [ClientToolCallState.error]: { text: 'Failed to summarize conversation', icon: XCircle }, + [ClientToolCallState.aborted]: { + text: 'Aborted summarizing conversation', + icon: MinusCircle, + }, + [ClientToolCallState.rejected]: { + text: 'Skipped summarizing conversation', + icon: MinusCircle, + }, + }, + interrupt: undefined, +} + +const META_superagent: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Superagent working', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Superagent working', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Superagent working', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Superagent completed', icon: Sparkles }, + [ClientToolCallState.error]: { text: 'Superagent failed', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Superagent skipped', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Superagent aborted', icon: XCircle }, + }, + uiConfig: { + 
subagent: { + streamingLabel: 'Superagent working', + completedLabel: 'Superagent completed', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + +const META_test: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Testing', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Testing', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Testing', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Tested', icon: FlaskConical }, + [ClientToolCallState.error]: { text: 'Failed to test', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped test', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted test', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Testing', + completedLabel: 'Tested', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + +const META_tour: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Touring', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Touring', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Touring', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Completed tour', icon: Compass }, + [ClientToolCallState.error]: { text: 'Failed tour', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped tour', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted tour', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Touring', + completedLabel: 'Tour complete', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + +const META_workflow: ToolMetadata = { + displayNames: { + [ClientToolCallState.generating]: { text: 'Managing workflow', icon: Loader2 }, + [ClientToolCallState.pending]: { text: 'Managing workflow', icon: Loader2 }, + [ClientToolCallState.executing]: { text: 'Managing workflow', icon: Loader2 }, + [ClientToolCallState.success]: { text: 'Managed workflow', icon: GitBranch }, + [ClientToolCallState.error]: 
{ text: 'Failed to manage workflow', icon: XCircle }, + [ClientToolCallState.rejected]: { text: 'Skipped workflow', icon: XCircle }, + [ClientToolCallState.aborted]: { text: 'Aborted workflow', icon: XCircle }, + }, + uiConfig: { + subagent: { + streamingLabel: 'Managing workflow', + completedLabel: 'Workflow managed', + shouldCollapse: true, + outputArtifacts: [], + }, + }, +} + +const TOOL_METADATA_BY_ID: Record = { + auth: META_auth, + check_deployment_status: META_check_deployment_status, + checkoff_todo: META_checkoff_todo, + crawl_website: META_crawl_website, + create_workspace_mcp_server: META_create_workspace_mcp_server, + build: META_build, + custom_tool: META_custom_tool, + debug: META_debug, + deploy: META_deploy, + discovery: META_discovery, + deploy_api: META_deploy_api, + deploy_chat: META_deploy_chat, + deploy_mcp: META_deploy_mcp, + edit: META_edit, + edit_workflow: META_edit_workflow, + evaluate: META_evaluate, + get_block_config: META_get_block_config, + get_block_options: META_get_block_options, + get_block_outputs: META_get_block_outputs, + get_block_upstream_references: META_get_block_upstream_references, + get_blocks_and_tools: META_get_blocks_and_tools, + get_blocks_metadata: META_get_blocks_metadata, + get_credentials: META_get_credentials, + generate_api_key: META_generate_api_key, + get_examples_rag: META_get_examples_rag, + get_operations_examples: META_get_operations_examples, + get_page_contents: META_get_page_contents, + get_platform_actions: META_get_platform_actions, + get_trigger_blocks: META_get_trigger_blocks, + get_trigger_examples: META_get_trigger_examples, + get_user_workflow: META_get_user_workflow, + get_workflow_console: META_get_workflow_console, + get_workflow_data: META_get_workflow_data, + get_workflow_from_name: META_get_workflow_from_name, + info: META_info, + knowledge: META_knowledge, + knowledge_base: META_knowledge_base, + list_user_workflows: META_list_user_workflows, + list_workspace_mcp_servers: 
META_list_workspace_mcp_servers, + make_api_request: META_make_api_request, + manage_custom_tool: META_manage_custom_tool, + manage_mcp_tool: META_manage_mcp_tool, + mark_todo_in_progress: META_mark_todo_in_progress, + navigate_ui: META_navigate_ui, + oauth_request_access: META_oauth_request_access, + plan: META_plan, + redeploy: META_redeploy, + remember_debug: META_remember_debug, + research: META_research, + run_block: META_run_block, + run_from_block: META_run_from_block, + run_workflow: META_run_workflow, + run_workflow_until_block: META_run_workflow_until_block, + scrape_page: META_scrape_page, + search_documentation: META_search_documentation, + search_errors: META_search_errors, + search_library_docs: META_search_library_docs, + search_online: META_search_online, + search_patterns: META_search_patterns, + set_environment_variables: META_set_environment_variables, + set_global_workflow_variables: META_set_global_workflow_variables, + sleep: META_sleep, + summarize_conversation: META_summarize_conversation, + superagent: META_superagent, + test: META_test, + tour: META_tour, + workflow: META_workflow, +} + +export const TOOL_DISPLAY_REGISTRY: Record = Object.fromEntries( + Object.entries(TOOL_METADATA_BY_ID).map(([toolName, metadata]) => [ + toolName, + toToolDisplayEntry(metadata), + ]) +) diff --git a/apps/sim/lib/copilot/tools/client/types.ts b/apps/sim/lib/copilot/tools/client/types.ts deleted file mode 100644 index 0f8ded86d..000000000 --- a/apps/sim/lib/copilot/tools/client/types.ts +++ /dev/null @@ -1,33 +0,0 @@ -import type { BaseClientToolMetadata } from '@/lib/copilot/tools/client/base-tool' -import { ClientToolCallState } from '@/lib/copilot/tools/client/base-tool' - -export interface ToolExecutionContext { - toolCallId: string - toolName: string - // Logging only; tools must not mutate store state directly - log: ( - level: 'debug' | 'info' | 'warn' | 'error', - message: string, - extra?: Record - ) => void -} - -export interface ToolRunResult { - 
status: number - message?: any - data?: any -} - -export interface ClientToolDefinition { - name: string - metadata?: BaseClientToolMetadata - // Return true if this tool requires user confirmation before execution - hasInterrupt?: boolean | ((args?: Args) => boolean) - // Main execution entry point. Returns a result for the store to handle. - execute: (ctx: ToolExecutionContext, args?: Args) => Promise - // Optional accept/reject handlers for interrupt flows - accept?: (ctx: ToolExecutionContext, args?: Args) => Promise - reject?: (ctx: ToolExecutionContext, args?: Args) => Promise -} - -export { ClientToolCallState } diff --git a/apps/sim/lib/copilot/tools/client/ui-config.ts b/apps/sim/lib/copilot/tools/client/ui-config.ts deleted file mode 100644 index 6fac1645c..000000000 --- a/apps/sim/lib/copilot/tools/client/ui-config.ts +++ /dev/null @@ -1,238 +0,0 @@ -/** - * UI Configuration Types for Copilot Tools - * - * This module defines the configuration interfaces that control how tools - * are rendered in the tool-call component. All UI behavior should be defined - * here rather than hardcoded in the rendering component. - */ -import type { LucideIcon } from 'lucide-react' -import type { ClientToolCallState } from './base-tool' - -/** - * Configuration for a params table column - */ -export interface ParamsTableColumn { - /** Key to extract from params */ - key: string - /** Display label for the column header */ - label: string - /** Width as percentage or CSS value */ - width?: string - /** Whether values in this column are editable */ - editable?: boolean - /** Whether to use monospace font */ - mono?: boolean - /** Whether to mask the value (for passwords) */ - masked?: boolean -} - -/** - * Configuration for params table rendering - */ -export interface ParamsTableConfig { - /** Column definitions */ - columns: ParamsTableColumn[] - /** - * Extract rows from tool params. - * Returns array of [key, ...cellValues] for each row. 
- */ - extractRows: (params: Record) => Array<[string, ...any[]]> - /** - * Optional: Update params when a cell is edited. - * Returns the updated params object. - */ - updateCell?: ( - params: Record, - rowKey: string, - columnKey: string, - newValue: any - ) => Record -} - -/** - * Configuration for secondary action button (like "Move to Background") - */ -export interface SecondaryActionConfig { - /** Button text */ - text: string - /** Button title/tooltip */ - title?: string - /** Button variant */ - variant?: 'tertiary' | 'default' | 'outline' - /** States in which to show this button */ - showInStates: ClientToolCallState[] - /** - * Message to send when the action is triggered. - * Used by markToolComplete. - */ - completionMessage?: string - /** - * Target state after action. - * If not provided, defaults to 'background'. - */ - targetState?: ClientToolCallState -} - -/** - * Configuration for subagent tools (tools that spawn subagents) - */ -export interface SubagentConfig { - /** Label shown while streaming (e.g., "Planning", "Editing") */ - streamingLabel: string - /** Label shown when complete (e.g., "Planned", "Edited") */ - completedLabel: string - /** - * Whether the content should collapse when streaming ends. - * Default: true - */ - shouldCollapse?: boolean - /** - * Output artifacts that should NOT be collapsed. - * These are rendered outside the collapsible content. - * Examples: 'plan' for PlanSteps, 'options' for OptionsSelector - */ - outputArtifacts?: Array<'plan' | 'options' | 'edit_summary'> - /** - * Whether this subagent renders its own specialized content - * and the thinking text should be minimal or hidden. - * Used for tools like 'edit' where we show WorkflowEditSummary instead. 
- */ - hideThinkingText?: boolean -} - -/** - * Interrupt button configuration - */ -export interface InterruptButtonConfig { - text: string - icon: LucideIcon -} - -/** - * Configuration for interrupt behavior (Run/Skip buttons) - */ -export interface InterruptConfig { - /** Accept button config */ - accept: InterruptButtonConfig - /** Reject button config */ - reject: InterruptButtonConfig - /** - * Whether to show "Allow Once" button (default accept behavior). - * Default: true - */ - showAllowOnce?: boolean - /** - * Whether to show "Allow Always" button (auto-approve this tool in future). - * Default: true for most tools - */ - showAllowAlways?: boolean -} - -/** - * Complete UI configuration for a tool - */ -export interface ToolUIConfig { - /** - * Whether this is a "special" tool that gets gradient styling. - * Used for workflow operation tools like edit_workflow, build_workflow, etc. - */ - isSpecial?: boolean - - /** - * Interrupt configuration for tools that require user confirmation. - * If not provided, tool auto-executes. - */ - interrupt?: InterruptConfig - - /** - * Secondary action button (like "Move to Background" for run_workflow) - */ - secondaryAction?: SecondaryActionConfig - - /** - * Configuration for rendering params as a table. - * If provided, tool will show an expandable/inline table. - */ - paramsTable?: ParamsTableConfig - - /** - * Subagent configuration for tools that spawn subagents. - * If provided, tool is treated as a subagent tool. - */ - subagent?: SubagentConfig - - /** - * Whether this tool should always show params expanded (not collapsible). - * Used for tools like set_environment_variables that always show their table. - */ - alwaysExpanded?: boolean - - /** - * Custom component type for special rendering. - * The tool-call component will use this to render specialized content. - */ - customRenderer?: 'code' | 'edit_summary' | 'none' -} - -/** - * Registry of tool UI configurations. 
- * Tools can register their UI config here for the tool-call component to use. - */ -const toolUIConfigs: Record = {} - -/** - * Register a tool's UI configuration - */ -export function registerToolUIConfig(toolName: string, config: ToolUIConfig): void { - toolUIConfigs[toolName] = config -} - -/** - * Get a tool's UI configuration - */ -export function getToolUIConfig(toolName: string): ToolUIConfig | undefined { - return toolUIConfigs[toolName] -} - -/** - * Check if a tool is a subagent tool - */ -export function isSubagentTool(toolName: string): boolean { - return !!toolUIConfigs[toolName]?.subagent -} - -/** - * Check if a tool is a "special" tool (gets gradient styling) - */ -export function isSpecialTool(toolName: string): boolean { - return !!toolUIConfigs[toolName]?.isSpecial -} - -/** - * Check if a tool has interrupt (requires user confirmation) - */ -export function hasInterrupt(toolName: string): boolean { - return !!toolUIConfigs[toolName]?.interrupt -} - -/** - * Get subagent labels for a tool - */ -export function getSubagentLabels( - toolName: string, - isStreaming: boolean -): { streaming: string; completed: string } | undefined { - const config = toolUIConfigs[toolName]?.subagent - if (!config) return undefined - return { - streaming: config.streamingLabel, - completed: config.completedLabel, - } -} - -/** - * Get all registered tool UI configs (for debugging) - */ -export function getAllToolUIConfigs(): Record { - return { ...toolUIConfigs } -} diff --git a/apps/sim/lib/copilot/tools/client/user/get-credentials.ts b/apps/sim/lib/copilot/tools/client/user/get-credentials.ts deleted file mode 100644 index 8ad821b14..000000000 --- a/apps/sim/lib/copilot/tools/client/user/get-credentials.ts +++ /dev/null @@ -1,73 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Key, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from 
'@/lib/copilot/tools/client/base-tool' -import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface GetCredentialsArgs { - userId?: string - workflowId?: string -} - -export class GetCredentialsClientTool extends BaseClientTool { - static readonly id = 'get_credentials' - - constructor(toolCallId: string) { - super(toolCallId, GetCredentialsClientTool.id, GetCredentialsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Fetching connected integrations', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Fetching connected integrations', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Fetching connected integrations', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Fetched connected integrations', icon: Key }, - [ClientToolCallState.error]: { - text: 'Failed to fetch connected integrations', - icon: XCircle, - }, - [ClientToolCallState.aborted]: { - text: 'Aborted fetching connected integrations', - icon: MinusCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped fetching connected integrations', - icon: MinusCircle, - }, - }, - } - - async execute(args?: GetCredentialsArgs): Promise { - const logger = createLogger('GetCredentialsClientTool') - try { - this.setState(ClientToolCallState.executing) - const payload: GetCredentialsArgs = { ...(args || {}) } - if (!payload.workflowId && !payload.userId) { - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (activeWorkflowId) payload.workflowId = activeWorkflowId - } - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'get_credentials', payload }), - }) - if (!res.ok) { - const txt = await res.text().catch(() => '') - throw new Error(txt || 
`Server error (${res.status})`) - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Connected integrations fetched', parsed.result) - this.setState(ClientToolCallState.success) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to fetch connected integrations') - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/user/set-environment-variables.ts b/apps/sim/lib/copilot/tools/client/user/set-environment-variables.ts deleted file mode 100644 index e4033ca85..000000000 --- a/apps/sim/lib/copilot/tools/client/user/set-environment-variables.ts +++ /dev/null @@ -1,157 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Settings2, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas' -import { useEnvironmentStore } from '@/stores/settings/environment' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface SetEnvArgs { - variables: Record - workflowId?: string -} - -export class SetEnvironmentVariablesClientTool extends BaseClientTool { - static readonly id = 'set_environment_variables' - - constructor(toolCallId: string) { - super( - toolCallId, - SetEnvironmentVariablesClientTool.id, - SetEnvironmentVariablesClientTool.metadata - ) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to set environment variables', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Set environment variables?', icon: Settings2 }, - 
[ClientToolCallState.executing]: { text: 'Setting environment variables', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Set environment variables', icon: Settings2 }, - [ClientToolCallState.error]: { text: 'Failed to set environment variables', icon: X }, - [ClientToolCallState.aborted]: { - text: 'Aborted setting environment variables', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped setting environment variables', - icon: XCircle, - }, - }, - interrupt: { - accept: { text: 'Apply', icon: Settings2 }, - reject: { text: 'Skip', icon: XCircle }, - }, - uiConfig: { - alwaysExpanded: true, - interrupt: { - accept: { text: 'Apply', icon: Settings2 }, - reject: { text: 'Skip', icon: XCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - paramsTable: { - columns: [ - { key: 'name', label: 'Variable', width: '36%', editable: true }, - { key: 'value', label: 'Value', width: '64%', editable: true, mono: true }, - ], - extractRows: (params) => { - const variables = params.variables || {} - const entries = Array.isArray(variables) - ? variables.map((v: any, i: number) => [String(i), v.name || `var_${i}`, v.value || '']) - : Object.entries(variables).map(([key, val]) => { - if (typeof val === 'object' && val !== null && 'value' in (val as any)) { - return [key, key, (val as any).value] - } - return [key, key, val] - }) - return entries as Array<[string, ...any[]]> - }, - }, - }, - getDynamicText: (params, state) => { - if (params?.variables && typeof params.variables === 'object') { - const count = Object.keys(params.variables).length - const varText = count === 1 ? 
'variable' : 'variables' - - switch (state) { - case ClientToolCallState.success: - return `Set ${count} ${varText}` - case ClientToolCallState.executing: - return `Setting ${count} ${varText}` - case ClientToolCallState.generating: - return `Preparing to set ${count} ${varText}` - case ClientToolCallState.pending: - return `Set ${count} ${varText}?` - case ClientToolCallState.error: - return `Failed to set ${count} ${varText}` - case ClientToolCallState.aborted: - return `Aborted setting ${count} ${varText}` - case ClientToolCallState.rejected: - return `Skipped setting ${count} ${varText}` - } - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: SetEnvArgs): Promise { - const logger = createLogger('SetEnvironmentVariablesClientTool') - try { - this.setState(ClientToolCallState.executing) - const payload: SetEnvArgs = { ...(args || { variables: {} }) } - if (!payload.workflowId) { - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (activeWorkflowId) payload.workflowId = activeWorkflowId - } - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'set_environment_variables', payload }), - }) - if (!res.ok) { - const txt = await res.text().catch(() => '') - throw new Error(txt || `Server error (${res.status})`) - } - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Environment variables updated', parsed.result) - this.setState(ClientToolCallState.success) - - // Refresh the environment store so the UI reflects the new variables - try { - await useEnvironmentStore.getState().loadEnvironmentVariables() - logger.info('Environment store refreshed after setting variables') - } catch (error) { - 
logger.warn('Failed to refresh environment store:', error) - } - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to set environment variables') - } - } - - async execute(args?: SetEnvArgs): Promise { - await this.handleAccept(args) - } -} - -// Register UI config at module load -registerToolUIConfig( - SetEnvironmentVariablesClientTool.id, - SetEnvironmentVariablesClientTool.metadata.uiConfig! -) diff --git a/apps/sim/lib/copilot/tools/client/workflow/block-output-utils.ts b/apps/sim/lib/copilot/tools/client/workflow/block-output-utils.ts deleted file mode 100644 index dc678b60e..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/block-output-utils.ts +++ /dev/null @@ -1,119 +0,0 @@ -import { getEffectiveBlockOutputPaths } from '@/lib/workflows/blocks/block-outputs' -import { normalizeName } from '@/executor/constants' -import { useVariablesStore } from '@/stores/panel/variables/store' -import type { Variable } from '@/stores/panel/variables/types' -import { useSubBlockStore } from '@/stores/workflows/subblock/store' -import type { BlockState, Loop, Parallel } from '@/stores/workflows/workflow/types' - -export interface WorkflowContext { - workflowId: string - blocks: Record - loops: Record - parallels: Record - subBlockValues: Record> -} - -export interface VariableOutput { - id: string - name: string - type: string - tag: string -} - -export function getWorkflowSubBlockValues(workflowId: string): Record> { - const subBlockStore = useSubBlockStore.getState() - return subBlockStore.workflowValues[workflowId] ?? 
{} -} - -export function getMergedSubBlocks( - blocks: Record, - subBlockValues: Record>, - targetBlockId: string -): Record { - const base = blocks[targetBlockId]?.subBlocks || {} - const live = subBlockValues?.[targetBlockId] || {} - const merged: Record = { ...base } - for (const [subId, liveVal] of Object.entries(live)) { - merged[subId] = { ...(base[subId] || {}), value: liveVal } - } - return merged -} - -export function getSubBlockValue( - blocks: Record, - subBlockValues: Record>, - targetBlockId: string, - subBlockId: string -): any { - const live = subBlockValues?.[targetBlockId]?.[subBlockId] - if (live !== undefined) return live - return blocks[targetBlockId]?.subBlocks?.[subBlockId]?.value -} - -export function getWorkflowVariables(workflowId: string): VariableOutput[] { - const getVariablesByWorkflowId = useVariablesStore.getState().getVariablesByWorkflowId - const workflowVariables = getVariablesByWorkflowId(workflowId) - const validVariables = workflowVariables.filter( - (variable: Variable) => variable.name.trim() !== '' - ) - return validVariables.map((variable: Variable) => ({ - id: variable.id, - name: variable.name, - type: variable.type, - tag: `variable.${normalizeName(variable.name)}`, - })) -} - -export function getSubflowInsidePaths( - blockType: 'loop' | 'parallel', - blockId: string, - loops: Record, - parallels: Record -): string[] { - const paths = ['index'] - if (blockType === 'loop') { - const loopType = loops[blockId]?.loopType || 'for' - if (loopType === 'forEach') { - paths.push('currentItem', 'items') - } - } else { - const parallelType = parallels[blockId]?.parallelType || 'count' - if (parallelType === 'collection') { - paths.push('currentItem', 'items') - } - } - return paths -} - -export function computeBlockOutputPaths(block: BlockState, ctx: WorkflowContext): string[] { - const { blocks, loops, parallels, subBlockValues } = ctx - const mergedSubBlocks = getMergedSubBlocks(blocks, subBlockValues, block.id) - - if (block.type 
=== 'loop' || block.type === 'parallel') { - const insidePaths = getSubflowInsidePaths(block.type, block.id, loops, parallels) - return ['results', ...insidePaths] - } - - if (block.type === 'variables') { - const variablesValue = getSubBlockValue(blocks, subBlockValues, block.id, 'variables') - if (variablesValue && Array.isArray(variablesValue) && variablesValue.length > 0) { - const validAssignments = variablesValue.filter((assignment: { variableName?: string }) => - assignment?.variableName?.trim() - ) - return validAssignments.map((assignment: { variableName: string }) => - assignment.variableName.trim() - ) - } - return [] - } - - return getEffectiveBlockOutputPaths(block.type, mergedSubBlocks, { - triggerMode: Boolean(block.triggerMode), - preferToolOutputs: !block.triggerMode, - }) -} - -export function formatOutputsWithPrefix(paths: string[], blockName: string): string[] { - const normalizedName = normalizeName(blockName) - return paths.map((path) => `${normalizedName}.${path}`) -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/check-deployment-status.ts b/apps/sim/lib/copilot/tools/client/workflow/check-deployment-status.ts deleted file mode 100644 index a0d3de72e..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/check-deployment-status.ts +++ /dev/null @@ -1,215 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Rocket, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface CheckDeploymentStatusArgs { - workflowId?: string -} - -interface ApiDeploymentDetails { - isDeployed: boolean - deployedAt: string | null - endpoint: string | null - apiKey: string | null - needsRedeployment: boolean -} - -interface ChatDeploymentDetails { - isDeployed: boolean - chatId: string | null - identifier: string | null - chatUrl: string | null - 
title: string | null - description: string | null - authType: string | null - allowedEmails: string[] | null - outputConfigs: Array<{ blockId: string; path: string }> | null - welcomeMessage: string | null - primaryColor: string | null - hasPassword: boolean -} - -interface McpDeploymentDetails { - isDeployed: boolean - servers: Array<{ - serverId: string - serverName: string - toolName: string - toolDescription: string | null - parameterSchema?: Record | null - toolId?: string | null - }> -} - -export class CheckDeploymentStatusClientTool extends BaseClientTool { - static readonly id = 'check_deployment_status' - - constructor(toolCallId: string) { - super(toolCallId, CheckDeploymentStatusClientTool.id, CheckDeploymentStatusClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Checking deployment status', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Checking deployment status', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Checking deployment status', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Checked deployment status', icon: Rocket }, - [ClientToolCallState.error]: { text: 'Failed to check deployment status', icon: X }, - [ClientToolCallState.aborted]: { - text: 'Aborted checking deployment status', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped checking deployment status', - icon: XCircle, - }, - }, - interrupt: undefined, - } - - async execute(args?: CheckDeploymentStatusArgs): Promise { - const logger = createLogger('CheckDeploymentStatusClientTool') - try { - this.setState(ClientToolCallState.executing) - - const { activeWorkflowId, workflows } = useWorkflowRegistry.getState() - const workflowId = args?.workflowId || activeWorkflowId - - if (!workflowId) { - throw new Error('No workflow ID provided') - } - - const workflow = workflows[workflowId] - const workspaceId = workflow?.workspaceId 
- - // Fetch deployment status from all sources - const [apiDeployRes, chatDeployRes, mcpServersRes] = await Promise.all([ - fetch(`/api/workflows/${workflowId}/deploy`), - fetch(`/api/workflows/${workflowId}/chat/status`), - workspaceId ? fetch(`/api/mcp/workflow-servers?workspaceId=${workspaceId}`) : null, - ]) - - const apiDeploy = apiDeployRes.ok ? await apiDeployRes.json() : null - const chatDeploy = chatDeployRes.ok ? await chatDeployRes.json() : null - const mcpServers = mcpServersRes?.ok ? await mcpServersRes.json() : null - - // API deployment details - const isApiDeployed = apiDeploy?.isDeployed || false - const appUrl = typeof window !== 'undefined' ? window.location.origin : '' - const apiDetails: ApiDeploymentDetails = { - isDeployed: isApiDeployed, - deployedAt: apiDeploy?.deployedAt || null, - endpoint: isApiDeployed ? `${appUrl}/api/workflows/${workflowId}/execute` : null, - apiKey: apiDeploy?.apiKey || null, - needsRedeployment: apiDeploy?.needsRedeployment === true, - } - - // Chat deployment details - const isChatDeployed = !!(chatDeploy?.isDeployed && chatDeploy?.deployment) - const chatDetails: ChatDeploymentDetails = { - isDeployed: isChatDeployed, - chatId: chatDeploy?.deployment?.id || null, - identifier: chatDeploy?.deployment?.identifier || null, - chatUrl: isChatDeployed ? `${appUrl}/chat/${chatDeploy?.deployment?.identifier}` : null, - title: chatDeploy?.deployment?.title || null, - description: chatDeploy?.deployment?.description || null, - authType: chatDeploy?.deployment?.authType || null, - allowedEmails: Array.isArray(chatDeploy?.deployment?.allowedEmails) - ? chatDeploy?.deployment?.allowedEmails - : null, - outputConfigs: Array.isArray(chatDeploy?.deployment?.outputConfigs) - ? 
chatDeploy?.deployment?.outputConfigs - : null, - welcomeMessage: chatDeploy?.deployment?.customizations?.welcomeMessage || null, - primaryColor: chatDeploy?.deployment?.customizations?.primaryColor || null, - hasPassword: chatDeploy?.deployment?.hasPassword === true, - } - - // MCP deployment details - find servers that have this workflow as a tool - const mcpServerList = mcpServers?.data?.servers || [] - const mcpToolDeployments: McpDeploymentDetails['servers'] = [] - - for (const server of mcpServerList) { - // Check if this workflow is deployed as a tool on this server - if (server.toolNames && Array.isArray(server.toolNames)) { - // We need to fetch the actual tools to check if this workflow is there - try { - const toolsRes = await fetch( - `/api/mcp/workflow-servers/${server.id}/tools?workspaceId=${workspaceId}` - ) - if (toolsRes.ok) { - const toolsData = await toolsRes.json() - const tools = toolsData.data?.tools || [] - for (const tool of tools) { - if (tool.workflowId === workflowId) { - mcpToolDeployments.push({ - serverId: server.id, - serverName: server.name, - toolName: tool.toolName, - toolDescription: tool.toolDescription, - parameterSchema: tool.parameterSchema ?? null, - toolId: tool.id ?? 
null, - }) - } - } - } - } catch { - // Skip this server if we can't fetch tools - } - } - } - - const isMcpDeployed = mcpToolDeployments.length > 0 - const mcpDetails: McpDeploymentDetails = { - isDeployed: isMcpDeployed, - servers: mcpToolDeployments, - } - - // Build deployment types list - const deploymentTypes: string[] = [] - if (isApiDeployed) deploymentTypes.push('api') - if (isChatDeployed) deploymentTypes.push('chat') - if (isMcpDeployed) deploymentTypes.push('mcp') - - const isDeployed = isApiDeployed || isChatDeployed || isMcpDeployed - - // Build summary message - let message = '' - if (!isDeployed) { - message = 'Workflow is not deployed' - } else { - const parts: string[] = [] - if (isApiDeployed) parts.push('API') - if (isChatDeployed) parts.push(`Chat (${chatDetails.identifier})`) - if (isMcpDeployed) { - const serverNames = mcpToolDeployments.map((d) => d.serverName).join(', ') - parts.push(`MCP (${serverNames})`) - } - message = `Workflow is deployed as: ${parts.join(', ')}` - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, message, { - isDeployed, - deploymentTypes, - api: apiDetails, - chat: chatDetails, - mcp: mcpDetails, - }) - - logger.info('Checked deployment status', { isDeployed, deploymentTypes }) - } catch (e: any) { - logger.error('Check deployment status failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to check deployment status') - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/create-workspace-mcp-server.ts b/apps/sim/lib/copilot/tools/client/workflow/create-workspace-mcp-server.ts deleted file mode 100644 index f50832184..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/create-workspace-mcp-server.ts +++ /dev/null @@ -1,155 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Plus, Server, XCircle } from 'lucide-react' -import { - BaseClientTool, - type 
BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { useCopilotStore } from '@/stores/panel/copilot/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -export interface CreateWorkspaceMcpServerArgs { - /** Name of the MCP server */ - name: string - /** Optional description */ - description?: string - workspaceId?: string -} - -/** - * Create workspace MCP server tool. - * Creates a new MCP server in the workspace that workflows can be deployed to as tools. - */ -export class CreateWorkspaceMcpServerClientTool extends BaseClientTool { - static readonly id = 'create_workspace_mcp_server' - - constructor(toolCallId: string) { - super( - toolCallId, - CreateWorkspaceMcpServerClientTool.id, - CreateWorkspaceMcpServerClientTool.metadata - ) - } - - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - const toolCallsById = useCopilotStore.getState().toolCallsById - const toolCall = toolCallsById[this.toolCallId] - const params = toolCall?.params as CreateWorkspaceMcpServerArgs | undefined - - const serverName = params?.name || 'MCP Server' - - return { - accept: { text: `Create "${serverName}"`, icon: Plus }, - reject: { text: 'Skip', icon: XCircle }, - } - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to create MCP server', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Create MCP server?', icon: Server }, - [ClientToolCallState.executing]: { text: 'Creating MCP server', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Created MCP server', icon: Server }, - [ClientToolCallState.error]: { text: 'Failed to create MCP server', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted creating MCP server', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped creating MCP server', icon: XCircle }, - }, - interrupt: { - accept: { text: 
'Create', icon: Plus }, - reject: { text: 'Skip', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const name = params?.name || 'MCP server' - switch (state) { - case ClientToolCallState.success: - return `Created MCP server "${name}"` - case ClientToolCallState.executing: - return `Creating MCP server "${name}"` - case ClientToolCallState.generating: - return `Preparing to create "${name}"` - case ClientToolCallState.pending: - return `Create MCP server "${name}"?` - case ClientToolCallState.error: - return `Failed to create "${name}"` - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: CreateWorkspaceMcpServerArgs): Promise { - const logger = createLogger('CreateWorkspaceMcpServerClientTool') - try { - if (!args?.name) { - throw new Error('Server name is required') - } - - // Get workspace ID from active workflow if not provided - const { activeWorkflowId, workflows } = useWorkflowRegistry.getState() - let workspaceId = args?.workspaceId - - if (!workspaceId && activeWorkflowId) { - workspaceId = workflows[activeWorkflowId]?.workspaceId - } - - if (!workspaceId) { - throw new Error('No workspace ID available') - } - - this.setState(ClientToolCallState.executing) - - const res = await fetch('/api/mcp/workflow-servers', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - workspaceId, - name: args.name.trim(), - description: args.description?.trim() || null, - }), - }) - - const data = await res.json() - - if (!res.ok) { - throw new Error(data.error || `Failed to create MCP server (${res.status})`) - } - - const server = data.data?.server - if (!server) { - throw new Error('Server creation response missing server data') - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete( - 200, - `MCP server "${args.name}" created successfully. 
You can now deploy workflows to it using deploy_mcp.`, - { - success: true, - serverId: server.id, - serverName: server.name, - description: server.description, - } - ) - - logger.info(`Created MCP server: ${server.name} (${server.id})`) - } catch (e: any) { - logger.error('Failed to create MCP server', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to create MCP server', { - success: false, - error: e?.message, - }) - } - } - - async execute(args?: CreateWorkspaceMcpServerArgs): Promise { - await this.handleAccept(args) - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/deploy-api.ts b/apps/sim/lib/copilot/tools/client/workflow/deploy-api.ts deleted file mode 100644 index c850dd493..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/deploy-api.ts +++ /dev/null @@ -1,286 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Rocket, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { getBaseUrl } from '@/lib/core/utils/urls' -import { getInputFormatExample } from '@/lib/workflows/operations/deployment-utils' -import { useCopilotStore } from '@/stores/panel/copilot/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface DeployApiArgs { - action: 'deploy' | 'undeploy' - workflowId?: string -} - -/** - * Deploy API tool for deploying workflows as REST APIs. - * This tool handles both deploying and undeploying workflows via the API endpoint. 
- */ -export class DeployApiClientTool extends BaseClientTool { - static readonly id = 'deploy_api' - - constructor(toolCallId: string) { - super(toolCallId, DeployApiClientTool.id, DeployApiClientTool.metadata) - } - - /** - * Override to provide dynamic button text based on action - */ - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - const toolCallsById = useCopilotStore.getState().toolCallsById - const toolCall = toolCallsById[this.toolCallId] - const params = toolCall?.params as DeployApiArgs | undefined - - const action = params?.action || 'deploy' - - const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId - const isAlreadyDeployed = workflowId - ? useWorkflowRegistry.getState().getWorkflowDeploymentStatus(workflowId)?.isDeployed - : false - - let buttonText = action === 'undeploy' ? 'Undeploy' : 'Deploy' - - if (action === 'deploy' && isAlreadyDeployed) { - buttonText = 'Redeploy' - } - - return { - accept: { text: buttonText, icon: Rocket }, - reject: { text: 'Skip', icon: XCircle }, - } - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to deploy API', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Deploy as API?', icon: Rocket }, - [ClientToolCallState.executing]: { text: 'Deploying API', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Deployed API', icon: Rocket }, - [ClientToolCallState.error]: { text: 'Failed to deploy API', icon: XCircle }, - [ClientToolCallState.aborted]: { - text: 'Aborted deploying API', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped deploying API', - icon: XCircle, - }, - }, - interrupt: { - accept: { text: 'Deploy', icon: Rocket }, - reject: { text: 'Skip', icon: XCircle }, - }, - uiConfig: { - isSpecial: true, - interrupt: { - accept: { text: 'Deploy', icon: Rocket }, - reject: { text: 'Skip', icon: XCircle }, - showAllowOnce: 
true, - showAllowAlways: true, - }, - }, - getDynamicText: (params, state) => { - const action = params?.action === 'undeploy' ? 'undeploy' : 'deploy' - - const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId - const isAlreadyDeployed = workflowId - ? useWorkflowRegistry.getState().getWorkflowDeploymentStatus(workflowId)?.isDeployed - : false - - let actionText = action - let actionTextIng = action === 'undeploy' ? 'undeploying' : 'deploying' - const actionTextPast = action === 'undeploy' ? 'undeployed' : 'deployed' - - if (action === 'deploy' && isAlreadyDeployed) { - actionText = 'redeploy' - actionTextIng = 'redeploying' - } - - const actionCapitalized = actionText.charAt(0).toUpperCase() + actionText.slice(1) - - switch (state) { - case ClientToolCallState.success: - return `API ${actionTextPast}` - case ClientToolCallState.executing: - return `${actionCapitalized}ing API` - case ClientToolCallState.generating: - return `Preparing to ${actionText} API` - case ClientToolCallState.pending: - return `${actionCapitalized} API?` - case ClientToolCallState.error: - return `Failed to ${actionText} API` - case ClientToolCallState.aborted: - return `Aborted ${actionTextIng} API` - case ClientToolCallState.rejected: - return `Skipped ${actionTextIng} API` - } - return undefined - }, - } - - /** - * Checks if the user has any API keys (workspace or personal) - */ - private async hasApiKeys(workspaceId: string): Promise { - try { - const [workspaceRes, personalRes] = await Promise.all([ - fetch(`/api/workspaces/${workspaceId}/api-keys`), - fetch('/api/users/me/api-keys'), - ]) - - if (!workspaceRes.ok || !personalRes.ok) { - return false - } - - const workspaceData = await workspaceRes.json() - const personalData = await personalRes.json() - - const workspaceKeys = (workspaceData?.keys || []) as Array - const personalKeys = (personalData?.keys || []) as Array - - return workspaceKeys.length > 0 || personalKeys.length > 0 - } catch (error) 
{ - const logger = createLogger('DeployApiClientTool') - logger.warn('Failed to check API keys:', error) - return false - } - } - - /** - * Opens the settings modal to the API keys tab - */ - private openApiKeysModal(): void { - window.dispatchEvent(new CustomEvent('open-settings', { detail: { tab: 'apikeys' } })) - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: DeployApiArgs): Promise { - const logger = createLogger('DeployApiClientTool') - try { - const action = args?.action || 'deploy' - const { activeWorkflowId, workflows } = useWorkflowRegistry.getState() - const workflowId = args?.workflowId || activeWorkflowId - - if (!workflowId) { - throw new Error('No workflow ID provided') - } - - const workflow = workflows[workflowId] - const workspaceId = workflow?.workspaceId - - // For deploy action, check if user has API keys first - if (action === 'deploy') { - if (!workspaceId) { - throw new Error('Workflow workspace not found') - } - - const hasKeys = await this.hasApiKeys(workspaceId) - - if (!hasKeys) { - this.setState(ClientToolCallState.rejected) - this.openApiKeysModal() - - await this.markToolComplete( - 200, - 'Cannot deploy without an API key. Opened API key settings so you can create one. Once you have an API key, try deploying again.', - { - needsApiKey: true, - message: - 'You need to create an API key before you can deploy your workflow. The API key settings have been opened for you. After creating an API key, you can deploy your workflow.', - } - ) - return - } - } - - this.setState(ClientToolCallState.executing) - - const endpoint = `/api/workflows/${workflowId}/deploy` - const method = action === 'deploy' ? 'POST' : 'DELETE' - - const res = await fetch(endpoint, { - method, - headers: { 'Content-Type': 'application/json' }, - body: action === 'deploy' ? 
JSON.stringify({ deployChatEnabled: false }) : undefined, - }) - - if (!res.ok) { - const txt = await res.text().catch(() => '') - throw new Error(txt || `Server error (${res.status})`) - } - - const json = await res.json() - - let successMessage = '' - let resultData: any = { - action, - isDeployed: action === 'deploy', - deployedAt: json.deployedAt, - } - - if (action === 'deploy') { - const appUrl = getBaseUrl() - const apiEndpoint = `${appUrl}/api/workflows/${workflowId}/execute` - const apiKeyPlaceholder = '$SIM_API_KEY' - - const inputExample = getInputFormatExample(false) - const curlCommand = `curl -X POST -H "X-API-Key: ${apiKeyPlaceholder}" -H "Content-Type: application/json"${inputExample} ${apiEndpoint}` - - successMessage = 'Workflow deployed successfully as API. You can now call it via REST.' - - resultData = { - ...resultData, - endpoint: apiEndpoint, - curlCommand, - apiKeyPlaceholder, - } - } else { - successMessage = 'Workflow undeployed successfully.' - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, successMessage, resultData) - - // Refresh the workflow registry to update deployment status - try { - const setDeploymentStatus = useWorkflowRegistry.getState().setDeploymentStatus - if (action === 'deploy') { - setDeploymentStatus( - workflowId, - true, - json.deployedAt ? new Date(json.deployedAt) : undefined, - json.apiKey || '' - ) - } else { - setDeploymentStatus(workflowId, false, undefined, '') - } - const actionPast = action === 'undeploy' ? 
'undeployed' : 'deployed' - logger.info(`Workflow ${actionPast} as API and registry updated`) - } catch (error) { - logger.warn('Failed to update workflow registry:', error) - } - } catch (e: any) { - logger.error('Deploy API failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to deploy API') - } - } - - async execute(args?: DeployApiArgs): Promise { - await this.handleAccept(args) - } -} - -// Register UI config at module load -registerToolUIConfig(DeployApiClientTool.id, DeployApiClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/workflow/deploy-chat.ts b/apps/sim/lib/copilot/tools/client/workflow/deploy-chat.ts deleted file mode 100644 index 24ad19a53..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/deploy-chat.ts +++ /dev/null @@ -1,381 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, MessageSquare, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { useCopilotStore } from '@/stores/panel/copilot/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -export type ChatAuthType = 'public' | 'password' | 'email' | 'sso' - -export interface OutputConfig { - blockId: string - path: string -} - -export interface DeployChatArgs { - action: 'deploy' | 'undeploy' - workflowId?: string - /** URL slug for the chat (lowercase letters, numbers, hyphens only) */ - identifier?: string - /** Display title for the chat interface */ - title?: string - /** Optional description */ - description?: string - /** Authentication type: public, password, email, or sso */ - authType?: ChatAuthType - /** Password for password-protected chats */ - password?: string - /** List of allowed emails/domains for email or SSO auth */ - 
allowedEmails?: string[] - /** Welcome message shown to users */ - welcomeMessage?: string - /** Output configurations specifying which block outputs to display in chat */ - outputConfigs?: OutputConfig[] -} - -/** - * Deploy Chat tool for deploying workflows as chat interfaces. - * This tool handles deploying workflows with chat-specific configuration - * including authentication, customization, and output selection. - */ -export class DeployChatClientTool extends BaseClientTool { - static readonly id = 'deploy_chat' - - constructor(toolCallId: string) { - super(toolCallId, DeployChatClientTool.id, DeployChatClientTool.metadata) - } - - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - const toolCallsById = useCopilotStore.getState().toolCallsById - const toolCall = toolCallsById[this.toolCallId] - const params = toolCall?.params as DeployChatArgs | undefined - - const action = params?.action || 'deploy' - const buttonText = action === 'undeploy' ? 'Undeploy' : 'Deploy Chat' - - return { - accept: { text: buttonText, icon: MessageSquare }, - reject: { text: 'Skip', icon: XCircle }, - } - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to deploy chat', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Deploy as chat?', icon: MessageSquare }, - [ClientToolCallState.executing]: { text: 'Deploying chat', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Deployed chat', icon: MessageSquare }, - [ClientToolCallState.error]: { text: 'Failed to deploy chat', icon: XCircle }, - [ClientToolCallState.aborted]: { - text: 'Aborted deploying chat', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped deploying chat', - icon: XCircle, - }, - }, - interrupt: { - accept: { text: 'Deploy Chat', icon: MessageSquare }, - reject: { text: 'Skip', icon: XCircle }, - }, - uiConfig: { - isSpecial: true, - interrupt: { - accept: { 
text: 'Deploy Chat', icon: MessageSquare }, - reject: { text: 'Skip', icon: XCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - }, - getDynamicText: (params, state) => { - const action = params?.action === 'undeploy' ? 'undeploy' : 'deploy' - - switch (state) { - case ClientToolCallState.success: - return action === 'undeploy' ? 'Chat undeployed' : 'Chat deployed' - case ClientToolCallState.executing: - return action === 'undeploy' ? 'Undeploying chat' : 'Deploying chat' - case ClientToolCallState.generating: - return `Preparing to ${action} chat` - case ClientToolCallState.pending: - return action === 'undeploy' ? 'Undeploy chat?' : 'Deploy as chat?' - case ClientToolCallState.error: - return `Failed to ${action} chat` - case ClientToolCallState.aborted: - return action === 'undeploy' ? 'Aborted undeploying chat' : 'Aborted deploying chat' - case ClientToolCallState.rejected: - return action === 'undeploy' ? 'Skipped undeploying chat' : 'Skipped deploying chat' - } - return undefined - }, - } - - /** - * Generates a default identifier from the workflow name - */ - private generateIdentifier(workflowName: string): string { - return workflowName - .toLowerCase() - .replace(/[^a-z0-9]+/g, '-') - .replace(/^-|-$/g, '') - .substring(0, 50) - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: DeployChatArgs): Promise { - const logger = createLogger('DeployChatClientTool') - try { - const action = args?.action || 'deploy' - const { activeWorkflowId, workflows } = useWorkflowRegistry.getState() - const workflowId = args?.workflowId || activeWorkflowId - - if (!workflowId) { - throw new Error('No workflow ID provided') - } - - const workflow = workflows[workflowId] - - // Handle undeploy action - if (action === 'undeploy') { - this.setState(ClientToolCallState.executing) - - // First get the chat deployment ID - const statusRes = await 
fetch(`/api/workflows/${workflowId}/chat/status`) - if (!statusRes.ok) { - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, 'Failed to check chat deployment status', { - success: false, - action: 'undeploy', - isDeployed: false, - error: 'Failed to check chat deployment status', - errorCode: 'SERVER_ERROR', - }) - return - } - - const statusJson = await statusRes.json() - if (!statusJson.isDeployed || !statusJson.deployment?.id) { - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, 'No active chat deployment found for this workflow', { - success: false, - action: 'undeploy', - isDeployed: false, - error: 'No active chat deployment found for this workflow', - errorCode: 'VALIDATION_ERROR', - }) - return - } - - const chatId = statusJson.deployment.id - - // Delete the chat deployment - const res = await fetch(`/api/chat/manage/${chatId}`, { - method: 'DELETE', - headers: { 'Content-Type': 'application/json' }, - }) - - if (!res.ok) { - const txt = await res.text().catch(() => '') - this.setState(ClientToolCallState.error) - await this.markToolComplete(res.status, txt || `Server error (${res.status})`, { - success: false, - action: 'undeploy', - isDeployed: true, - error: txt || 'Failed to undeploy chat', - errorCode: 'SERVER_ERROR', - }) - return - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Chat deployment removed successfully.', { - success: true, - action: 'undeploy', - isDeployed: false, - }) - return - } - - this.setState(ClientToolCallState.executing) - - const statusRes = await fetch(`/api/workflows/${workflowId}/chat/status`) - const statusJson = statusRes.ok ? 
await statusRes.json() : null - const existingDeployment = statusJson?.deployment || null - - const baseIdentifier = - existingDeployment?.identifier || this.generateIdentifier(workflow?.name || 'chat') - const baseTitle = existingDeployment?.title || workflow?.name || 'Chat' - const baseDescription = existingDeployment?.description || '' - const baseAuthType = existingDeployment?.authType || 'public' - const baseWelcomeMessage = - existingDeployment?.customizations?.welcomeMessage || 'Hi there! How can I help you today?' - const basePrimaryColor = - existingDeployment?.customizations?.primaryColor || 'var(--brand-primary-hover-hex)' - const baseAllowedEmails = Array.isArray(existingDeployment?.allowedEmails) - ? existingDeployment.allowedEmails - : [] - const baseOutputConfigs = Array.isArray(existingDeployment?.outputConfigs) - ? existingDeployment.outputConfigs - : [] - - const identifier = args?.identifier || baseIdentifier - const title = args?.title || baseTitle - const description = args?.description ?? 
baseDescription - const authType = args?.authType || baseAuthType - const welcomeMessage = args?.welcomeMessage || baseWelcomeMessage - const outputConfigs = args?.outputConfigs || baseOutputConfigs - const allowedEmails = args?.allowedEmails || baseAllowedEmails - const primaryColor = basePrimaryColor - - if (!identifier || !title) { - throw new Error('Chat identifier and title are required') - } - - if (authType === 'password' && !args?.password && !existingDeployment?.hasPassword) { - throw new Error('Password is required when using password protection') - } - - if ((authType === 'email' || authType === 'sso') && allowedEmails.length === 0) { - throw new Error(`At least one email or domain is required when using ${authType} access`) - } - - const payload = { - workflowId, - identifier: identifier.trim(), - title: title.trim(), - description: description.trim(), - customizations: { - primaryColor, - welcomeMessage: welcomeMessage.trim(), - }, - authType, - password: authType === 'password' ? args?.password : undefined, - allowedEmails: authType === 'email' || authType === 'sso' ? allowedEmails : [], - outputConfigs, - } - - const isUpdating = Boolean(existingDeployment?.id) - const endpoint = isUpdating ? `/api/chat/manage/${existingDeployment.id}` : '/api/chat' - const method = isUpdating ? 'PATCH' : 'POST' - - const res = await fetch(endpoint, { - method, - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify(payload), - }) - - const json = await res.json() - - if (!res.ok) { - if (json.error === 'Identifier already in use') { - this.setState(ClientToolCallState.error) - await this.markToolComplete( - 400, - `The identifier "${identifier}" is already in use. 
Please choose a different one.`, - { - success: false, - action: 'deploy', - isDeployed: false, - identifier, - error: `Identifier "${identifier}" is already taken`, - errorCode: 'IDENTIFIER_TAKEN', - } - ) - return - } - - // Handle validation errors - if (json.code === 'VALIDATION_ERROR') { - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, json.error || 'Validation error', { - success: false, - action: 'deploy', - isDeployed: false, - error: json.error, - errorCode: 'VALIDATION_ERROR', - }) - return - } - - this.setState(ClientToolCallState.error) - await this.markToolComplete(res.status, json.error || 'Failed to deploy chat', { - success: false, - action: 'deploy', - isDeployed: false, - error: json.error || 'Server error', - errorCode: 'SERVER_ERROR', - }) - return - } - - if (!json.chatUrl) { - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, 'Response missing chat URL', { - success: false, - action: 'deploy', - isDeployed: false, - error: 'Response missing chat URL', - errorCode: 'SERVER_ERROR', - }) - return - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete( - 200, - `Chat deployed successfully! 
Available at: ${json.chatUrl}`, - { - success: true, - action: 'deploy', - isDeployed: true, - chatId: json.id, - chatUrl: json.chatUrl, - identifier, - title, - authType, - } - ) - - // Update the workflow registry to reflect deployment status - // Chat deployment also deploys the API, so we update the registry - try { - const setDeploymentStatus = useWorkflowRegistry.getState().setDeploymentStatus - setDeploymentStatus(workflowId, true, new Date(), '') - logger.info('Workflow deployment status updated in registry') - } catch (error) { - logger.warn('Failed to update workflow registry:', error) - } - - logger.info('Chat deployed successfully:', json.chatUrl) - } catch (e: any) { - logger.error('Deploy chat failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to deploy chat', { - success: false, - action: 'deploy', - isDeployed: false, - error: e?.message || 'Failed to deploy chat', - errorCode: 'SERVER_ERROR', - }) - } - } - - async execute(args?: DeployChatArgs): Promise { - await this.handleAccept(args) - } -} - -// Register UI config at module load -registerToolUIConfig(DeployChatClientTool.id, DeployChatClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/workflow/deploy-mcp.ts b/apps/sim/lib/copilot/tools/client/workflow/deploy-mcp.ts deleted file mode 100644 index bcd87fc25..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/deploy-mcp.ts +++ /dev/null @@ -1,250 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Server, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -export interface ParameterDescription { - name: string - description: string -} - -export interface DeployMcpArgs { - /** The MCP server ID to deploy to (get from list_workspace_mcp_servers) */ - serverId: string - /** Optional workflow ID (defaults to active workflow) */ - workflowId?: string - /** Custom tool name (defaults to workflow name) */ - toolName?: string - /** Custom tool description */ - toolDescription?: string - /** Parameter descriptions to include in the schema */ - parameterDescriptions?: ParameterDescription[] -} - -/** - * Deploy MCP tool. - * Deploys the workflow as an MCP tool to a workspace MCP server. 
- */ -export class DeployMcpClientTool extends BaseClientTool { - static readonly id = 'deploy_mcp' - - constructor(toolCallId: string) { - super(toolCallId, DeployMcpClientTool.id, DeployMcpClientTool.metadata) - } - - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - return { - accept: { text: 'Deploy to MCP', icon: Server }, - reject: { text: 'Skip', icon: XCircle }, - } - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to deploy to MCP', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Deploy to MCP server?', icon: Server }, - [ClientToolCallState.executing]: { text: 'Deploying to MCP', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Deployed to MCP', icon: Server }, - [ClientToolCallState.error]: { text: 'Failed to deploy to MCP', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted MCP deployment', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped MCP deployment', icon: XCircle }, - }, - interrupt: { - accept: { text: 'Deploy', icon: Server }, - reject: { text: 'Skip', icon: XCircle }, - }, - uiConfig: { - isSpecial: true, - interrupt: { - accept: { text: 'Deploy', icon: Server }, - reject: { text: 'Skip', icon: XCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - }, - getDynamicText: (params, state) => { - const toolName = params?.toolName || 'workflow' - switch (state) { - case ClientToolCallState.success: - return `Deployed "${toolName}" to MCP` - case ClientToolCallState.executing: - return `Deploying "${toolName}" to MCP` - case ClientToolCallState.generating: - return `Preparing to deploy to MCP` - case ClientToolCallState.pending: - return `Deploy "${toolName}" to MCP?` - case ClientToolCallState.error: - return `Failed to deploy to MCP` - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - 
this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: DeployMcpArgs): Promise { - const logger = createLogger('DeployMcpClientTool') - try { - if (!args?.serverId) { - throw new Error( - 'Server ID is required. Use list_workspace_mcp_servers to get available servers.' - ) - } - - const { activeWorkflowId, workflows } = useWorkflowRegistry.getState() - const workflowId = args?.workflowId || activeWorkflowId - - if (!workflowId) { - throw new Error('No workflow ID available') - } - - const workflow = workflows[workflowId] - const workspaceId = workflow?.workspaceId - - if (!workspaceId) { - throw new Error('Workflow workspace not found') - } - - // Check if workflow is deployed - const deploymentStatus = useWorkflowRegistry - .getState() - .getWorkflowDeploymentStatus(workflowId) - if (!deploymentStatus?.isDeployed) { - throw new Error( - 'Workflow must be deployed before adding as an MCP tool. Use deploy_api first.' - ) - } - - this.setState(ClientToolCallState.executing) - - let parameterSchema: Record | undefined - if (args?.parameterDescriptions && args.parameterDescriptions.length > 0) { - const properties: Record = {} - for (const param of args.parameterDescriptions) { - properties[param.name] = { description: param.description } - } - parameterSchema = { properties } - } - - const res = await fetch( - `/api/mcp/workflow-servers/${args.serverId}/tools?workspaceId=${workspaceId}`, - { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - workflowId, - toolName: args.toolName?.trim(), - toolDescription: args.toolDescription?.trim(), - parameterSchema, - }), - } - ) - - const data = await res.json() - - if (!res.ok) { - if (data.error?.includes('already added')) { - const toolsRes = await fetch( - `/api/mcp/workflow-servers/${args.serverId}/tools?workspaceId=${workspaceId}` - ) - const toolsJson = toolsRes.ok ? 
await toolsRes.json() : null - const tools = toolsJson?.data?.tools || [] - const existingTool = tools.find((tool: any) => tool.workflowId === workflowId) - if (!existingTool?.id) { - throw new Error('This workflow is already deployed to this MCP server') - } - const patchRes = await fetch( - `/api/mcp/workflow-servers/${args.serverId}/tools/${existingTool.id}?workspaceId=${workspaceId}`, - { - method: 'PATCH', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - toolName: args.toolName?.trim(), - toolDescription: args.toolDescription?.trim(), - parameterSchema, - }), - } - ) - const patchJson = patchRes.ok ? await patchRes.json() : null - if (!patchRes.ok) { - const patchError = patchJson?.error || `Failed to update MCP tool (${patchRes.status})` - throw new Error(patchError) - } - const updatedTool = patchJson?.data?.tool - this.setState(ClientToolCallState.success) - await this.markToolComplete( - 200, - `Workflow MCP tool updated to "${updatedTool?.toolName || existingTool.toolName}".`, - { - success: true, - toolId: updatedTool?.id || existingTool.id, - toolName: updatedTool?.toolName || existingTool.toolName, - toolDescription: updatedTool?.toolDescription || existingTool.toolDescription, - serverId: args.serverId, - updated: true, - } - ) - logger.info('Updated workflow MCP tool', { toolId: existingTool.id }) - return - } - if (data.error?.includes('not deployed')) { - throw new Error('Workflow must be deployed before adding as an MCP tool') - } - if (data.error?.includes('Start block')) { - throw new Error('Workflow must have a Start block to be used as an MCP tool') - } - if (data.error?.includes('Server not found')) { - throw new Error( - 'MCP server not found. Use list_workspace_mcp_servers to see available servers.' 
- ) - } - throw new Error(data.error || `Failed to deploy to MCP (${res.status})`) - } - - const tool = data.data?.tool - if (!tool) { - throw new Error('Response missing tool data') - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete( - 200, - `Workflow deployed as MCP tool "${tool.toolName}" to server.`, - { - success: true, - toolId: tool.id, - toolName: tool.toolName, - toolDescription: tool.toolDescription, - serverId: args.serverId, - } - ) - - logger.info(`Deployed workflow as MCP tool: ${tool.toolName}`) - } catch (e: any) { - logger.error('Failed to deploy to MCP', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to deploy to MCP', { - success: false, - error: e?.message, - }) - } - } - - async execute(args?: DeployMcpArgs): Promise { - await this.handleAccept(args) - } -} - -// Register UI config at module load -registerToolUIConfig(DeployMcpClientTool.id, DeployMcpClientTool.metadata.uiConfig!) 
diff --git a/apps/sim/lib/copilot/tools/client/workflow/edit-workflow.ts b/apps/sim/lib/copilot/tools/client/workflow/edit-workflow.ts deleted file mode 100644 index 55ffdaa93..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/edit-workflow.ts +++ /dev/null @@ -1,426 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Grid2x2, Grid2x2Check, Grid2x2X, Loader2, MinusCircle, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas' -import { stripWorkflowDiffMarkers } from '@/lib/workflows/diff' -import { sanitizeForCopilot } from '@/lib/workflows/sanitization/json-sanitizer' -import { useWorkflowDiffStore } from '@/stores/workflow-diff/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' -import { mergeSubblockState } from '@/stores/workflows/utils' -import { useWorkflowStore } from '@/stores/workflows/workflow/store' -import type { WorkflowState } from '@/stores/workflows/workflow/types' - -interface EditWorkflowOperation { - operation_type: 'add' | 'edit' | 'delete' - block_id: string - params?: Record -} - -interface EditWorkflowArgs { - operations: EditWorkflowOperation[] - workflowId: string - currentUserWorkflow?: string -} - -export class EditWorkflowClientTool extends BaseClientTool { - static readonly id = 'edit_workflow' - private lastResult: any | undefined - private hasExecuted = false - private hasAppliedDiff = false - private workflowId: string | undefined - - constructor(toolCallId: string) { - super(toolCallId, EditWorkflowClientTool.id, EditWorkflowClientTool.metadata) - } - - async markToolComplete(status: number, message?: any, data?: any): Promise { - const logger = createLogger('EditWorkflowClientTool') - logger.info('markToolComplete payload', 
{ - toolCallId: this.toolCallId, - toolName: this.name, - status, - message, - data, - }) - return super.markToolComplete(status, message, data) - } - - /** - * Get sanitized workflow JSON from a workflow state, merge subblocks, and sanitize for copilot - * This matches what get_user_workflow returns - */ - private getSanitizedWorkflowJson(workflowState: any): string | undefined { - const logger = createLogger('EditWorkflowClientTool') - - if (!this.workflowId) { - logger.warn('No workflowId available for getting sanitized workflow JSON') - return undefined - } - - if (!workflowState) { - logger.warn('No workflow state provided') - return undefined - } - - try { - // Normalize required properties - if (!workflowState.loops) workflowState.loops = {} - if (!workflowState.parallels) workflowState.parallels = {} - if (!workflowState.edges) workflowState.edges = [] - if (!workflowState.blocks) workflowState.blocks = {} - - // Merge latest subblock values so edits are reflected - let mergedState = workflowState - if (workflowState.blocks) { - mergedState = { - ...workflowState, - blocks: mergeSubblockState(workflowState.blocks, this.workflowId as any), - } - logger.info('Merged subblock values into workflow state', { - workflowId: this.workflowId, - blockCount: Object.keys(mergedState.blocks || {}).length, - }) - } - - // Sanitize workflow state for copilot (remove UI-specific data) - const sanitizedState = sanitizeForCopilot(mergedState) - - // Convert to JSON string for transport - const workflowJson = JSON.stringify(sanitizedState, null, 2) - logger.info('Successfully created sanitized workflow JSON', { - workflowId: this.workflowId, - jsonLength: workflowJson.length, - }) - - return workflowJson - } catch (error) { - logger.error('Failed to get sanitized workflow JSON', { - error: error instanceof Error ? error.message : String(error), - }) - return undefined - } - } - - /** - * Safely get the current workflow JSON sanitized for copilot without throwing. 
- * Used to ensure we always include workflow state in markComplete. - */ - private getCurrentWorkflowJsonSafe(logger: ReturnType): string | undefined { - try { - const currentState = useWorkflowStore.getState().getWorkflowState() - if (!currentState) { - logger.warn('No current workflow state available') - return undefined - } - return this.getSanitizedWorkflowJson(currentState) - } catch (error) { - logger.warn('Failed to get current workflow JSON safely', { - error: error instanceof Error ? error.message : String(error), - }) - return undefined - } - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Editing your workflow', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Editing your workflow', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Edited your workflow', icon: Grid2x2Check }, - [ClientToolCallState.error]: { text: 'Failed to edit your workflow', icon: XCircle }, - [ClientToolCallState.review]: { text: 'Review your workflow changes', icon: Grid2x2 }, - [ClientToolCallState.rejected]: { text: 'Rejected workflow changes', icon: Grid2x2X }, - [ClientToolCallState.aborted]: { text: 'Aborted editing your workflow', icon: MinusCircle }, - [ClientToolCallState.pending]: { text: 'Editing your workflow', icon: Loader2 }, - }, - uiConfig: { - isSpecial: true, - customRenderer: 'edit_summary', - }, - getDynamicText: (params, state) => { - const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId - if (workflowId) { - const workflowName = useWorkflowRegistry.getState().workflows[workflowId]?.name - if (workflowName) { - switch (state) { - case ClientToolCallState.success: - return `Edited ${workflowName}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Editing ${workflowName}` - case ClientToolCallState.error: - return `Failed to edit ${workflowName}` - case 
ClientToolCallState.review: - return `Review changes to ${workflowName}` - case ClientToolCallState.rejected: - return `Rejected changes to ${workflowName}` - case ClientToolCallState.aborted: - return `Aborted editing ${workflowName}` - } - } - } - return undefined - }, - } - - async handleAccept(): Promise { - const logger = createLogger('EditWorkflowClientTool') - logger.info('handleAccept called', { toolCallId: this.toolCallId, state: this.getState() }) - // Tool was already marked complete in execute() - this is just for UI state - this.setState(ClientToolCallState.success) - } - - async handleReject(): Promise { - const logger = createLogger('EditWorkflowClientTool') - logger.info('handleReject called', { toolCallId: this.toolCallId, state: this.getState() }) - // Tool was already marked complete in execute() - this is just for UI state - this.setState(ClientToolCallState.rejected) - } - - async execute(args?: EditWorkflowArgs): Promise { - const logger = createLogger('EditWorkflowClientTool') - - if (this.hasExecuted) { - logger.info('execute skipped (already executed)', { toolCallId: this.toolCallId }) - return - } - - // Use timeout protection to ensure tool always completes - await this.executeWithTimeout(async () => { - this.hasExecuted = true - logger.info('execute called', { toolCallId: this.toolCallId, argsProvided: !!args }) - this.setState(ClientToolCallState.executing) - - // Resolve workflowId - let workflowId = args?.workflowId - if (!workflowId) { - const { activeWorkflowId } = useWorkflowRegistry.getState() - workflowId = activeWorkflowId as any - } - if (!workflowId) { - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, 'No active workflow found') - return - } - - // Store workflowId for later use - this.workflowId = workflowId - - // Validate operations - const operations = args?.operations || [] - if (!operations.length) { - this.setState(ClientToolCallState.error) - const currentWorkflowJson = 
this.getCurrentWorkflowJsonSafe(logger) - await this.markToolComplete( - 400, - 'No operations provided for edit_workflow', - currentWorkflowJson ? { userWorkflow: currentWorkflowJson } : undefined - ) - return - } - - // Prepare currentUserWorkflow JSON from stores to preserve block IDs - let currentUserWorkflow = args?.currentUserWorkflow - - if (!currentUserWorkflow) { - try { - const workflowStore = useWorkflowStore.getState() - const fullState = workflowStore.getWorkflowState() - const mergedBlocks = mergeSubblockState(fullState.blocks, workflowId as any) - const payloadState = stripWorkflowDiffMarkers({ - ...fullState, - blocks: mergedBlocks, - edges: fullState.edges || [], - loops: fullState.loops || {}, - parallels: fullState.parallels || {}, - }) - currentUserWorkflow = JSON.stringify(payloadState) - } catch (error) { - logger.warn('Failed to build currentUserWorkflow from stores; proceeding without it', { - error, - }) - } - } - - // Fetch with AbortController for timeout support - const controller = new AbortController() - const fetchTimeout = setTimeout(() => controller.abort(), 60000) // 60s fetch timeout - - try { - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - toolName: 'edit_workflow', - payload: { - operations, - workflowId, - ...(currentUserWorkflow ? 
{ currentUserWorkflow } : {}), - }, - }), - signal: controller.signal, - }) - - clearTimeout(fetchTimeout) - - if (!res.ok) { - const errorText = await res.text().catch(() => '') - let errorMessage: string - try { - const errorJson = JSON.parse(errorText) - errorMessage = errorJson.error || errorText || `Server error (${res.status})` - } catch { - errorMessage = errorText || `Server error (${res.status})` - } - // Mark complete with error but include current workflow state - this.setState(ClientToolCallState.error) - const currentWorkflowJson = this.getCurrentWorkflowJsonSafe(logger) - await this.markToolComplete( - res.status, - errorMessage, - currentWorkflowJson ? { userWorkflow: currentWorkflowJson } : undefined - ) - return - } - - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - const result = parsed.result as any - this.lastResult = result - logger.info('server result parsed', { - hasWorkflowState: !!result?.workflowState, - blocksCount: result?.workflowState - ? Object.keys(result.workflowState.blocks || {}).length - : 0, - hasSkippedItems: !!result?.skippedItems, - skippedItemsCount: result?.skippedItems?.length || 0, - hasInputValidationErrors: !!result?.inputValidationErrors, - inputValidationErrorsCount: result?.inputValidationErrors?.length || 0, - }) - - // Log skipped items and validation errors for visibility - if (result?.skippedItems?.length > 0) { - logger.warn('Some operations were skipped during edit_workflow', { - skippedItems: result.skippedItems, - }) - } - if (result?.inputValidationErrors?.length > 0) { - logger.warn('Some inputs were rejected during edit_workflow', { - inputValidationErrors: result.inputValidationErrors, - }) - } - - // Update diff directly with workflow state - no YAML conversion needed! 
- if (!result.workflowState) { - this.setState(ClientToolCallState.error) - const currentWorkflowJson = this.getCurrentWorkflowJsonSafe(logger) - await this.markToolComplete( - 500, - 'No workflow state returned from server', - currentWorkflowJson ? { userWorkflow: currentWorkflowJson } : undefined - ) - return - } - - let actualDiffWorkflow: WorkflowState | null = null - - if (!this.hasAppliedDiff) { - const diffStore = useWorkflowDiffStore.getState() - // setProposedChanges applies the state optimistically to the workflow store - await diffStore.setProposedChanges(result.workflowState) - logger.info('diff proposed changes set for edit_workflow with direct workflow state') - this.hasAppliedDiff = true - } - - // Read back the applied state from the workflow store - const workflowStore = useWorkflowStore.getState() - actualDiffWorkflow = workflowStore.getWorkflowState() - - if (!actualDiffWorkflow) { - this.setState(ClientToolCallState.error) - const currentWorkflowJson = this.getCurrentWorkflowJsonSafe(logger) - await this.markToolComplete( - 500, - 'Failed to retrieve workflow state after applying changes', - currentWorkflowJson ? 
{ userWorkflow: currentWorkflowJson } : undefined - ) - return - } - - // Get the workflow state that was just applied, merge subblocks, and sanitize - // This matches what get_user_workflow would return (the true state after edits were applied) - let workflowJson = this.getSanitizedWorkflowJson(actualDiffWorkflow) - - // Fallback: try to get current workflow state if sanitization failed - if (!workflowJson) { - workflowJson = this.getCurrentWorkflowJsonSafe(logger) - } - - // userWorkflow must always be present on success - log error if missing - if (!workflowJson) { - logger.error('Failed to get workflow JSON on success path - this should not happen', { - toolCallId: this.toolCallId, - workflowId: this.workflowId, - }) - } - - // Build sanitized data including workflow JSON and any skipped/validation info - // Always include userWorkflow on success paths - const sanitizedData: Record = { - userWorkflow: workflowJson ?? '{}', // Fallback to empty object JSON if all else fails - } - - // Include skipped items and validation errors in the response for LLM feedback - if (result?.skippedItems?.length > 0) { - sanitizedData.skippedItems = result.skippedItems - sanitizedData.skippedItemsMessage = result.skippedItemsMessage - } - if (result?.inputValidationErrors?.length > 0) { - sanitizedData.inputValidationErrors = result.inputValidationErrors - sanitizedData.inputValidationMessage = result.inputValidationMessage - } - - // Build a message that includes info about skipped items - let completeMessage = 'Workflow diff ready for review' - if (result?.skippedItems?.length > 0 || result?.inputValidationErrors?.length > 0) { - const parts: string[] = [] - if (result?.skippedItems?.length > 0) { - parts.push(`${result.skippedItems.length} operation(s) skipped`) - } - if (result?.inputValidationErrors?.length > 0) { - parts.push(`${result.inputValidationErrors.length} input(s) rejected`) - } - completeMessage = `Workflow diff ready for review. 
Note: ${parts.join(', ')}.` - } - - // Mark complete early to unblock LLM stream - sanitizedData always has userWorkflow - await this.markToolComplete(200, completeMessage, sanitizedData) - - // Move into review state - this.setState(ClientToolCallState.review, { result }) - } catch (fetchError: any) { - clearTimeout(fetchTimeout) - // Handle error with current workflow state - this.setState(ClientToolCallState.error) - const currentWorkflowJson = this.getCurrentWorkflowJsonSafe(logger) - const errorMessage = - fetchError.name === 'AbortError' - ? 'Server request timed out' - : fetchError.message || String(fetchError) - await this.markToolComplete( - 500, - errorMessage, - currentWorkflowJson ? { userWorkflow: currentWorkflowJson } : undefined - ) - } - }) - } -} - -// Register UI config at module load -registerToolUIConfig(EditWorkflowClientTool.id, EditWorkflowClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/workflow/get-block-outputs.ts b/apps/sim/lib/copilot/tools/client/workflow/get-block-outputs.ts deleted file mode 100644 index d835678d3..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/get-block-outputs.ts +++ /dev/null @@ -1,144 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Tag, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { - computeBlockOutputPaths, - formatOutputsWithPrefix, - getSubflowInsidePaths, - getWorkflowSubBlockValues, - getWorkflowVariables, -} from '@/lib/copilot/tools/client/workflow/block-output-utils' -import { - GetBlockOutputsResult, - type GetBlockOutputsResultType, -} from '@/lib/copilot/tools/shared/schemas' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' -import { useWorkflowStore } from '@/stores/workflows/workflow/store' - -const logger = createLogger('GetBlockOutputsClientTool') - -interface GetBlockOutputsArgs { - 
blockIds?: string[] -} - -export class GetBlockOutputsClientTool extends BaseClientTool { - static readonly id = 'get_block_outputs' - - constructor(toolCallId: string) { - super(toolCallId, GetBlockOutputsClientTool.id, GetBlockOutputsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting block outputs', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting block outputs', icon: Tag }, - [ClientToolCallState.executing]: { text: 'Getting block outputs', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted getting outputs', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Retrieved block outputs', icon: Tag }, - [ClientToolCallState.error]: { text: 'Failed to get outputs', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped getting outputs', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const blockIds = params?.blockIds - if (blockIds && Array.isArray(blockIds) && blockIds.length > 0) { - const count = blockIds.length - switch (state) { - case ClientToolCallState.success: - return `Retrieved outputs for ${count} block${count > 1 ? 's' : ''}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Getting outputs for ${count} block${count > 1 ? 's' : ''}` - case ClientToolCallState.error: - return `Failed to get outputs for ${count} block${count > 1 ? 
's' : ''}` - } - } - return undefined - }, - } - - async execute(args?: GetBlockOutputsArgs): Promise { - try { - this.setState(ClientToolCallState.executing) - - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (!activeWorkflowId) { - await this.markToolComplete(400, 'No active workflow found') - this.setState(ClientToolCallState.error) - return - } - - const workflowStore = useWorkflowStore.getState() - const blocks = workflowStore.blocks || {} - const loops = workflowStore.loops || {} - const parallels = workflowStore.parallels || {} - const subBlockValues = getWorkflowSubBlockValues(activeWorkflowId) - - const ctx = { workflowId: activeWorkflowId, blocks, loops, parallels, subBlockValues } - const targetBlockIds = - args?.blockIds && args.blockIds.length > 0 ? args.blockIds : Object.keys(blocks) - - const blockOutputs: GetBlockOutputsResultType['blocks'] = [] - - for (const blockId of targetBlockIds) { - const block = blocks[blockId] - if (!block?.type) continue - - const blockName = block.name || block.type - - const blockOutput: GetBlockOutputsResultType['blocks'][0] = { - blockId, - blockName, - blockType: block.type, - outputs: [], - } - - // Include triggerMode if the block is in trigger mode - if (block.triggerMode) { - blockOutput.triggerMode = true - } - - if (block.type === 'loop' || block.type === 'parallel') { - const insidePaths = getSubflowInsidePaths(block.type, blockId, loops, parallels) - blockOutput.insideSubflowOutputs = formatOutputsWithPrefix(insidePaths, blockName) - blockOutput.outsideSubflowOutputs = formatOutputsWithPrefix(['results'], blockName) - } else { - const outputPaths = computeBlockOutputPaths(block, ctx) - blockOutput.outputs = formatOutputsWithPrefix(outputPaths, blockName) - } - - blockOutputs.push(blockOutput) - } - - const includeVariables = !args?.blockIds || args.blockIds.length === 0 - const resultData: { - blocks: typeof blockOutputs - variables?: ReturnType - } = { - blocks: blockOutputs, - } - if 
(includeVariables) { - resultData.variables = getWorkflowVariables(activeWorkflowId) - } - - const result = GetBlockOutputsResult.parse(resultData) - - logger.info('Retrieved block outputs', { - blockCount: blockOutputs.length, - variableCount: resultData.variables?.length ?? 0, - }) - - await this.markToolComplete(200, 'Retrieved block outputs', result) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? error.message : String(error) - logger.error('Error in tool execution', { toolCallId: this.toolCallId, error, message }) - await this.markToolComplete(500, message || 'Failed to get block outputs') - this.setState(ClientToolCallState.error) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/get-block-upstream-references.ts b/apps/sim/lib/copilot/tools/client/workflow/get-block-upstream-references.ts deleted file mode 100644 index f02c9958c..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/get-block-upstream-references.ts +++ /dev/null @@ -1,231 +0,0 @@ -import { createLogger } from '@sim/logger' -import { GitBranch, Loader2, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { - computeBlockOutputPaths, - formatOutputsWithPrefix, - getSubflowInsidePaths, - getWorkflowSubBlockValues, - getWorkflowVariables, -} from '@/lib/copilot/tools/client/workflow/block-output-utils' -import { - GetBlockUpstreamReferencesResult, - type GetBlockUpstreamReferencesResultType, -} from '@/lib/copilot/tools/shared/schemas' -import { BlockPathCalculator } from '@/lib/workflows/blocks/block-path-calculator' -import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' -import { useWorkflowStore } from '@/stores/workflows/workflow/store' -import type { Loop, Parallel } from 
'@/stores/workflows/workflow/types' - -const logger = createLogger('GetBlockUpstreamReferencesClientTool') - -interface GetBlockUpstreamReferencesArgs { - blockIds: string[] -} - -export class GetBlockUpstreamReferencesClientTool extends BaseClientTool { - static readonly id = 'get_block_upstream_references' - - constructor(toolCallId: string) { - super( - toolCallId, - GetBlockUpstreamReferencesClientTool.id, - GetBlockUpstreamReferencesClientTool.metadata - ) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Getting upstream references', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Getting upstream references', icon: GitBranch }, - [ClientToolCallState.executing]: { text: 'Getting upstream references', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted getting references', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Retrieved upstream references', icon: GitBranch }, - [ClientToolCallState.error]: { text: 'Failed to get references', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped getting references', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const blockIds = params?.blockIds - if (blockIds && Array.isArray(blockIds) && blockIds.length > 0) { - const count = blockIds.length - switch (state) { - case ClientToolCallState.success: - return `Retrieved references for ${count} block${count > 1 ? 's' : ''}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Getting references for ${count} block${count > 1 ? 's' : ''}` - case ClientToolCallState.error: - return `Failed to get references for ${count} block${count > 1 ? 
's' : ''}` - } - } - return undefined - }, - } - - async execute(args?: GetBlockUpstreamReferencesArgs): Promise { - try { - this.setState(ClientToolCallState.executing) - - if (!args?.blockIds || args.blockIds.length === 0) { - await this.markToolComplete(400, 'blockIds array is required') - this.setState(ClientToolCallState.error) - return - } - - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (!activeWorkflowId) { - await this.markToolComplete(400, 'No active workflow found') - this.setState(ClientToolCallState.error) - return - } - - const workflowStore = useWorkflowStore.getState() - const blocks = workflowStore.blocks || {} - const edges = workflowStore.edges || [] - const loops = workflowStore.loops || {} - const parallels = workflowStore.parallels || {} - const subBlockValues = getWorkflowSubBlockValues(activeWorkflowId) - - const ctx = { workflowId: activeWorkflowId, blocks, loops, parallels, subBlockValues } - const variableOutputs = getWorkflowVariables(activeWorkflowId) - const graphEdges = edges.map((edge) => ({ source: edge.source, target: edge.target })) - - const results: GetBlockUpstreamReferencesResultType['results'] = [] - - for (const blockId of args.blockIds) { - const targetBlock = blocks[blockId] - if (!targetBlock) { - logger.warn(`Block ${blockId} not found`) - continue - } - - const insideSubflows: { blockId: string; blockName: string; blockType: string }[] = [] - const containingLoopIds = new Set() - const containingParallelIds = new Set() - - Object.values(loops as Record).forEach((loop) => { - if (loop?.nodes?.includes(blockId)) { - containingLoopIds.add(loop.id) - const loopBlock = blocks[loop.id] - if (loopBlock) { - insideSubflows.push({ - blockId: loop.id, - blockName: loopBlock.name || loopBlock.type, - blockType: 'loop', - }) - } - } - }) - - Object.values(parallels as Record).forEach((parallel) => { - if (parallel?.nodes?.includes(blockId)) { - containingParallelIds.add(parallel.id) - const parallelBlock = 
blocks[parallel.id] - if (parallelBlock) { - insideSubflows.push({ - blockId: parallel.id, - blockName: parallelBlock.name || parallelBlock.type, - blockType: 'parallel', - }) - } - } - }) - - const ancestorIds = BlockPathCalculator.findAllPathNodes(graphEdges, blockId) - const accessibleIds = new Set(ancestorIds) - accessibleIds.add(blockId) - - const starterBlock = Object.values(blocks).find((b) => isInputDefinitionTrigger(b.type)) - if (starterBlock && ancestorIds.includes(starterBlock.id)) { - accessibleIds.add(starterBlock.id) - } - - containingLoopIds.forEach((loopId) => { - accessibleIds.add(loopId) - loops[loopId]?.nodes?.forEach((nodeId) => accessibleIds.add(nodeId)) - }) - - containingParallelIds.forEach((parallelId) => { - accessibleIds.add(parallelId) - parallels[parallelId]?.nodes?.forEach((nodeId) => accessibleIds.add(nodeId)) - }) - - const accessibleBlocks: GetBlockUpstreamReferencesResultType['results'][0]['accessibleBlocks'] = - [] - - for (const accessibleBlockId of accessibleIds) { - const block = blocks[accessibleBlockId] - if (!block?.type) continue - - const canSelfReference = block.type === 'approval' || block.type === 'human_in_the_loop' - if (accessibleBlockId === blockId && !canSelfReference) continue - - const blockName = block.name || block.type - let accessContext: 'inside' | 'outside' | undefined - let outputPaths: string[] - - if (block.type === 'loop' || block.type === 'parallel') { - const isInside = - (block.type === 'loop' && containingLoopIds.has(accessibleBlockId)) || - (block.type === 'parallel' && containingParallelIds.has(accessibleBlockId)) - - accessContext = isInside ? 'inside' : 'outside' - outputPaths = isInside - ? 
getSubflowInsidePaths(block.type, accessibleBlockId, loops, parallels) - : ['results'] - } else { - outputPaths = computeBlockOutputPaths(block, ctx) - } - - const formattedOutputs = formatOutputsWithPrefix(outputPaths, blockName) - - const entry: GetBlockUpstreamReferencesResultType['results'][0]['accessibleBlocks'][0] = { - blockId: accessibleBlockId, - blockName, - blockType: block.type, - outputs: formattedOutputs, - } - - // Include triggerMode if the block is in trigger mode - if (block.triggerMode) { - entry.triggerMode = true - } - - if (accessContext) entry.accessContext = accessContext - accessibleBlocks.push(entry) - } - - const resultEntry: GetBlockUpstreamReferencesResultType['results'][0] = { - blockId, - blockName: targetBlock.name || targetBlock.type, - accessibleBlocks, - variables: variableOutputs, - } - - if (insideSubflows.length > 0) resultEntry.insideSubflows = insideSubflows - results.push(resultEntry) - } - - const result = GetBlockUpstreamReferencesResult.parse({ results }) - - logger.info('Retrieved upstream references', { - blockIds: args.blockIds, - resultCount: results.length, - }) - - await this.markToolComplete(200, 'Retrieved upstream references', result) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? 
error.message : String(error) - logger.error('Error in tool execution', { toolCallId: this.toolCallId, error, message }) - await this.markToolComplete(500, message || 'Failed to get upstream references') - this.setState(ClientToolCallState.error) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/get-user-workflow.ts b/apps/sim/lib/copilot/tools/client/workflow/get-user-workflow.ts deleted file mode 100644 index c67f92a9e..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/get-user-workflow.ts +++ /dev/null @@ -1,187 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Workflow as WorkflowIcon, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { stripWorkflowDiffMarkers } from '@/lib/workflows/diff' -import { sanitizeForCopilot } from '@/lib/workflows/sanitization/json-sanitizer' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' -import { mergeSubblockState } from '@/stores/workflows/utils' -import { useWorkflowStore } from '@/stores/workflows/workflow/store' - -interface GetUserWorkflowArgs { - workflowId?: string - includeMetadata?: boolean -} - -const logger = createLogger('GetUserWorkflowClientTool') - -export class GetUserWorkflowClientTool extends BaseClientTool { - static readonly id = 'get_user_workflow' - - constructor(toolCallId: string) { - super(toolCallId, GetUserWorkflowClientTool.id, GetUserWorkflowClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Reading your workflow', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Reading your workflow', icon: WorkflowIcon }, - [ClientToolCallState.executing]: { text: 'Reading your workflow', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted reading your workflow', icon: XCircle }, - 
[ClientToolCallState.success]: { text: 'Read your workflow', icon: WorkflowIcon }, - [ClientToolCallState.error]: { text: 'Failed to read your workflow', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped reading your workflow', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId - if (workflowId) { - const workflowName = useWorkflowRegistry.getState().workflows[workflowId]?.name - if (workflowName) { - switch (state) { - case ClientToolCallState.success: - return `Read ${workflowName}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Reading ${workflowName}` - case ClientToolCallState.error: - return `Failed to read ${workflowName}` - case ClientToolCallState.aborted: - return `Aborted reading ${workflowName}` - case ClientToolCallState.rejected: - return `Skipped reading ${workflowName}` - } - } - } - return undefined - }, - } - - async execute(args?: GetUserWorkflowArgs): Promise { - try { - this.setState(ClientToolCallState.executing) - - // Determine workflow ID (explicit or active) - let workflowId = args?.workflowId - if (!workflowId) { - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (!activeWorkflowId) { - await this.markToolComplete(400, 'No active workflow found') - this.setState(ClientToolCallState.error) - return - } - workflowId = activeWorkflowId as any - } - - logger.info('Fetching user workflow from stores', { - workflowId, - includeMetadata: args?.includeMetadata, - }) - - // Always use main workflow store as the source of truth - const workflowStore = useWorkflowStore.getState() - const fullWorkflowState = workflowStore.getWorkflowState() - - let workflowState: any = null - - if (!fullWorkflowState || !fullWorkflowState.blocks) { - const workflowRegistry = useWorkflowRegistry.getState() - const wfKey = String(workflowId) - const workflow = 
(workflowRegistry as any).workflows?.[wfKey] - - if (!workflow) { - await this.markToolComplete(404, `Workflow ${workflowId} not found in any store`) - this.setState(ClientToolCallState.error) - return - } - - logger.warn('No workflow state found, using workflow metadata only', { workflowId }) - workflowState = workflow - } else { - workflowState = stripWorkflowDiffMarkers(fullWorkflowState) - logger.info('Using workflow state from workflow store', { - workflowId, - blockCount: Object.keys(fullWorkflowState.blocks || {}).length, - }) - } - - // Normalize required properties - if (workflowState) { - if (!workflowState.loops) workflowState.loops = {} - if (!workflowState.parallels) workflowState.parallels = {} - if (!workflowState.edges) workflowState.edges = [] - if (!workflowState.blocks) workflowState.blocks = {} - } - - // Merge latest subblock values so edits are reflected - try { - if (workflowState?.blocks) { - workflowState = { - ...workflowState, - blocks: mergeSubblockState(workflowState.blocks, workflowId as any), - } - logger.info('Merged subblock values into workflow state', { - workflowId, - blockCount: Object.keys(workflowState.blocks || {}).length, - }) - } - } catch (mergeError) { - logger.warn('Failed to merge subblock values; proceeding with raw workflow state', { - workflowId, - error: mergeError instanceof Error ? 
mergeError.message : String(mergeError), - }) - } - - logger.info('Validating workflow state', { - workflowId, - hasWorkflowState: !!workflowState, - hasBlocks: !!workflowState?.blocks, - workflowStateType: typeof workflowState, - }) - - if (!workflowState || !workflowState.blocks) { - await this.markToolComplete(422, 'Workflow state is empty or invalid') - this.setState(ClientToolCallState.error) - return - } - - // Sanitize workflow state for copilot (remove UI-specific data) - const sanitizedState = sanitizeForCopilot(workflowState) - - // Convert to JSON string for transport - let workflowJson = '' - try { - workflowJson = JSON.stringify(sanitizedState, null, 2) - logger.info('Successfully stringified sanitized workflow state', { - workflowId, - jsonLength: workflowJson.length, - }) - } catch (stringifyError) { - await this.markToolComplete( - 500, - `Failed to convert workflow to JSON: ${ - stringifyError instanceof Error ? stringifyError.message : 'Unknown error' - }` - ) - this.setState(ClientToolCallState.error) - return - } - - // Mark complete with data; keep state success for store render - await this.markToolComplete(200, 'Workflow analyzed', { userWorkflow: workflowJson }) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? 
error.message : String(error) - logger.error('Error in tool execution', { - toolCallId: this.toolCallId, - error, - message, - }) - await this.markToolComplete(500, message || 'Failed to fetch workflow') - this.setState(ClientToolCallState.error) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/get-workflow-console.ts b/apps/sim/lib/copilot/tools/client/workflow/get-workflow-console.ts deleted file mode 100644 index 328ae5aad..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/get-workflow-console.ts +++ /dev/null @@ -1,112 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, MinusCircle, TerminalSquare, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface GetWorkflowConsoleArgs { - workflowId?: string - limit?: number - includeDetails?: boolean -} - -export class GetWorkflowConsoleClientTool extends BaseClientTool { - static readonly id = 'get_workflow_console' - - constructor(toolCallId: string) { - super(toolCallId, GetWorkflowConsoleClientTool.id, GetWorkflowConsoleClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Fetching execution logs', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Fetching execution logs', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Fetched execution logs', icon: TerminalSquare }, - [ClientToolCallState.error]: { text: 'Failed to fetch execution logs', icon: XCircle }, - [ClientToolCallState.rejected]: { - text: 'Skipped fetching execution logs', - icon: MinusCircle, - }, - [ClientToolCallState.aborted]: { - text: 'Aborted fetching execution logs', - icon: MinusCircle, - }, - 
[ClientToolCallState.pending]: { text: 'Fetching execution logs', icon: Loader2 }, - }, - getDynamicText: (params, state) => { - const limit = params?.limit - if (limit && typeof limit === 'number') { - const logText = limit === 1 ? 'execution log' : 'execution logs' - - switch (state) { - case ClientToolCallState.success: - return `Fetched last ${limit} ${logText}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Fetching last ${limit} ${logText}` - case ClientToolCallState.error: - return `Failed to fetch last ${limit} ${logText}` - case ClientToolCallState.rejected: - return `Skipped fetching last ${limit} ${logText}` - case ClientToolCallState.aborted: - return `Aborted fetching last ${limit} ${logText}` - } - } - return undefined - }, - } - - async execute(args?: GetWorkflowConsoleArgs): Promise { - const logger = createLogger('GetWorkflowConsoleClientTool') - try { - this.setState(ClientToolCallState.executing) - - const params = args || {} - let workflowId = params.workflowId - if (!workflowId) { - const { activeWorkflowId } = useWorkflowRegistry.getState() - workflowId = activeWorkflowId || undefined - } - if (!workflowId) { - logger.error('No active workflow found for console fetch') - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, 'No active workflow found') - return - } - - const payload = { - workflowId, - limit: params.limit ?? 3, - includeDetails: params.includeDetails ?? 
true, - } - - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'get_workflow_console', payload }), - }) - if (!res.ok) { - const text = await res.text().catch(() => '') - throw new Error(text || `Server error (${res.status})`) - } - - const json = await res.json() - const parsed = ExecuteResponseSuccessSchema.parse(json) - - // Mark success and include result data for UI rendering - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Workflow console fetched', parsed.result) - this.setState(ClientToolCallState.success) - } catch (e: any) { - const message = e instanceof Error ? e.message : String(e) - createLogger('GetWorkflowConsoleClientTool').error('execute failed', { message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, message) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/get-workflow-data.ts b/apps/sim/lib/copilot/tools/client/workflow/get-workflow-data.ts deleted file mode 100644 index 657daa0a0..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/get-workflow-data.ts +++ /dev/null @@ -1,269 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Database, Loader2, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -const logger = createLogger('GetWorkflowDataClientTool') - -/** Data type enum for the get_workflow_data tool */ -export type WorkflowDataType = 'global_variables' | 'custom_tools' | 'mcp_tools' | 'files' - -interface GetWorkflowDataArgs { - data_type: WorkflowDataType -} - -export class GetWorkflowDataClientTool extends BaseClientTool { - static readonly id = 'get_workflow_data' - - constructor(toolCallId: string) { - super(toolCallId, 
GetWorkflowDataClientTool.id, GetWorkflowDataClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Fetching workflow data', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Fetching workflow data', icon: Database }, - [ClientToolCallState.executing]: { text: 'Fetching workflow data', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted fetching data', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Retrieved workflow data', icon: Database }, - [ClientToolCallState.error]: { text: 'Failed to fetch data', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped fetching data', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const dataType = params?.data_type as WorkflowDataType | undefined - if (!dataType) return undefined - - const typeLabels: Record = { - global_variables: 'variables', - custom_tools: 'custom tools', - mcp_tools: 'MCP tools', - files: 'files', - } - - const label = typeLabels[dataType] || dataType - - switch (state) { - case ClientToolCallState.success: - return `Retrieved ${label}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - return `Fetching ${label}` - case ClientToolCallState.pending: - return `Fetch ${label}?` - case ClientToolCallState.error: - return `Failed to fetch ${label}` - case ClientToolCallState.aborted: - return `Aborted fetching ${label}` - case ClientToolCallState.rejected: - return `Skipped fetching ${label}` - } - return undefined - }, - } - - async execute(args?: GetWorkflowDataArgs): Promise { - try { - this.setState(ClientToolCallState.executing) - - const dataType = args?.data_type - if (!dataType) { - await this.markToolComplete(400, 'Missing data_type parameter') - this.setState(ClientToolCallState.error) - return - } - - const { activeWorkflowId, hydration } = useWorkflowRegistry.getState() - const activeWorkspaceId = hydration.workspaceId - - 
switch (dataType) { - case 'global_variables': - await this.fetchGlobalVariables(activeWorkflowId) - break - case 'custom_tools': - await this.fetchCustomTools(activeWorkspaceId) - break - case 'mcp_tools': - await this.fetchMcpTools(activeWorkspaceId) - break - case 'files': - await this.fetchFiles(activeWorkspaceId) - break - default: - await this.markToolComplete(400, `Unknown data_type: ${dataType}`) - this.setState(ClientToolCallState.error) - return - } - } catch (error: unknown) { - const message = error instanceof Error ? error.message : String(error) - await this.markToolComplete(500, message || 'Failed to fetch workflow data') - this.setState(ClientToolCallState.error) - } - } - - /** - * Fetch global workflow variables - */ - private async fetchGlobalVariables(workflowId: string | null): Promise { - if (!workflowId) { - await this.markToolComplete(400, 'No active workflow found') - this.setState(ClientToolCallState.error) - return - } - - const res = await fetch(`/api/workflows/${workflowId}/variables`, { method: 'GET' }) - if (!res.ok) { - const text = await res.text().catch(() => '') - await this.markToolComplete(res.status, text || 'Failed to fetch workflow variables') - this.setState(ClientToolCallState.error) - return - } - - const json = await res.json() - const varsRecord = (json?.data as Record) || {} - const variables = Object.values(varsRecord).map((v: unknown) => { - const variable = v as { id?: string; name?: string; value?: unknown } - return { - id: String(variable?.id || ''), - name: String(variable?.name || ''), - value: variable?.value, - } - }) - - logger.info('Fetched workflow variables', { count: variables.length }) - await this.markToolComplete(200, `Found ${variables.length} variable(s)`, { variables }) - this.setState(ClientToolCallState.success) - } - - /** - * Fetch custom tools for the workspace - */ - private async fetchCustomTools(workspaceId: string | null): Promise { - if (!workspaceId) { - await this.markToolComplete(400, 
'No active workspace found') - this.setState(ClientToolCallState.error) - return - } - - const res = await fetch(`/api/tools/custom?workspaceId=${workspaceId}`, { method: 'GET' }) - if (!res.ok) { - const text = await res.text().catch(() => '') - await this.markToolComplete(res.status, text || 'Failed to fetch custom tools') - this.setState(ClientToolCallState.error) - return - } - - const json = await res.json() - const toolsData = (json?.data as unknown[]) || [] - const customTools = toolsData.map((tool: unknown) => { - const t = tool as { - id?: string - title?: string - schema?: { function?: { name?: string; description?: string; parameters?: unknown } } - code?: string - } - return { - id: String(t?.id || ''), - title: String(t?.title || ''), - functionName: String(t?.schema?.function?.name || ''), - description: String(t?.schema?.function?.description || ''), - parameters: t?.schema?.function?.parameters, - } - }) - - logger.info('Fetched custom tools', { count: customTools.length }) - await this.markToolComplete(200, `Found ${customTools.length} custom tool(s)`, { customTools }) - this.setState(ClientToolCallState.success) - } - - /** - * Fetch MCP tools for the workspace - */ - private async fetchMcpTools(workspaceId: string | null): Promise { - if (!workspaceId) { - await this.markToolComplete(400, 'No active workspace found') - this.setState(ClientToolCallState.error) - return - } - - const res = await fetch(`/api/mcp/tools/discover?workspaceId=${workspaceId}`, { method: 'GET' }) - if (!res.ok) { - const text = await res.text().catch(() => '') - await this.markToolComplete(res.status, text || 'Failed to fetch MCP tools') - this.setState(ClientToolCallState.error) - return - } - - const json = await res.json() - const toolsData = (json?.data?.tools as unknown[]) || [] - const mcpTools = toolsData.map((tool: unknown) => { - const t = tool as { - name?: string - serverId?: string - serverName?: string - description?: string - inputSchema?: unknown - } - 
return { - name: String(t?.name || ''), - serverId: String(t?.serverId || ''), - serverName: String(t?.serverName || ''), - description: String(t?.description || ''), - inputSchema: t?.inputSchema, - } - }) - - logger.info('Fetched MCP tools', { count: mcpTools.length }) - await this.markToolComplete(200, `Found ${mcpTools.length} MCP tool(s)`, { mcpTools }) - this.setState(ClientToolCallState.success) - } - - /** - * Fetch workspace files metadata - */ - private async fetchFiles(workspaceId: string | null): Promise { - if (!workspaceId) { - await this.markToolComplete(400, 'No active workspace found') - this.setState(ClientToolCallState.error) - return - } - - const res = await fetch(`/api/workspaces/${workspaceId}/files`, { method: 'GET' }) - if (!res.ok) { - const text = await res.text().catch(() => '') - await this.markToolComplete(res.status, text || 'Failed to fetch files') - this.setState(ClientToolCallState.error) - return - } - - const json = await res.json() - const filesData = (json?.files as unknown[]) || [] - const files = filesData.map((file: unknown) => { - const f = file as { - id?: string - name?: string - key?: string - path?: string - size?: number - type?: string - uploadedAt?: string - } - return { - id: String(f?.id || ''), - name: String(f?.name || ''), - key: String(f?.key || ''), - path: String(f?.path || ''), - size: Number(f?.size || 0), - type: String(f?.type || ''), - uploadedAt: String(f?.uploadedAt || ''), - } - }) - - logger.info('Fetched workspace files', { count: files.length }) - await this.markToolComplete(200, `Found ${files.length} file(s)`, { files }) - this.setState(ClientToolCallState.success) - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/get-workflow-from-name.ts b/apps/sim/lib/copilot/tools/client/workflow/get-workflow-from-name.ts deleted file mode 100644 index 18aeb335f..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/get-workflow-from-name.ts +++ /dev/null @@ -1,119 +0,0 @@ -import { 
createLogger } from '@sim/logger' -import { FileText, Loader2, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { sanitizeForCopilot } from '@/lib/workflows/sanitization/json-sanitizer' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -const logger = createLogger('GetWorkflowFromNameClientTool') - -interface GetWorkflowFromNameArgs { - workflow_name: string -} - -export class GetWorkflowFromNameClientTool extends BaseClientTool { - static readonly id = 'get_workflow_from_name' - - constructor(toolCallId: string) { - super(toolCallId, GetWorkflowFromNameClientTool.id, GetWorkflowFromNameClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Reading workflow', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Reading workflow', icon: FileText }, - [ClientToolCallState.executing]: { text: 'Reading workflow', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted reading workflow', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Read workflow', icon: FileText }, - [ClientToolCallState.error]: { text: 'Failed to read workflow', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped reading workflow', icon: XCircle }, - }, - getDynamicText: (params, state) => { - if (params?.workflow_name && typeof params.workflow_name === 'string') { - const workflowName = params.workflow_name - - switch (state) { - case ClientToolCallState.success: - return `Read ${workflowName}` - case ClientToolCallState.executing: - case ClientToolCallState.generating: - case ClientToolCallState.pending: - return `Reading ${workflowName}` - case ClientToolCallState.error: - return `Failed to read ${workflowName}` - case ClientToolCallState.aborted: - return `Aborted reading ${workflowName}` - case ClientToolCallState.rejected: - 
return `Skipped reading ${workflowName}` - } - } - return undefined - }, - } - - async execute(args?: GetWorkflowFromNameArgs): Promise { - try { - this.setState(ClientToolCallState.executing) - - const workflowName = args?.workflow_name?.trim() - if (!workflowName) { - await this.markToolComplete(400, 'workflow_name is required') - this.setState(ClientToolCallState.error) - return - } - - // Try to find by name from registry first to get ID - const registry = useWorkflowRegistry.getState() - const match = Object.values((registry as any).workflows || {}).find( - (w: any) => - String(w?.name || '') - .trim() - .toLowerCase() === workflowName.toLowerCase() - ) as any - - if (!match?.id) { - await this.markToolComplete(404, `Workflow not found: ${workflowName}`) - this.setState(ClientToolCallState.error) - return - } - - // Fetch full workflow from API route (normalized tables) - const res = await fetch(`/api/workflows/${encodeURIComponent(match.id)}`, { method: 'GET' }) - if (!res.ok) { - const text = await res.text().catch(() => '') - await this.markToolComplete(res.status, text || 'Failed to fetch workflow by name') - this.setState(ClientToolCallState.error) - return - } - - const json = await res.json() - const wf = json?.data - if (!wf?.state?.blocks) { - await this.markToolComplete(422, 'Workflow state is empty or invalid') - this.setState(ClientToolCallState.error) - return - } - - // Convert state to the same string format as get_user_workflow - const workflowState = { - blocks: wf.state.blocks || {}, - edges: wf.state.edges || [], - loops: wf.state.loops || {}, - parallels: wf.state.parallels || {}, - } - // Sanitize workflow state for copilot (remove UI-specific data) - const sanitizedState = sanitizeForCopilot(workflowState) - const userWorkflow = JSON.stringify(sanitizedState, null, 2) - - await this.markToolComplete(200, `Retrieved workflow ${workflowName}`, { userWorkflow }) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const 
message = error instanceof Error ? error.message : String(error) - await this.markToolComplete(500, message || 'Failed to retrieve workflow by name') - this.setState(ClientToolCallState.error) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/list-user-workflows.ts b/apps/sim/lib/copilot/tools/client/workflow/list-user-workflows.ts deleted file mode 100644 index 551982029..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/list-user-workflows.ts +++ /dev/null @@ -1,60 +0,0 @@ -import { createLogger } from '@sim/logger' -import { ListChecks, Loader2, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' - -const logger = createLogger('ListUserWorkflowsClientTool') - -export class ListUserWorkflowsClientTool extends BaseClientTool { - static readonly id = 'list_user_workflows' - - constructor(toolCallId: string) { - super(toolCallId, ListUserWorkflowsClientTool.id, ListUserWorkflowsClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Listing your workflows', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Listing your workflows', icon: ListChecks }, - [ClientToolCallState.executing]: { text: 'Listing your workflows', icon: Loader2 }, - [ClientToolCallState.aborted]: { text: 'Aborted listing workflows', icon: XCircle }, - [ClientToolCallState.success]: { text: 'Listed your workflows', icon: ListChecks }, - [ClientToolCallState.error]: { text: 'Failed to list workflows', icon: X }, - [ClientToolCallState.rejected]: { text: 'Skipped listing workflows', icon: XCircle }, - }, - } - - async execute(): Promise { - try { - this.setState(ClientToolCallState.executing) - - const res = await fetch('/api/workflows', { method: 'GET' }) - if (!res.ok) { - const text = await res.text().catch(() => '') - await this.markToolComplete(res.status, text || 
'Failed to fetch workflows') - this.setState(ClientToolCallState.error) - return - } - - const json = await res.json() - const workflows = Array.isArray(json?.data) ? json.data : [] - const names = workflows - .map((w: any) => (typeof w?.name === 'string' ? w.name : null)) - .filter((n: string | null) => !!n) - - logger.info('Found workflows', { count: names.length }) - - await this.markToolComplete(200, `Found ${names.length} workflow(s)`, { - workflow_names: names, - }) - this.setState(ClientToolCallState.success) - } catch (error: any) { - const message = error instanceof Error ? error.message : String(error) - await this.markToolComplete(500, message || 'Failed to list workflows') - this.setState(ClientToolCallState.error) - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/list-workspace-mcp-servers.ts b/apps/sim/lib/copilot/tools/client/workflow/list-workspace-mcp-servers.ts deleted file mode 100644 index 1dad9fbf7..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/list-workspace-mcp-servers.ts +++ /dev/null @@ -1,112 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Server, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface ListWorkspaceMcpServersArgs { - workspaceId?: string -} - -export interface WorkspaceMcpServer { - id: string - name: string - description: string | null - toolCount: number - toolNames: string[] -} - -/** - * List workspace MCP servers tool. - * Returns a list of MCP servers available in the workspace that workflows can be deployed to. 
- */ -export class ListWorkspaceMcpServersClientTool extends BaseClientTool { - static readonly id = 'list_workspace_mcp_servers' - - constructor(toolCallId: string) { - super( - toolCallId, - ListWorkspaceMcpServersClientTool.id, - ListWorkspaceMcpServersClientTool.metadata - ) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Getting MCP servers', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Getting MCP servers', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Getting MCP servers', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Retrieved MCP servers', icon: Server }, - [ClientToolCallState.error]: { text: 'Failed to get MCP servers', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted getting MCP servers', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped getting MCP servers', icon: XCircle }, - }, - interrupt: undefined, - } - - async execute(args?: ListWorkspaceMcpServersArgs): Promise { - const logger = createLogger('ListWorkspaceMcpServersClientTool') - try { - this.setState(ClientToolCallState.executing) - - // Get workspace ID from active workflow if not provided - const { activeWorkflowId, workflows } = useWorkflowRegistry.getState() - let workspaceId = args?.workspaceId - - if (!workspaceId && activeWorkflowId) { - workspaceId = workflows[activeWorkflowId]?.workspaceId - } - - if (!workspaceId) { - throw new Error('No workspace ID available') - } - - const res = await fetch(`/api/mcp/workflow-servers?workspaceId=${workspaceId}`) - - if (!res.ok) { - const data = await res.json().catch(() => ({})) - throw new Error(data.error || `Failed to fetch MCP servers (${res.status})`) - } - - const data = await res.json() - const servers: WorkspaceMcpServer[] = (data.data?.servers || []).map((s: any) => ({ - id: s.id, - name: s.name, - description: s.description, - toolCount: s.toolCount || 0, - toolNames: 
s.toolNames || [], - })) - - this.setState(ClientToolCallState.success) - - if (servers.length === 0) { - await this.markToolComplete( - 200, - 'No MCP servers found in this workspace. Use create_workspace_mcp_server to create one.', - { servers: [], count: 0 } - ) - } else { - await this.markToolComplete( - 200, - `Found ${servers.length} MCP server(s) in the workspace.`, - { - servers, - count: servers.length, - } - ) - } - - logger.info(`Listed ${servers.length} MCP servers`) - } catch (e: any) { - logger.error('Failed to list MCP servers', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to list MCP servers') - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/manage-custom-tool.ts b/apps/sim/lib/copilot/tools/client/workflow/manage-custom-tool.ts deleted file mode 100644 index 58a823637..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/manage-custom-tool.ts +++ /dev/null @@ -1,408 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Check, Loader2, Plus, X, XCircle } from 'lucide-react' -import { client } from '@/lib/auth/auth-client' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { getCustomTool } from '@/hooks/queries/custom-tools' -import { useCopilotStore } from '@/stores/panel/copilot/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface CustomToolSchema { - type: 'function' - function: { - name: string - description?: string - parameters: { - type: string - properties: Record - required?: string[] - } - } -} - -interface ManageCustomToolArgs { - operation: 'add' | 'edit' | 'delete' | 'list' - toolId?: string - schema?: CustomToolSchema - code?: string -} - -const API_ENDPOINT = '/api/tools/custom' - -async function checkCustomToolsPermission(): Promise { - const activeOrgResponse = await 
client.organization.getFullOrganization() - const organizationId = activeOrgResponse.data?.id - if (!organizationId) return - - const response = await fetch(`/api/permission-groups/user?organizationId=${organizationId}`) - if (!response.ok) return - - const data = await response.json() - if (data?.config?.disableCustomTools) { - throw new Error('Custom tools are not allowed based on your permission group settings') - } -} - -/** - * Client tool for creating, editing, and deleting custom tools via the copilot. - */ -export class ManageCustomToolClientTool extends BaseClientTool { - static readonly id = 'manage_custom_tool' - private currentArgs?: ManageCustomToolArgs - - constructor(toolCallId: string) { - super(toolCallId, ManageCustomToolClientTool.id, ManageCustomToolClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Managing custom tool', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Manage custom tool?', icon: Plus }, - [ClientToolCallState.executing]: { text: 'Managing custom tool', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Managed custom tool', icon: Check }, - [ClientToolCallState.error]: { text: 'Failed to manage custom tool', icon: X }, - [ClientToolCallState.aborted]: { - text: 'Aborted managing custom tool', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped managing custom tool', - icon: XCircle, - }, - }, - interrupt: { - accept: { text: 'Allow', icon: Check }, - reject: { text: 'Skip', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const operation = params?.operation as 'add' | 'edit' | 'delete' | 'list' | undefined - - if (!operation) return undefined - - let toolName = params?.schema?.function?.name - if (!toolName && params?.toolId) { - try { - const tool = getCustomTool(params.toolId) - toolName = tool?.schema?.function?.name - } catch { - // Ignore errors accessing cache - } - } 
- - const getActionText = (verb: 'present' | 'past' | 'gerund') => { - switch (operation) { - case 'add': - return verb === 'present' ? 'Create' : verb === 'past' ? 'Created' : 'Creating' - case 'edit': - return verb === 'present' ? 'Edit' : verb === 'past' ? 'Edited' : 'Editing' - case 'delete': - return verb === 'present' ? 'Delete' : verb === 'past' ? 'Deleted' : 'Deleting' - case 'list': - return verb === 'present' ? 'List' : verb === 'past' ? 'Listed' : 'Listing' - default: - return verb === 'present' ? 'Manage' : verb === 'past' ? 'Managed' : 'Managing' - } - } - - // For add: only show tool name in past tense (success) - // For edit/delete: always show tool name - // For list: never show individual tool name, use plural - const shouldShowToolName = (currentState: ClientToolCallState) => { - if (operation === 'list') return false - if (operation === 'add') { - return currentState === ClientToolCallState.success - } - return true // edit and delete always show tool name - } - - const nameText = - operation === 'list' - ? ' custom tools' - : shouldShowToolName(state) && toolName - ? 
` ${toolName}` - : ' custom tool' - - switch (state) { - case ClientToolCallState.success: - return `${getActionText('past')}${nameText}` - case ClientToolCallState.executing: - return `${getActionText('gerund')}${nameText}` - case ClientToolCallState.generating: - return `${getActionText('gerund')}${nameText}` - case ClientToolCallState.pending: - return `${getActionText('present')}${nameText}?` - case ClientToolCallState.error: - return `Failed to ${getActionText('present')?.toLowerCase()}${nameText}` - case ClientToolCallState.aborted: - return `Aborted ${getActionText('gerund')?.toLowerCase()}${nameText}` - case ClientToolCallState.rejected: - return `Skipped ${getActionText('gerund')?.toLowerCase()}${nameText}` - } - return undefined - }, - } - - /** - * Gets the tool call args from the copilot store (needed before execute() is called) - */ - private getArgsFromStore(): ManageCustomToolArgs | undefined { - try { - const { toolCallsById } = useCopilotStore.getState() - const toolCall = toolCallsById[this.toolCallId] - return (toolCall as any)?.params as ManageCustomToolArgs | undefined - } catch { - return undefined - } - } - - /** - * Override getInterruptDisplays to only show confirmation for edit and delete operations. - * Add operations execute directly without confirmation. 
- */ - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - const args = this.currentArgs || this.getArgsFromStore() - const operation = args?.operation - if (operation === 'edit' || operation === 'delete') { - return this.metadata.interrupt - } - return undefined - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: ManageCustomToolArgs): Promise { - const logger = createLogger('ManageCustomToolClientTool') - try { - this.setState(ClientToolCallState.executing) - await this.executeOperation(args, logger) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to manage custom tool', { - success: false, - error: e?.message || 'Failed to manage custom tool', - }) - } - } - - async execute(args?: ManageCustomToolArgs): Promise { - this.currentArgs = args - if (args?.operation === 'add' || args?.operation === 'list') { - await this.handleAccept(args) - } - } - - /** - * Executes the custom tool operation (add, edit, delete, or list) - */ - private async executeOperation( - args: ManageCustomToolArgs | undefined, - logger: ReturnType - ): Promise { - if (!args?.operation) { - throw new Error('Operation is required') - } - - await checkCustomToolsPermission() - - const { operation, toolId, schema, code } = args - - const { hydration } = useWorkflowRegistry.getState() - const workspaceId = hydration.workspaceId - if (!workspaceId) { - throw new Error('No active workspace found') - } - - logger.info(`Executing custom tool operation: ${operation}`, { - operation, - toolId, - functionName: schema?.function?.name, - workspaceId, - }) - - switch (operation) { - case 'add': - await this.addCustomTool({ schema, code, workspaceId }, logger) - break - case 'edit': - await this.editCustomTool({ toolId, schema, code, workspaceId }, logger) - 
break - case 'delete': - await this.deleteCustomTool({ toolId, workspaceId }, logger) - break - case 'list': - await this.markToolComplete(200, 'Listed custom tools') - break - default: - throw new Error(`Unknown operation: ${operation}`) - } - } - - /** - * Creates a new custom tool - */ - private async addCustomTool( - params: { - schema?: CustomToolSchema - code?: string - workspaceId: string - }, - logger: ReturnType - ): Promise { - const { schema, code, workspaceId } = params - - if (!schema) { - throw new Error('Schema is required for adding a custom tool') - } - if (!code) { - throw new Error('Code is required for adding a custom tool') - } - - const functionName = schema.function.name - - const response = await fetch(API_ENDPOINT, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - tools: [{ title: functionName, schema, code }], - workspaceId, - }), - }) - - const data = await response.json() - - if (!response.ok) { - throw new Error(data.error || 'Failed to create custom tool') - } - - if (!data.data || !Array.isArray(data.data) || data.data.length === 0) { - throw new Error('Invalid API response: missing tool data') - } - - const createdTool = data.data[0] - logger.info(`Created custom tool: ${functionName}`, { toolId: createdTool.id }) - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, `Created custom tool "${functionName}"`, { - success: true, - operation: 'add', - toolId: createdTool.id, - functionName, - }) - } - - /** - * Updates an existing custom tool - */ - private async editCustomTool( - params: { - toolId?: string - schema?: CustomToolSchema - code?: string - workspaceId: string - }, - logger: ReturnType - ): Promise { - const { toolId, schema, code, workspaceId } = params - - if (!toolId) { - throw new Error('Tool ID is required for editing a custom tool') - } - - if (!schema && !code) { - throw new Error('At least one of schema or code must be provided for editing') 
- } - - const existingResponse = await fetch(`${API_ENDPOINT}?workspaceId=${workspaceId}`) - const existingData = await existingResponse.json() - - if (!existingResponse.ok) { - throw new Error(existingData.error || 'Failed to fetch existing tools') - } - - const existingTool = existingData.data?.find((t: any) => t.id === toolId) - if (!existingTool) { - throw new Error(`Tool with ID ${toolId} not found`) - } - - const mergedSchema = schema ?? existingTool.schema - const updatedTool = { - id: toolId, - title: mergedSchema.function.name, - schema: mergedSchema, - code: code ?? existingTool.code, - } - - const response = await fetch(API_ENDPOINT, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - tools: [updatedTool], - workspaceId, - }), - }) - - const data = await response.json() - - if (!response.ok) { - throw new Error(data.error || 'Failed to update custom tool') - } - - const functionName = updatedTool.schema.function.name - logger.info(`Updated custom tool: ${functionName}`, { toolId }) - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, `Updated custom tool "${functionName}"`, { - success: true, - operation: 'edit', - toolId, - functionName, - }) - } - - /** - * Deletes a custom tool - */ - private async deleteCustomTool( - params: { - toolId?: string - workspaceId: string - }, - logger: ReturnType - ): Promise { - const { toolId, workspaceId } = params - - if (!toolId) { - throw new Error('Tool ID is required for deleting a custom tool') - } - - const url = `${API_ENDPOINT}?id=${toolId}&workspaceId=${workspaceId}` - const response = await fetch(url, { - method: 'DELETE', - }) - - const data = await response.json() - - if (!response.ok) { - throw new Error(data.error || 'Failed to delete custom tool') - } - - logger.info(`Deleted custom tool: ${toolId}`) - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, `Deleted custom tool`, { - success: true, - 
operation: 'delete', - toolId, - }) - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/manage-mcp-tool.ts b/apps/sim/lib/copilot/tools/client/workflow/manage-mcp-tool.ts deleted file mode 100644 index 796574dc1..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/manage-mcp-tool.ts +++ /dev/null @@ -1,360 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Check, Loader2, Server, X, XCircle } from 'lucide-react' -import { client } from '@/lib/auth/auth-client' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { useCopilotStore } from '@/stores/panel/copilot/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface McpServerConfig { - name: string - transport: 'streamable-http' - url?: string - headers?: Record - timeout?: number - enabled?: boolean -} - -interface ManageMcpToolArgs { - operation: 'add' | 'edit' | 'delete' - serverId?: string - config?: McpServerConfig -} - -const API_ENDPOINT = '/api/mcp/servers' - -async function checkMcpToolsPermission(): Promise { - const activeOrgResponse = await client.organization.getFullOrganization() - const organizationId = activeOrgResponse.data?.id - if (!organizationId) return - - const response = await fetch(`/api/permission-groups/user?organizationId=${organizationId}`) - if (!response.ok) return - - const data = await response.json() - if (data?.config?.disableMcpTools) { - throw new Error('MCP tools are not allowed based on your permission group settings') - } -} - -/** - * Client tool for creating, editing, and deleting MCP tool servers via the copilot. 
- */ -export class ManageMcpToolClientTool extends BaseClientTool { - static readonly id = 'manage_mcp_tool' - private currentArgs?: ManageMcpToolArgs - - constructor(toolCallId: string) { - super(toolCallId, ManageMcpToolClientTool.id, ManageMcpToolClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Managing MCP tool', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Manage MCP tool?', icon: Server }, - [ClientToolCallState.executing]: { text: 'Managing MCP tool', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Managed MCP tool', icon: Check }, - [ClientToolCallState.error]: { text: 'Failed to manage MCP tool', icon: X }, - [ClientToolCallState.aborted]: { - text: 'Aborted managing MCP tool', - icon: XCircle, - }, - [ClientToolCallState.rejected]: { - text: 'Skipped managing MCP tool', - icon: XCircle, - }, - }, - interrupt: { - accept: { text: 'Allow', icon: Check }, - reject: { text: 'Skip', icon: XCircle }, - }, - getDynamicText: (params, state) => { - const operation = params?.operation as 'add' | 'edit' | 'delete' | undefined - - if (!operation) return undefined - - const serverName = params?.config?.name || params?.serverName - - const getActionText = (verb: 'present' | 'past' | 'gerund') => { - switch (operation) { - case 'add': - return verb === 'present' ? 'Add' : verb === 'past' ? 'Added' : 'Adding' - case 'edit': - return verb === 'present' ? 'Edit' : verb === 'past' ? 'Edited' : 'Editing' - case 'delete': - return verb === 'present' ? 'Delete' : verb === 'past' ? 'Deleted' : 'Deleting' - } - } - - const shouldShowServerName = (currentState: ClientToolCallState) => { - if (operation === 'add') { - return currentState === ClientToolCallState.success - } - return true - } - - const nameText = shouldShowServerName(state) && serverName ? 
` ${serverName}` : ' MCP tool' - - switch (state) { - case ClientToolCallState.success: - return `${getActionText('past')}${nameText}` - case ClientToolCallState.executing: - return `${getActionText('gerund')}${nameText}` - case ClientToolCallState.generating: - return `${getActionText('gerund')}${nameText}` - case ClientToolCallState.pending: - return `${getActionText('present')}${nameText}?` - case ClientToolCallState.error: - return `Failed to ${getActionText('present')?.toLowerCase()}${nameText}` - case ClientToolCallState.aborted: - return `Aborted ${getActionText('gerund')?.toLowerCase()}${nameText}` - case ClientToolCallState.rejected: - return `Skipped ${getActionText('gerund')?.toLowerCase()}${nameText}` - } - return undefined - }, - } - - /** - * Gets the tool call args from the copilot store (needed before execute() is called) - */ - private getArgsFromStore(): ManageMcpToolArgs | undefined { - try { - const { toolCallsById } = useCopilotStore.getState() - const toolCall = toolCallsById[this.toolCallId] - return (toolCall as any)?.params as ManageMcpToolArgs | undefined - } catch { - return undefined - } - } - - /** - * Override getInterruptDisplays to only show confirmation for edit and delete operations. - * Add operations execute directly without confirmation. 
- */ - getInterruptDisplays(): BaseClientToolMetadata['interrupt'] | undefined { - const args = this.currentArgs || this.getArgsFromStore() - const operation = args?.operation - if (operation === 'edit' || operation === 'delete') { - return this.metadata.interrupt - } - return undefined - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: ManageMcpToolArgs): Promise { - const logger = createLogger('ManageMcpToolClientTool') - try { - this.setState(ClientToolCallState.executing) - await this.executeOperation(args, logger) - } catch (e: any) { - logger.error('execute failed', { message: e?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, e?.message || 'Failed to manage MCP tool', { - success: false, - error: e?.message || 'Failed to manage MCP tool', - }) - } - } - - async execute(args?: ManageMcpToolArgs): Promise { - this.currentArgs = args - if (args?.operation === 'add') { - await this.handleAccept(args) - } - } - - /** - * Executes the MCP tool operation (add, edit, or delete) - */ - private async executeOperation( - args: ManageMcpToolArgs | undefined, - logger: ReturnType - ): Promise { - if (!args?.operation) { - throw new Error('Operation is required') - } - - await checkMcpToolsPermission() - - const { operation, serverId, config } = args - - const { hydration } = useWorkflowRegistry.getState() - const workspaceId = hydration.workspaceId - if (!workspaceId) { - throw new Error('No active workspace found') - } - - logger.info(`Executing MCP tool operation: ${operation}`, { - operation, - serverId, - serverName: config?.name, - workspaceId, - }) - - switch (operation) { - case 'add': - await this.addMcpServer({ config, workspaceId }, logger) - break - case 'edit': - await this.editMcpServer({ serverId, config, workspaceId }, logger) - break - case 'delete': - await this.deleteMcpServer({ serverId, workspaceId }, logger) - 
break - default: - throw new Error(`Unknown operation: ${operation}`) - } - } - - /** - * Creates a new MCP server - */ - private async addMcpServer( - params: { - config?: McpServerConfig - workspaceId: string - }, - logger: ReturnType - ): Promise { - const { config, workspaceId } = params - - if (!config) { - throw new Error('Config is required for adding an MCP tool') - } - if (!config.name) { - throw new Error('Server name is required') - } - if (!config.url) { - throw new Error('Server URL is required for streamable-http transport') - } - - const serverData = { - ...config, - workspaceId, - transport: config.transport || 'streamable-http', - timeout: config.timeout || 30000, - enabled: config.enabled !== false, - } - - const response = await fetch(API_ENDPOINT, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify(serverData), - }) - - const data = await response.json() - - if (!response.ok) { - throw new Error(data.error || 'Failed to create MCP tool') - } - - const serverId = data.data?.serverId - logger.info(`Created MCP tool: ${config.name}`, { serverId }) - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, `Created MCP tool "${config.name}"`, { - success: true, - operation: 'add', - serverId, - serverName: config.name, - }) - } - - /** - * Updates an existing MCP server - */ - private async editMcpServer( - params: { - serverId?: string - config?: McpServerConfig - workspaceId: string - }, - logger: ReturnType - ): Promise { - const { serverId, config, workspaceId } = params - - if (!serverId) { - throw new Error('Server ID is required for editing an MCP tool') - } - - if (!config) { - throw new Error('Config is required for editing an MCP tool') - } - - const updateData = { - ...config, - workspaceId, - } - - const response = await fetch(`${API_ENDPOINT}/${serverId}?workspaceId=${workspaceId}`, { - method: 'PATCH', - headers: { 'Content-Type': 'application/json' }, - body: 
JSON.stringify(updateData), - }) - - const data = await response.json() - - if (!response.ok) { - throw new Error(data.error || 'Failed to update MCP tool') - } - - const serverName = config.name || data.data?.server?.name || serverId - logger.info(`Updated MCP tool: ${serverName}`, { serverId }) - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, `Updated MCP tool "${serverName}"`, { - success: true, - operation: 'edit', - serverId, - serverName, - }) - } - - /** - * Deletes an MCP server - */ - private async deleteMcpServer( - params: { - serverId?: string - workspaceId: string - }, - logger: ReturnType - ): Promise { - const { serverId, workspaceId } = params - - if (!serverId) { - throw new Error('Server ID is required for deleting an MCP tool') - } - - const url = `${API_ENDPOINT}?serverId=${serverId}&workspaceId=${workspaceId}` - const response = await fetch(url, { - method: 'DELETE', - }) - - const data = await response.json() - - if (!response.ok) { - throw new Error(data.error || 'Failed to delete MCP tool') - } - - logger.info(`Deleted MCP tool: ${serverId}`) - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, `Deleted MCP tool`, { - success: true, - operation: 'delete', - serverId, - }) - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/redeploy.ts b/apps/sim/lib/copilot/tools/client/workflow/redeploy.ts deleted file mode 100644 index 2fef023fb..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/redeploy.ts +++ /dev/null @@ -1,71 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Rocket, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -export class RedeployClientTool extends BaseClientTool { - static readonly id = 'redeploy' - private hasExecuted = false - - 
constructor(toolCallId: string) { - super(toolCallId, RedeployClientTool.id, RedeployClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Redeploying workflow', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Redeploy workflow', icon: Loader2 }, - [ClientToolCallState.executing]: { text: 'Redeploying workflow', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Redeployed workflow', icon: Rocket }, - [ClientToolCallState.error]: { text: 'Failed to redeploy workflow', icon: XCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted redeploy', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped redeploy', icon: XCircle }, - }, - interrupt: undefined, - } - - async execute(): Promise { - const logger = createLogger('RedeployClientTool') - try { - if (this.hasExecuted) { - logger.info('execute skipped (already executed)', { toolCallId: this.toolCallId }) - return - } - this.hasExecuted = true - - this.setState(ClientToolCallState.executing) - - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (!activeWorkflowId) { - throw new Error('No workflow ID provided') - } - - const res = await fetch(`/api/workflows/${activeWorkflowId}/deploy`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ deployChatEnabled: false }), - }) - - const json = await res.json().catch(() => ({})) - if (!res.ok) { - const errorText = json?.error || `Server error (${res.status})` - throw new Error(errorText) - } - - this.setState(ClientToolCallState.success) - await this.markToolComplete(200, 'Workflow redeployed', { - workflowId: activeWorkflowId, - deployedAt: json?.deployedAt || null, - schedule: json?.schedule, - }) - } catch (error: any) { - logger.error('Redeploy failed', { message: error?.message }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, error?.message || 'Failed to 
redeploy workflow') - } - } -} diff --git a/apps/sim/lib/copilot/tools/client/workflow/run-workflow.ts b/apps/sim/lib/copilot/tools/client/workflow/run-workflow.ts deleted file mode 100644 index 3b2c89df6..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/run-workflow.ts +++ /dev/null @@ -1,231 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, MinusCircle, Play, XCircle } from 'lucide-react' -import { v4 as uuidv4 } from 'uuid' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, - WORKFLOW_EXECUTION_TIMEOUT_MS, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { executeWorkflowWithFullLogging } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils' -import { useExecutionStore } from '@/stores/execution' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface RunWorkflowArgs { - workflowId?: string - description?: string - workflow_input?: Record -} - -export class RunWorkflowClientTool extends BaseClientTool { - static readonly id = 'run_workflow' - - constructor(toolCallId: string) { - super(toolCallId, RunWorkflowClientTool.id, RunWorkflowClientTool.metadata) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { text: 'Preparing to run your workflow', icon: Loader2 }, - [ClientToolCallState.pending]: { text: 'Run this workflow?', icon: Play }, - [ClientToolCallState.executing]: { text: 'Running your workflow', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Executed workflow', icon: Play }, - [ClientToolCallState.error]: { text: 'Errored running workflow', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped workflow execution', icon: MinusCircle }, - [ClientToolCallState.aborted]: { text: 'Aborted workflow execution', icon: MinusCircle }, - [ClientToolCallState.background]: { text: 'Running in 
background', icon: Play }, - }, - interrupt: { - accept: { text: 'Run', icon: Play }, - reject: { text: 'Skip', icon: MinusCircle }, - }, - uiConfig: { - isSpecial: true, - interrupt: { - accept: { text: 'Run', icon: Play }, - reject: { text: 'Skip', icon: MinusCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - secondaryAction: { - text: 'Move to Background', - title: 'Move to Background', - variant: 'tertiary', - showInStates: [ClientToolCallState.executing], - completionMessage: - 'The user has chosen to move the workflow execution to the background. Check back with them later to know when the workflow execution is complete', - targetState: ClientToolCallState.background, - }, - paramsTable: { - columns: [ - { key: 'input', label: 'Input', width: '36%' }, - { key: 'value', label: 'Value', width: '64%', editable: true, mono: true }, - ], - extractRows: (params) => { - let inputs = params.input || params.inputs || params.workflow_input - if (typeof inputs === 'string') { - try { - inputs = JSON.parse(inputs) - } catch { - inputs = {} - } - } - if (params.workflow_input && typeof params.workflow_input === 'object') { - inputs = params.workflow_input - } - if (!inputs || typeof inputs !== 'object') { - const { workflowId, workflow_input, ...rest } = params - inputs = rest - } - const safeInputs = inputs && typeof inputs === 'object' ? 
inputs : {} - return Object.entries(safeInputs).map(([key, value]) => [key, key, String(value)]) - }, - }, - }, - getDynamicText: (params, state) => { - const workflowId = params?.workflowId || useWorkflowRegistry.getState().activeWorkflowId - if (workflowId) { - const workflowName = useWorkflowRegistry.getState().workflows[workflowId]?.name - if (workflowName) { - switch (state) { - case ClientToolCallState.success: - return `Ran ${workflowName}` - case ClientToolCallState.executing: - return `Running ${workflowName}` - case ClientToolCallState.generating: - return `Preparing to run ${workflowName}` - case ClientToolCallState.pending: - return `Run ${workflowName}?` - case ClientToolCallState.error: - return `Failed to run ${workflowName}` - case ClientToolCallState.rejected: - return `Skipped running ${workflowName}` - case ClientToolCallState.aborted: - return `Aborted running ${workflowName}` - case ClientToolCallState.background: - return `Running ${workflowName} in background` - } - } - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: RunWorkflowArgs): Promise { - const logger = createLogger('RunWorkflowClientTool') - - // Use longer timeout for workflow execution (10 minutes) - await this.executeWithTimeout(async () => { - const params = args || {} - logger.debug('handleAccept() called', { - toolCallId: this.toolCallId, - state: this.getState(), - hasArgs: !!args, - argKeys: args ? Object.keys(args) : [], - }) - - // prevent concurrent execution - const { isExecuting, setIsExecuting } = useExecutionStore.getState() - if (isExecuting) { - logger.debug('Execution prevented: already executing') - this.setState(ClientToolCallState.error) - await this.markToolComplete( - 409, - 'The workflow is already in the middle of an execution. 
Try again later' - ) - return - } - - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (!activeWorkflowId) { - logger.debug('Execution prevented: no active workflow') - this.setState(ClientToolCallState.error) - await this.markToolComplete(400, 'No active workflow found') - return - } - logger.debug('Using active workflow', { activeWorkflowId }) - - const workflowInput = params.workflow_input || undefined - if (workflowInput) { - logger.debug('Workflow input provided', { - inputFields: Object.keys(workflowInput), - inputPreview: JSON.stringify(workflowInput).slice(0, 120), - }) - } - - setIsExecuting(true) - logger.debug('Set isExecuting(true) and switching state to executing') - this.setState(ClientToolCallState.executing) - - const executionId = uuidv4() - const executionStartTime = new Date().toISOString() - logger.debug('Starting workflow execution', { - executionStartTime, - executionId, - toolCallId: this.toolCallId, - }) - - try { - const result = await executeWorkflowWithFullLogging({ - workflowInput, - executionId, - }) - - // Determine success for both non-streaming and streaming executions - let succeeded = true - let errorMessage: string | undefined - try { - if (result && typeof result === 'object' && 'success' in (result as any)) { - succeeded = Boolean((result as any).success) - if (!succeeded) { - errorMessage = (result as any)?.error || (result as any)?.output?.error - } - } else if ( - result && - typeof result === 'object' && - 'execution' in (result as any) && - (result as any).execution && - typeof (result as any).execution === 'object' - ) { - succeeded = Boolean((result as any).execution.success) - if (!succeeded) { - errorMessage = - (result as any).execution?.error || (result as any).execution?.output?.error - } - } - } catch {} - - if (succeeded) { - logger.debug('Workflow execution finished with success') - this.setState(ClientToolCallState.success) - await this.markToolComplete( - 200, - `Workflow execution completed. 
Started at: ${executionStartTime}` - ) - } else { - const msg = errorMessage || 'Workflow execution failed' - logger.error('Workflow execution finished with failure', { message: msg }) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, msg) - } - } finally { - // Always clean up execution state - setIsExecuting(false) - } - }, WORKFLOW_EXECUTION_TIMEOUT_MS) - } - - async execute(args?: RunWorkflowArgs): Promise { - // For compatibility if execute() is explicitly invoked, route to handleAccept - await this.handleAccept(args) - } -} - -// Register UI config at module load -registerToolUIConfig(RunWorkflowClientTool.id, RunWorkflowClientTool.metadata.uiConfig!) diff --git a/apps/sim/lib/copilot/tools/client/workflow/set-global-workflow-variables.ts b/apps/sim/lib/copilot/tools/client/workflow/set-global-workflow-variables.ts deleted file mode 100644 index 63f4c6c6f..000000000 --- a/apps/sim/lib/copilot/tools/client/workflow/set-global-workflow-variables.ts +++ /dev/null @@ -1,278 +0,0 @@ -import { createLogger } from '@sim/logger' -import { Loader2, Settings2, X, XCircle } from 'lucide-react' -import { - BaseClientTool, - type BaseClientToolMetadata, - ClientToolCallState, -} from '@/lib/copilot/tools/client/base-tool' -import { registerToolUIConfig } from '@/lib/copilot/tools/client/ui-config' -import { useVariablesStore } from '@/stores/panel/variables/store' -import { useWorkflowRegistry } from '@/stores/workflows/registry/store' - -interface OperationItem { - operation: 'add' | 'edit' | 'delete' - name: string - type?: 'plain' | 'number' | 'boolean' | 'array' | 'object' - value?: string -} - -interface SetGlobalVarsArgs { - operations: OperationItem[] - workflowId?: string -} - -export class SetGlobalWorkflowVariablesClientTool extends BaseClientTool { - static readonly id = 'set_global_workflow_variables' - - constructor(toolCallId: string) { - super( - toolCallId, - SetGlobalWorkflowVariablesClientTool.id, - 
SetGlobalWorkflowVariablesClientTool.metadata - ) - } - - static readonly metadata: BaseClientToolMetadata = { - displayNames: { - [ClientToolCallState.generating]: { - text: 'Preparing to set workflow variables', - icon: Loader2, - }, - [ClientToolCallState.pending]: { text: 'Set workflow variables?', icon: Settings2 }, - [ClientToolCallState.executing]: { text: 'Setting workflow variables', icon: Loader2 }, - [ClientToolCallState.success]: { text: 'Updated workflow variables', icon: Settings2 }, - [ClientToolCallState.error]: { text: 'Failed to set workflow variables', icon: X }, - [ClientToolCallState.aborted]: { text: 'Aborted setting variables', icon: XCircle }, - [ClientToolCallState.rejected]: { text: 'Skipped setting variables', icon: XCircle }, - }, - interrupt: { - accept: { text: 'Apply', icon: Settings2 }, - reject: { text: 'Skip', icon: XCircle }, - }, - uiConfig: { - interrupt: { - accept: { text: 'Apply', icon: Settings2 }, - reject: { text: 'Skip', icon: XCircle }, - showAllowOnce: true, - showAllowAlways: true, - }, - paramsTable: { - columns: [ - { key: 'name', label: 'Name', width: '40%', editable: true, mono: true }, - { key: 'value', label: 'Value', width: '60%', editable: true, mono: true }, - ], - extractRows: (params) => { - const operations = params.operations || [] - return operations.map((op: any, idx: number) => [ - String(idx), - op.name || '', - String(op.value ?? ''), - ]) - }, - }, - }, - getDynamicText: (params, state) => { - if (params?.operations && Array.isArray(params.operations)) { - const varNames = params.operations - .slice(0, 2) - .map((op: any) => op.name) - .filter(Boolean) - - if (varNames.length > 0) { - const varList = varNames.join(', ') - const more = params.operations.length > 2 ? '...' 
: '' - const displayText = `${varList}${more}` - - switch (state) { - case ClientToolCallState.success: - return `Set ${displayText}` - case ClientToolCallState.executing: - return `Setting ${displayText}` - case ClientToolCallState.generating: - return `Preparing to set ${displayText}` - case ClientToolCallState.pending: - return `Set ${displayText}?` - case ClientToolCallState.error: - return `Failed to set ${displayText}` - case ClientToolCallState.aborted: - return `Aborted setting ${displayText}` - case ClientToolCallState.rejected: - return `Skipped setting ${displayText}` - } - } - } - return undefined - }, - } - - async handleReject(): Promise { - await super.handleReject() - this.setState(ClientToolCallState.rejected) - } - - async handleAccept(args?: SetGlobalVarsArgs): Promise { - const logger = createLogger('SetGlobalWorkflowVariablesClientTool') - try { - this.setState(ClientToolCallState.executing) - const payload: SetGlobalVarsArgs = { ...(args || { operations: [] }) } - if (!payload.workflowId) { - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (activeWorkflowId) payload.workflowId = activeWorkflowId - } - if (!payload.workflowId) { - throw new Error('No active workflow found') - } - - // Fetch current variables so we can construct full array payload - const getRes = await fetch(`/api/workflows/${payload.workflowId}/variables`, { - method: 'GET', - }) - if (!getRes.ok) { - const txt = await getRes.text().catch(() => '') - throw new Error(txt || 'Failed to load current variables') - } - const currentJson = await getRes.json() - const currentVarsRecord = (currentJson?.data as Record) || {} - - // Helper to convert string -> typed value - function coerceValue( - value: string | undefined, - type?: 'plain' | 'number' | 'boolean' | 'array' | 'object' - ) { - if (value === undefined) return value - const t = type || 'plain' - try { - if (t === 'number') { - const n = Number(value) - if (Number.isNaN(n)) return value - return n - } - if 
(t === 'boolean') { - const v = String(value).trim().toLowerCase() - if (v === 'true') return true - if (v === 'false') return false - return value - } - if (t === 'array' || t === 'object') { - const parsed = JSON.parse(value) - if (t === 'array' && Array.isArray(parsed)) return parsed - if (t === 'object' && parsed && typeof parsed === 'object' && !Array.isArray(parsed)) - return parsed - return value - } - } catch {} - return value - } - - // Build mutable map by variable name - const byName: Record = {} - Object.values(currentVarsRecord).forEach((v: any) => { - if (v && typeof v === 'object' && v.id && v.name) byName[String(v.name)] = v - }) - - // Apply operations in order - for (const op of payload.operations || []) { - const key = String(op.name) - const nextType = (op.type as any) || byName[key]?.type || 'plain' - if (op.operation === 'delete') { - delete byName[key] - continue - } - const typedValue = coerceValue(op.value, nextType) - if (op.operation === 'add') { - byName[key] = { - id: crypto.randomUUID(), - workflowId: payload.workflowId, - name: key, - type: nextType, - value: typedValue, - } - continue - } - if (op.operation === 'edit') { - if (!byName[key]) { - // If editing a non-existent variable, create it - byName[key] = { - id: crypto.randomUUID(), - workflowId: payload.workflowId, - name: key, - type: nextType, - value: typedValue, - } - } else { - byName[key] = { - ...byName[key], - type: nextType, - ...(op.value !== undefined ? 
{ value: typedValue } : {}), - } - } - } - } - - // Convert byName (keyed by name) to record keyed by ID for the API - const variablesRecord: Record = {} - for (const v of Object.values(byName)) { - variablesRecord[v.id] = v - } - - // POST full variables record to persist - const res = await fetch(`/api/workflows/${payload.workflowId}/variables`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ variables: variablesRecord }), - }) - if (!res.ok) { - const txt = await res.text().catch(() => '') - throw new Error(txt || `Failed to update variables (${res.status})`) - } - - try { - const { activeWorkflowId } = useWorkflowRegistry.getState() - if (activeWorkflowId) { - // Fetch the updated variables from the API - const refreshRes = await fetch(`/api/workflows/${activeWorkflowId}/variables`, { - method: 'GET', - }) - - if (refreshRes.ok) { - const refreshJson = await refreshRes.json() - const updatedVarsRecord = (refreshJson?.data as Record) || {} - - // Update the variables store with the fresh data - useVariablesStore.setState((state) => { - // Remove old variables for this workflow - const withoutWorkflow = Object.fromEntries( - Object.entries(state.variables).filter(([, v]) => v.workflowId !== activeWorkflowId) - ) - // Add the updated variables - return { - variables: { ...withoutWorkflow, ...updatedVarsRecord }, - } - }) - - logger.info('Refreshed variables in store', { workflowId: activeWorkflowId }) - } - } - } catch (refreshError) { - logger.warn('Failed to refresh variables in store', { error: refreshError }) - } - - await this.markToolComplete(200, 'Workflow variables updated', { variables: byName }) - this.setState(ClientToolCallState.success) - } catch (e: any) { - const message = e instanceof Error ? 
e.message : String(e) - this.setState(ClientToolCallState.error) - await this.markToolComplete(500, message || 'Failed to set workflow variables') - } - } - - async execute(args?: SetGlobalVarsArgs): Promise { - await this.handleAccept(args) - } -} - -// Register UI config at module load -registerToolUIConfig( - SetGlobalWorkflowVariablesClientTool.id, - SetGlobalWorkflowVariablesClientTool.metadata.uiConfig! -) diff --git a/apps/sim/lib/copilot/tools/mcp/definitions.ts b/apps/sim/lib/copilot/tools/mcp/definitions.ts new file mode 100644 index 000000000..0dc26951b --- /dev/null +++ b/apps/sim/lib/copilot/tools/mcp/definitions.ts @@ -0,0 +1,680 @@ +export type DirectToolDef = { + name: string + description: string + inputSchema: { type: 'object'; properties?: Record; required?: string[] } + toolId: string +} + +export type SubagentToolDef = { + name: string + description: string + inputSchema: { type: 'object'; properties?: Record; required?: string[] } + agentId: string +} + +/** + * Direct tools that execute immediately without LLM orchestration. + * These are fast database queries that don't need AI reasoning. + */ +export const DIRECT_TOOL_DEFS: DirectToolDef[] = [ + { + name: 'list_workspaces', + toolId: 'list_user_workspaces', + description: + 'List all workspaces the user has access to. Returns workspace IDs, names, and roles. Use this first to determine which workspace to operate in.', + inputSchema: { + type: 'object', + properties: {}, + }, + }, + { + name: 'list_workflows', + toolId: 'list_user_workflows', + description: + 'List all workflows the user has access to. Returns workflow IDs, names, workspace, and folder info. 
Use workspaceId/folderId to scope results.', + inputSchema: { + type: 'object', + properties: { + workspaceId: { + type: 'string', + description: 'Optional workspace ID to filter workflows.', + }, + folderId: { + type: 'string', + description: 'Optional folder ID to filter workflows.', + }, + }, + }, + }, + { + name: 'list_folders', + toolId: 'list_folders', + description: + 'List all folders in a workspace. Returns folder IDs, names, and parent relationships for organizing workflows.', + inputSchema: { + type: 'object', + properties: { + workspaceId: { + type: 'string', + description: 'Workspace ID to list folders from.', + }, + }, + required: ['workspaceId'], + }, + }, + { + name: 'get_workflow', + toolId: 'get_user_workflow', + description: + 'Get a workflow by ID. Returns the full workflow definition including all blocks, connections, and configuration.', + inputSchema: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'Workflow ID to retrieve.', + }, + }, + required: ['workflowId'], + }, + }, + { + name: 'create_workflow', + toolId: 'create_workflow', + description: + 'Create a new empty workflow. Returns the new workflow ID. Always call this FIRST before sim_build for new workflows. Use workspaceId to place it in a specific workspace.', + inputSchema: { + type: 'object', + properties: { + name: { + type: 'string', + description: 'Name for the new workflow.', + }, + workspaceId: { + type: 'string', + description: 'Optional workspace ID. Uses default workspace if not provided.', + }, + folderId: { + type: 'string', + description: 'Optional folder ID to place the workflow in.', + }, + description: { + type: 'string', + description: 'Optional description for the workflow.', + }, + }, + required: ['name'], + }, + }, + { + name: 'create_folder', + toolId: 'create_folder', + description: + 'Create a new folder for organizing workflows. 
Use parentId to create nested folder hierarchies.', + inputSchema: { + type: 'object', + properties: { + name: { + type: 'string', + description: 'Name for the new folder.', + }, + workspaceId: { + type: 'string', + description: 'Optional workspace ID. Uses default workspace if not provided.', + }, + parentId: { + type: 'string', + description: 'Optional parent folder ID for nested folders.', + }, + }, + required: ['name'], + }, + }, + { + name: 'rename_workflow', + toolId: 'rename_workflow', + description: 'Rename an existing workflow.', + inputSchema: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'The workflow ID to rename.', + }, + name: { + type: 'string', + description: 'The new name for the workflow.', + }, + }, + required: ['workflowId', 'name'], + }, + }, + { + name: 'move_workflow', + toolId: 'move_workflow', + description: + 'Move a workflow into a different folder. Omit folderId or pass empty string to move to workspace root.', + inputSchema: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'The workflow ID to move.', + }, + folderId: { + type: 'string', + description: 'Target folder ID. Omit or pass empty string to move to workspace root.', + }, + }, + required: ['workflowId'], + }, + }, + { + name: 'move_folder', + toolId: 'move_folder', + description: + 'Move a folder into another folder. Omit parentId or pass empty string to move to workspace root.', + inputSchema: { + type: 'object', + properties: { + folderId: { + type: 'string', + description: 'The folder ID to move.', + }, + parentId: { + type: 'string', + description: + 'Target parent folder ID. Omit or pass empty string to move to workspace root.', + }, + }, + required: ['folderId'], + }, + }, + { + name: 'run_workflow', + toolId: 'run_workflow', + description: + 'Run a workflow and return its output. Works on both draft and deployed states. 
By default runs the draft (live) state.', + inputSchema: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'REQUIRED. The workflow ID to run.', + }, + workflow_input: { + type: 'object', + description: + 'JSON object with input values. Keys should match the workflow start block input field names.', + }, + useDeployedState: { + type: 'boolean', + description: 'When true, runs the deployed version instead of the draft. Default: false.', + }, + }, + required: ['workflowId'], + }, + }, + { + name: 'run_workflow_until_block', + toolId: 'run_workflow_until_block', + description: + 'Run a workflow and stop after a specific block completes. Useful for testing partial execution or debugging specific blocks.', + inputSchema: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'REQUIRED. The workflow ID to run.', + }, + stopAfterBlockId: { + type: 'string', + description: + 'REQUIRED. The block ID to stop after. Execution halts once this block completes.', + }, + workflow_input: { + type: 'object', + description: 'JSON object with input values for the workflow.', + }, + useDeployedState: { + type: 'boolean', + description: 'When true, runs the deployed version instead of the draft. Default: false.', + }, + }, + required: ['workflowId', 'stopAfterBlockId'], + }, + }, + { + name: 'run_from_block', + toolId: 'run_from_block', + description: + 'Run a workflow starting from a specific block, using cached outputs from a prior execution for upstream blocks. The workflow must have been run at least once first.', + inputSchema: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'REQUIRED. The workflow ID to run.', + }, + startBlockId: { + type: 'string', + description: 'REQUIRED. The block ID to start execution from.', + }, + executionId: { + type: 'string', + description: + 'Optional. Specific execution ID to load the snapshot from. 
Uses latest if omitted.', + }, + workflow_input: { + type: 'object', + description: 'Optional input values for the workflow.', + }, + useDeployedState: { + type: 'boolean', + description: 'When true, runs the deployed version instead of the draft. Default: false.', + }, + }, + required: ['workflowId', 'startBlockId'], + }, + }, + { + name: 'run_block', + toolId: 'run_block', + description: + 'Run a single block in isolation using cached outputs from a prior execution. Only the specified block executes — nothing upstream or downstream. The workflow must have been run at least once first.', + inputSchema: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'REQUIRED. The workflow ID.', + }, + blockId: { + type: 'string', + description: 'REQUIRED. The block ID to run in isolation.', + }, + executionId: { + type: 'string', + description: + 'Optional. Specific execution ID to load the snapshot from. Uses latest if omitted.', + }, + workflow_input: { + type: 'object', + description: 'Optional input values for the workflow.', + }, + useDeployedState: { + type: 'boolean', + description: 'When true, runs the deployed version instead of the draft. Default: false.', + }, + }, + required: ['workflowId', 'blockId'], + }, + }, + { + name: 'get_deployed_workflow_state', + toolId: 'get_deployed_workflow_state', + description: + 'Get the deployed (production) state of a workflow. Returns the full workflow definition as deployed, or indicates if the workflow is not yet deployed.', + inputSchema: { + type: 'object', + properties: { + workflowId: { + type: 'string', + description: 'REQUIRED. The workflow ID to get the deployed state for.', + }, + }, + required: ['workflowId'], + }, + }, + { + name: 'generate_api_key', + toolId: 'generate_api_key', + description: + 'Generate a new workspace API key for calling workflow API endpoints. 
The key is only shown once — tell the user to save it immediately.', + inputSchema: { + type: 'object', + properties: { + name: { + type: 'string', + description: + 'A descriptive name for the API key (e.g., "production-key", "dev-testing").', + }, + workspaceId: { + type: 'string', + description: "Optional workspace ID. Defaults to user's default workspace.", + }, + }, + required: ['name'], + }, + }, +] + +export const SUBAGENT_TOOL_DEFS: SubagentToolDef[] = [ + { + name: 'sim_build', + agentId: 'build', + description: `Build a workflow end-to-end in a single step. This is the fast mode equivalent for headless/MCP usage. + +USE THIS WHEN: +- Building a new workflow from scratch +- Modifying an existing workflow +- You want to gather information and build in one pass without separate plan→edit steps + +WORKFLOW ID (REQUIRED): +- For NEW workflows: First call create_workflow to get a workflowId, then pass it here +- For EXISTING workflows: Always pass the workflowId parameter + +CAN DO: +- Gather information about blocks, credentials, patterns +- Search documentation and patterns for best practices +- Add, modify, or remove blocks +- Configure block settings and connections +- Set environment variables and workflow variables + +CANNOT DO: +- Run or test workflows (use sim_test separately) +- Deploy workflows (use sim_deploy separately) + +WORKFLOW: +1. Call create_workflow to get a workflowId (for new workflows) +2. Call sim_build with the request and workflowId +3. Build agent gathers info and builds in one pass +4. Call sim_test to verify it works +5. Optionally call sim_deploy to make it externally accessible`, + inputSchema: { + type: 'object', + properties: { + request: { + type: 'string', + description: 'What you want to build or modify in the workflow.', + }, + workflowId: { + type: 'string', + description: + 'REQUIRED. The workflow ID. 
For new workflows, call create_workflow first to get this.', + }, + context: { type: 'object' }, + }, + required: ['request', 'workflowId'], + }, + }, + { + name: 'sim_discovery', + agentId: 'discovery', + description: `Find workflows by their contents or functionality when the user doesn't know the exact name or ID. + +USE THIS WHEN: +- User describes a workflow by what it does: "the one that sends emails", "my Slack notification workflow" +- User refers to workflow contents: "the workflow with the OpenAI block" +- User needs to search/match workflows by functionality or description + +DO NOT USE (use direct tools instead): +- User knows the workflow name → use get_workflow +- User wants to list all workflows → use list_workflows +- User wants to list workspaces → use list_workspaces +- User wants to list folders → use list_folders`, + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + workspaceId: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'sim_plan', + agentId: 'plan', + description: `Plan workflow changes by gathering required information. For most cases, prefer sim_build which combines planning and editing in one step. + +USE THIS WHEN: +- You need fine-grained control over the build process +- You want to inspect the plan before executing it + +WORKFLOW ID (REQUIRED): +- For NEW workflows: First call create_workflow to get a workflowId, then pass it here +- For EXISTING workflows: Always pass the workflowId parameter + +This tool gathers information about available blocks, credentials, and the current workflow state. + +RETURNS: A plan object containing block configurations, connections, and technical details. 
+IMPORTANT: Pass the returned plan EXACTLY to sim_edit - do not modify or summarize it.`, + inputSchema: { + type: 'object', + properties: { + request: { + type: 'string', + description: 'What you want to build or modify in the workflow.', + }, + workflowId: { + type: 'string', + description: + 'REQUIRED. The workflow ID. For new workflows, call create_workflow first to get this.', + }, + context: { type: 'object' }, + }, + required: ['request', 'workflowId'], + }, + }, + { + name: 'sim_edit', + agentId: 'edit', + description: `Execute a workflow plan from sim_plan. For most cases, prefer sim_build which combines planning and editing in one step. + +WORKFLOW ID (REQUIRED): +- You MUST provide the workflowId parameter + +PLAN (REQUIRED): +- Pass the EXACT plan object from sim_plan in the context.plan field +- Do NOT modify, summarize, or interpret the plan - pass it verbatim + +After sim_edit completes, you can test immediately with sim_test, or deploy with sim_deploy to make it accessible externally.`, + inputSchema: { + type: 'object', + properties: { + message: { type: 'string', description: 'Optional additional instructions for the edit.' }, + workflowId: { + type: 'string', + description: + 'REQUIRED. The workflow ID to edit. Get this from create_workflow for new workflows.', + }, + plan: { + type: 'object', + description: 'The plan object from sim_plan. Pass it EXACTLY as returned, do not modify.', + }, + context: { + type: 'object', + description: + 'Additional context. Put the plan in context.plan if not using the plan field directly.', + }, + }, + required: ['workflowId'], + }, + }, + { + name: 'sim_deploy', + agentId: 'deploy', + description: `Deploy a workflow to make it accessible externally. Workflows can be tested without deploying, but deployment is needed for API access, chat UIs, or MCP exposure. 
+ +DEPLOYMENT TYPES: +- "deploy as api" - REST API endpoint for programmatic access +- "deploy as chat" - Managed chat UI with auth options +- "deploy as mcp" - Expose as MCP tool on an MCP server for AI agents to call + +MCP DEPLOYMENT FLOW: +The deploy subagent will automatically: list available MCP servers → create one if needed → deploy the workflow as an MCP tool to that server. You can specify server name, tool name, and tool description. + +ALSO CAN: +- Get the deployed (production) state to compare with draft +- Generate workspace API keys for calling deployed workflows +- List and create MCP servers in the workspace`, + inputSchema: { + type: 'object', + properties: { + request: { + type: 'string', + description: 'The deployment request, e.g. "deploy as api" or "deploy as chat"', + }, + workflowId: { + type: 'string', + description: 'REQUIRED. The workflow ID to deploy.', + }, + context: { type: 'object' }, + }, + required: ['request', 'workflowId'], + }, + }, + { + name: 'sim_test', + agentId: 'test', + description: `Run a workflow and verify its outputs. Works on both deployed and undeployed (draft) workflows. Use after building to verify correctness. + +Supports full and partial execution: +- Full run with test inputs +- Stop after a specific block (run_workflow_until_block) +- Run a single block in isolation (run_block) +- Resume from a specific block (run_from_block)`, + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + workflowId: { + type: 'string', + description: 'REQUIRED. The workflow ID to test.', + }, + context: { type: 'object' }, + }, + required: ['request', 'workflowId'], + }, + }, + { + name: 'sim_debug', + agentId: 'debug', + description: + 'Diagnose errors or unexpected workflow behavior. Provide the error message and workflowId. 
Returns root cause analysis and fix suggestions.', + inputSchema: { + type: 'object', + properties: { + error: { type: 'string', description: 'The error message or description of the issue.' }, + workflowId: { type: 'string', description: 'REQUIRED. The workflow ID to debug.' }, + context: { type: 'object' }, + }, + required: ['error', 'workflowId'], + }, + }, + { + name: 'sim_auth', + agentId: 'auth', + description: + 'Check OAuth connection status, list connected services, and initiate new OAuth connections. Use when a workflow needs third-party service access (Google, Slack, GitHub, etc.). In MCP/headless mode, returns an authorization URL the user must open in their browser to complete the OAuth flow.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'sim_knowledge', + agentId: 'knowledge', + description: + 'Manage knowledge bases for RAG-powered document retrieval. Supports listing, creating, updating, and deleting knowledge bases. Knowledge bases can be attached to agent blocks for context-aware responses.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'sim_custom_tool', + agentId: 'custom_tool', + description: + 'Manage custom tools (reusable API integrations). Supports listing, creating, updating, and deleting custom tools. Custom tools can be added to agent blocks as callable functions.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'sim_info', + agentId: 'info', + description: + "Inspect a workflow's blocks, connections, outputs, variables, and metadata. Use for questions about the Sim platform itself — how blocks work, what integrations are available, platform concepts, etc. 
Always provide workflowId to scope results to a specific workflow.", + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + workflowId: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'sim_workflow', + agentId: 'workflow', + description: + 'Manage workflow-level configuration: environment variables, settings, scheduling, and deployment status. Use for any data about a specific workflow — its settings, credentials, variables, or deployment state.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + workflowId: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'sim_research', + agentId: 'research', + description: + 'Research external APIs and documentation. Use when you need to understand third-party services, external APIs, authentication flows, or data formats OUTSIDE of Sim. For questions about Sim itself, use sim_info instead.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'sim_superagent', + agentId: 'superagent', + description: + 'Execute direct actions NOW: send an email, post to Slack, make an API call, etc. Use when the user wants to DO something immediately rather than build a workflow for it.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, + { + name: 'sim_platform', + agentId: 'tour', + description: + 'Get help with Sim platform navigation, keyboard shortcuts, and UI actions. Use when the user asks "how do I..." 
about the Sim editor, wants keyboard shortcuts, or needs to know what actions are available in the UI.', + inputSchema: { + type: 'object', + properties: { + request: { type: 'string' }, + context: { type: 'object' }, + }, + required: ['request'], + }, + }, +] diff --git a/apps/sim/lib/copilot/tools/server/base-tool.ts b/apps/sim/lib/copilot/tools/server/base-tool.ts index 40ec3584c..176059734 100644 --- a/apps/sim/lib/copilot/tools/server/base-tool.ts +++ b/apps/sim/lib/copilot/tools/server/base-tool.ts @@ -1,4 +1,20 @@ -export interface BaseServerTool { - name: string - execute(args: TArgs, context?: { userId: string }): Promise +import type { z } from 'zod' + +export interface ServerToolContext { + userId: string +} + +/** + * Base interface for server-side copilot tools. + * + * Tools can optionally declare Zod schemas for input/output validation. + * If provided, the router validates automatically. + */ +export interface BaseServerTool { + name: string + execute(args: TArgs, context?: ServerToolContext): Promise + /** Optional Zod schema for input validation */ + inputSchema?: z.ZodType + /** Optional Zod schema for output validation */ + outputSchema?: z.ZodType } diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-block-config.ts b/apps/sim/lib/copilot/tools/server/blocks/get-block-config.ts index cd95577d7..64021e07c 100644 --- a/apps/sim/lib/copilot/tools/server/blocks/get-block-config.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-block-config.ts @@ -1,6 +1,7 @@ import { createLogger } from '@sim/logger' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' import { + GetBlockConfigInput, type GetBlockConfigInputType, GetBlockConfigResult, type GetBlockConfigResultType, @@ -370,6 +371,8 @@ export const getBlockConfigServerTool: BaseServerTool< GetBlockConfigResultType > = { name: 'get_block_config', + inputSchema: GetBlockConfigInput, + outputSchema: GetBlockConfigResult, async execute( { blockType, operation, trigger }: 
GetBlockConfigInputType, context?: { userId: string } diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-block-options.ts b/apps/sim/lib/copilot/tools/server/blocks/get-block-options.ts index 177482fc3..49c0648b2 100644 --- a/apps/sim/lib/copilot/tools/server/blocks/get-block-options.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-block-options.ts @@ -1,6 +1,7 @@ import { createLogger } from '@sim/logger' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' import { + GetBlockOptionsInput, type GetBlockOptionsInputType, GetBlockOptionsResult, type GetBlockOptionsResultType, @@ -14,6 +15,8 @@ export const getBlockOptionsServerTool: BaseServerTool< GetBlockOptionsResultType > = { name: 'get_block_options', + inputSchema: GetBlockOptionsInput, + outputSchema: GetBlockOptionsResult, async execute( { blockId }: GetBlockOptionsInputType, context?: { userId: string } diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts index 9413dc278..e695f270e 100644 --- a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-and-tools.ts @@ -1,9 +1,6 @@ import { createLogger } from '@sim/logger' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' -import { - type GetBlocksAndToolsInput, - GetBlocksAndToolsResult, -} from '@/lib/copilot/tools/shared/schemas' +import { GetBlocksAndToolsInput, GetBlocksAndToolsResult } from '@/lib/copilot/tools/shared/schemas' import { registry as blockRegistry } from '@/blocks/registry' import type { BlockConfig } from '@/blocks/types' import { getUserPermissionConfig } from '@/ee/access-control/utils/permission-check' @@ -13,6 +10,8 @@ export const getBlocksAndToolsServerTool: BaseServerTool< ReturnType > = { name: 'get_blocks_and_tools', + inputSchema: GetBlocksAndToolsInput, + outputSchema: GetBlocksAndToolsResult, async execute(_args: 
unknown, context?: { userId: string }) { const logger = createLogger('GetBlocksAndToolsServerTool') logger.debug('Executing get_blocks_and_tools') diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts index 5203151da..ed6b60acb 100644 --- a/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-blocks-metadata-tool.ts @@ -2,10 +2,7 @@ import { existsSync, readFileSync } from 'fs' import { join } from 'path' import { createLogger } from '@sim/logger' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' -import { - type GetBlocksMetadataInput, - GetBlocksMetadataResult, -} from '@/lib/copilot/tools/shared/schemas' +import { GetBlocksMetadataInput, GetBlocksMetadataResult } from '@/lib/copilot/tools/shared/schemas' import { registry as blockRegistry } from '@/blocks/registry' import { AuthMode, type BlockConfig, isHiddenFromDisplay } from '@/blocks/types' import { getUserPermissionConfig } from '@/ee/access-control/utils/permission-check' @@ -105,6 +102,8 @@ export const getBlocksMetadataServerTool: BaseServerTool< ReturnType > = { name: 'get_blocks_metadata', + inputSchema: GetBlocksMetadataInput, + outputSchema: GetBlocksMetadataResult, async execute( { blockIds }: ReturnType, context?: { userId: string } @@ -292,7 +291,11 @@ export const getBlocksMetadataServerTool: BaseServerTool< if (existsSync(docPath)) { metadata.yamlDocumentation = readFileSync(docPath, 'utf-8') } - } catch {} + } catch (error) { + logger.warn('Failed to read YAML documentation file', { + error: error instanceof Error ? 
error.message : String(error), + }) + } if (metadata) { result[blockId] = removeNullish(metadata) as CopilotBlockMetadata @@ -955,7 +958,12 @@ function resolveToolIdForOperation(blockConfig: BlockConfig, opId: string): stri const maybeToolId = toolSelector({ operation: opId }) if (typeof maybeToolId === 'string') return maybeToolId } - } catch {} + } catch (error) { + const toolLogger = createLogger('GetBlocksMetadataServerTool') + toolLogger.warn('Failed to resolve tool ID for operation', { + error: error instanceof Error ? error.message : String(error), + }) + } return undefined } diff --git a/apps/sim/lib/copilot/tools/server/blocks/get-trigger-blocks.ts b/apps/sim/lib/copilot/tools/server/blocks/get-trigger-blocks.ts index 5f5820e20..367c61475 100644 --- a/apps/sim/lib/copilot/tools/server/blocks/get-trigger-blocks.ts +++ b/apps/sim/lib/copilot/tools/server/blocks/get-trigger-blocks.ts @@ -15,6 +15,8 @@ export const getTriggerBlocksServerTool: BaseServerTool< ReturnType > = { name: 'get_trigger_blocks', + inputSchema: GetTriggerBlocksInput, + outputSchema: GetTriggerBlocksResult, async execute(_args: unknown, context?: { userId: string }) { const logger = createLogger('GetTriggerBlocksServerTool') logger.debug('Executing get_trigger_blocks') diff --git a/apps/sim/lib/copilot/tools/server/knowledge/knowledge-base.ts b/apps/sim/lib/copilot/tools/server/knowledge/knowledge-base.ts index d6d5af7ba..5b00c421d 100644 --- a/apps/sim/lib/copilot/tools/server/knowledge/knowledge-base.ts +++ b/apps/sim/lib/copilot/tools/server/knowledge/knowledge-base.ts @@ -7,6 +7,14 @@ import { getKnowledgeBaseById, getKnowledgeBases, } from '@/lib/knowledge/service' +import { + createTagDefinition, + deleteTagDefinition, + getDocumentTagDefinitions, + getNextAvailableSlot, + getTagUsageStats, + updateTagDefinition, +} from '@/lib/knowledge/tags/service' import { getQueryStrategy, handleVectorOnlySearch } from '@/app/api/knowledge/search/utils' const logger = 
createLogger('KnowledgeBaseServerTool') @@ -213,10 +221,177 @@ export const knowledgeBaseServerTool: BaseServerTool ({ + id: td.id, + tagSlot: td.tagSlot, + displayName: td.displayName, + fieldType: td.fieldType, + createdAt: td.createdAt, + })), + } + } + + case 'create_tag': { + if (!args.knowledgeBaseId) { + return { + success: false, + message: 'Knowledge base ID is required for create_tag operation', + } + } + if (!args.tagDisplayName) { + return { + success: false, + message: 'tagDisplayName is required for create_tag operation', + } + } + const fieldType = args.tagFieldType || 'text' + + const tagSlot = await getNextAvailableSlot(args.knowledgeBaseId, fieldType) + if (!tagSlot) { + return { + success: false, + message: `No available slots for field type "${fieldType}". Maximum tags of this type reached.`, + } + } + + const requestId = crypto.randomUUID().slice(0, 8) + const newTag = await createTagDefinition( + { + knowledgeBaseId: args.knowledgeBaseId, + tagSlot, + displayName: args.tagDisplayName, + fieldType, + }, + requestId + ) + + logger.info('Tag definition created via copilot', { + knowledgeBaseId: args.knowledgeBaseId, + tagId: newTag.id, + displayName: newTag.displayName, + userId: context.userId, + }) + + return { + success: true, + message: `Tag "${newTag.displayName}" created successfully`, + data: { + id: newTag.id, + tagSlot: newTag.tagSlot, + displayName: newTag.displayName, + fieldType: newTag.fieldType, + }, + } + } + + case 'update_tag': { + if (!args.tagDefinitionId) { + return { + success: false, + message: 'tagDefinitionId is required for update_tag operation', + } + } + + const updateData: { displayName?: string; fieldType?: string } = {} + if (args.tagDisplayName) updateData.displayName = args.tagDisplayName + if (args.tagFieldType) updateData.fieldType = args.tagFieldType + + if (!updateData.displayName && !updateData.fieldType) { + return { + success: false, + message: 'At least one of tagDisplayName or tagFieldType is required for 
update_tag', + } + } + + const requestId = crypto.randomUUID().slice(0, 8) + const updatedTag = await updateTagDefinition(args.tagDefinitionId, updateData, requestId) + + logger.info('Tag definition updated via copilot', { + tagId: args.tagDefinitionId, + userId: context.userId, + }) + + return { + success: true, + message: `Tag "${updatedTag.displayName}" updated successfully`, + data: { + id: updatedTag.id, + tagSlot: updatedTag.tagSlot, + displayName: updatedTag.displayName, + fieldType: updatedTag.fieldType, + }, + } + } + + case 'delete_tag': { + if (!args.tagDefinitionId) { + return { + success: false, + message: 'tagDefinitionId is required for delete_tag operation', + } + } + + const requestId = crypto.randomUUID().slice(0, 8) + const deleted = await deleteTagDefinition(args.tagDefinitionId, requestId) + + logger.info('Tag definition deleted via copilot', { + tagId: args.tagDefinitionId, + tagSlot: deleted.tagSlot, + displayName: deleted.displayName, + userId: context.userId, + }) + + return { + success: true, + message: `Tag "${deleted.displayName}" deleted successfully. All document/chunk references cleared.`, + data: { + tagSlot: deleted.tagSlot, + displayName: deleted.displayName, + }, + } + } + + case 'get_tag_usage': { + if (!args.knowledgeBaseId) { + return { + success: false, + message: 'Knowledge base ID is required for get_tag_usage operation', + } + } + + const requestId = crypto.randomUUID().slice(0, 8) + const stats = await getTagUsageStats(args.knowledgeBaseId, requestId) + + return { + success: true, + message: `Retrieved usage stats for ${stats.length} tag(s)`, + data: stats, + } + } + default: return { success: false, - message: `Unknown operation: ${operation}. Supported operations: create, list, get, query`, + message: `Unknown operation: ${operation}. 
Supported operations: create, list, get, query, list_tags, create_tag, update_tag, delete_tag, get_tag_usage`, } } } catch (error) { diff --git a/apps/sim/lib/copilot/tools/server/other/make-api-request.ts b/apps/sim/lib/copilot/tools/server/other/make-api-request.ts index 8d47d7c82..3f9546051 100644 --- a/apps/sim/lib/copilot/tools/server/other/make-api-request.ts +++ b/apps/sim/lib/copilot/tools/server/other/make-api-request.ts @@ -3,22 +3,34 @@ import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' import { executeTool } from '@/tools' import type { TableRow } from '@/tools/types' +const RESULT_CHAR_CAP = Number(process.env.COPILOT_TOOL_RESULT_CHAR_CAP || 20000) + interface MakeApiRequestParams { url: string method: 'GET' | 'POST' | 'PUT' queryParams?: Record headers?: Record - body?: any + body?: unknown } -export const makeApiRequestServerTool: BaseServerTool = { +interface ApiResponse { + data: string + status: number + headers: Record + truncated?: boolean + totalChars?: number + previewChars?: number + note?: string +} + +export const makeApiRequestServerTool: BaseServerTool = { name: 'make_api_request', - async execute(params: MakeApiRequestParams): Promise { + async execute(params: MakeApiRequestParams): Promise { const logger = createLogger('MakeApiRequestServerTool') - const { url, method, queryParams, headers, body } = params || ({} as MakeApiRequestParams) + const { url, method, queryParams, headers, body } = params if (!url || !method) throw new Error('url and method are required') - const toTableRows = (obj?: Record): TableRow[] | null => { + const toTableRows = (obj?: Record): TableRow[] | null => { if (!obj || typeof obj !== 'object') return null return Object.entries(obj).map(([key, value]) => ({ id: key, @@ -26,21 +38,22 @@ export const makeApiRequestServerTool: BaseServerTool })) } const headersTable = toTableRows(headers) - const queryParamsTable = toTableRows(queryParams as Record | undefined) + const queryParamsTable = 
toTableRows(queryParams as Record | undefined) const result = await executeTool( 'http_request', { url, method, params: queryParamsTable, headers: headersTable, body }, true ) - if (!result.success) throw new Error(result.error || 'API request failed') - const output = (result as any).output || result - const data = output.output?.data ?? output.data - const status = output.output?.status ?? output.status ?? 200 - const respHeaders = output.output?.headers ?? output.headers ?? {} + if (!result.success) throw new Error(result.error ?? 'API request failed') - const CAP = Number(process.env.COPILOT_TOOL_RESULT_CHAR_CAP || 20000) - const toStringSafe = (val: any): string => { + const output = result.output as Record | undefined + const nestedOutput = output?.output as Record | undefined + const data = nestedOutput?.data ?? output?.data + const status = (nestedOutput?.status ?? output?.status ?? 200) as number + const respHeaders = (nestedOutput?.headers ?? output?.headers ?? {}) as Record + + const toStringSafe = (val: unknown): string => { if (typeof val === 'string') return val try { return JSON.stringify(val) @@ -53,7 +66,6 @@ export const makeApiRequestServerTool: BaseServerTool try { let text = html let previous: string - do { previous = text text = text.replace(//gi, '') @@ -61,26 +73,21 @@ export const makeApiRequestServerTool: BaseServerTool text = text.replace(/<[^>]*>/g, ' ') text = text.replace(/[<>]/g, ' ') } while (text !== previous) - return text.replace(/\s+/g, ' ').trim() } catch { return html } } + let normalized = toStringSafe(data) const looksLikeHtml = //i.test(normalized) || //i.test(normalized) if (looksLikeHtml) normalized = stripHtml(normalized) + const totalChars = normalized.length - if (totalChars > CAP) { - const preview = normalized.slice(0, CAP) - logger.warn('API response truncated by character cap', { - url, - method, - totalChars, - previewChars: preview.length, - cap: CAP, - }) + if (totalChars > RESULT_CHAR_CAP) { + const preview = 
normalized.slice(0, RESULT_CHAR_CAP) + logger.warn('API response truncated', { url, method, totalChars, cap: RESULT_CHAR_CAP }) return { data: preview, status, @@ -88,10 +95,11 @@ export const makeApiRequestServerTool: BaseServerTool truncated: true, totalChars, previewChars: preview.length, - note: `Response truncated to ${CAP} characters to avoid large payloads`, + note: `Response truncated to ${RESULT_CHAR_CAP} characters`, } } - logger.info('API request executed', { url, method, status, totalChars }) + + logger.debug('API request executed', { url, method, status, totalChars }) return { data: normalized, status, headers: respHeaders } }, } diff --git a/apps/sim/lib/copilot/tools/server/other/search-online.ts b/apps/sim/lib/copilot/tools/server/other/search-online.ts index e8b725b05..75f59b507 100644 --- a/apps/sim/lib/copilot/tools/server/other/search-online.ts +++ b/apps/sim/lib/copilot/tools/server/other/search-online.ts @@ -11,78 +11,83 @@ interface OnlineSearchParams { hl?: string } -export const searchOnlineServerTool: BaseServerTool = { +interface SearchResult { + title: string + link: string + snippet: string + date?: string + position?: number +} + +interface SearchResponse { + results: SearchResult[] + query: string + type: string + totalResults: number + source: 'exa' | 'serper' +} + +export const searchOnlineServerTool: BaseServerTool = { name: 'search_online', - async execute(params: OnlineSearchParams): Promise { + async execute(params: OnlineSearchParams): Promise { const logger = createLogger('SearchOnlineServerTool') const { query, num = 10, type = 'search', gl, hl } = params if (!query || typeof query !== 'string') throw new Error('query is required') - // Check which API keys are available const hasExaApiKey = Boolean(env.EXA_API_KEY && String(env.EXA_API_KEY).length > 0) const hasSerperApiKey = Boolean(env.SERPER_API_KEY && String(env.SERPER_API_KEY).length > 0) - logger.info('Performing online search', { - queryLength: query.length, - num, - 
type, - gl, - hl, - hasExaApiKey, - hasSerperApiKey, - }) + logger.debug('Performing online search', { queryLength: query.length, num, type }) // Try Exa first if available if (hasExaApiKey) { try { - logger.debug('Attempting exa_search', { num }) const exaResult = await executeTool('exa_search', { query, numResults: num, type: 'auto', - apiKey: env.EXA_API_KEY || '', + apiKey: env.EXA_API_KEY ?? '', }) - const exaResults = (exaResult as any)?.output?.results || [] - const count = Array.isArray(exaResults) ? exaResults.length : 0 - const firstTitle = count > 0 ? String(exaResults[0]?.title || '') : undefined + const output = exaResult.output as + | { + results?: Array<{ + title?: string + url?: string + text?: string + summary?: string + publishedDate?: string + }> + } + | undefined + const exaResults = output?.results ?? [] - logger.info('exa_search completed', { - success: exaResult.success, - resultsCount: count, - firstTitlePreview: firstTitle?.slice(0, 120), - }) - - if (exaResult.success && count > 0) { - // Transform Exa results to match expected format - const transformedResults = exaResults.map((result: any) => ({ - title: result.title || '', - link: result.url || '', - snippet: result.text || result.summary || '', + if (exaResult.success && exaResults.length > 0) { + const transformedResults: SearchResult[] = exaResults.map((result, index) => ({ + title: result.title ?? '', + link: result.url ?? '', + snippet: result.text ?? result.summary ?? 
'', date: result.publishedDate, - position: exaResults.indexOf(result) + 1, + position: index + 1, })) return { results: transformedResults, query, type, - totalResults: count, + totalResults: transformedResults.length, source: 'exa', } } - logger.warn('exa_search returned no results, falling back to Serper', { - queryLength: query.length, - }) - } catch (exaError: any) { + logger.debug('exa_search returned no results, falling back to Serper') + } catch (exaError) { logger.warn('exa_search failed, falling back to Serper', { - error: exaError?.message, + error: exaError instanceof Error ? exaError.message : String(exaError), }) } } - // Fall back to Serper if Exa failed or wasn't available if (!hasSerperApiKey) { throw new Error('No search API keys available (EXA_API_KEY or SERPER_API_KEY required)') } @@ -93,41 +98,24 @@ export const searchOnlineServerTool: BaseServerTool = { type, gl, hl, - apiKey: env.SERPER_API_KEY || '', + apiKey: env.SERPER_API_KEY ?? '', } - try { - logger.debug('Calling serper_search tool', { type, num, gl, hl }) - const result = await executeTool('serper_search', toolParams) - const results = (result as any)?.output?.searchResults || [] - const count = Array.isArray(results) ? results.length : 0 - const firstTitle = count > 0 ? String(results[0]?.title || '') : undefined + const result = await executeTool('serper_search', toolParams) + const output = result.output as { searchResults?: SearchResult[] } | undefined + const results = output?.searchResults ?? [] - logger.info('serper_search completed', { - success: result.success, - resultsCount: count, - firstTitlePreview: firstTitle?.slice(0, 120), - }) + if (!result.success) { + const errorMsg = (result as { error?: string }).error ?? 
'Search failed' + throw new Error(errorMsg) + } - if (!result.success) { - logger.error('serper_search failed', { error: (result as any)?.error }) - throw new Error((result as any)?.error || 'Search failed') - } - - if (count === 0) { - logger.warn('serper_search returned no results', { queryLength: query.length }) - } - - return { - results, - query, - type, - totalResults: count, - source: 'serper', - } - } catch (e: any) { - logger.error('search_online execution error', { message: e?.message }) - throw e + return { + results, + query, + type, + totalResults: results.length, + source: 'serper', } }, } diff --git a/apps/sim/lib/copilot/tools/server/router.ts b/apps/sim/lib/copilot/tools/server/router.ts index 2c79cff74..e17b1364f 100644 --- a/apps/sim/lib/copilot/tools/server/router.ts +++ b/apps/sim/lib/copilot/tools/server/router.ts @@ -1,5 +1,5 @@ import { createLogger } from '@sim/logger' -import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' +import type { BaseServerTool, ServerToolContext } from '@/lib/copilot/tools/server/base-tool' import { getBlockConfigServerTool } from '@/lib/copilot/tools/server/blocks/get-block-config' import { getBlockOptionsServerTool } from '@/lib/copilot/tools/server/blocks/get-block-options' import { getBlocksAndToolsServerTool } from '@/lib/copilot/tools/server/blocks/get-blocks-and-tools' @@ -13,101 +13,52 @@ import { getCredentialsServerTool } from '@/lib/copilot/tools/server/user/get-cr import { setEnvironmentVariablesServerTool } from '@/lib/copilot/tools/server/user/set-environment-variables' import { editWorkflowServerTool } from '@/lib/copilot/tools/server/workflow/edit-workflow' import { getWorkflowConsoleServerTool } from '@/lib/copilot/tools/server/workflow/get-workflow-console' -import { - ExecuteResponseSuccessSchema, - GetBlockConfigInput, - GetBlockConfigResult, - GetBlockOptionsInput, - GetBlockOptionsResult, - GetBlocksAndToolsInput, - GetBlocksAndToolsResult, - GetBlocksMetadataInput, - 
GetBlocksMetadataResult, - GetTriggerBlocksInput, - GetTriggerBlocksResult, - KnowledgeBaseArgsSchema, -} from '@/lib/copilot/tools/shared/schemas' +import { ExecuteResponseSuccessSchema } from '@/lib/copilot/tools/shared/schemas' -// Generic execute response schemas (success path only for this route; errors handled via HTTP status) export { ExecuteResponseSuccessSchema } export type ExecuteResponseSuccess = (typeof ExecuteResponseSuccessSchema)['_type'] -// Define server tool registry for the new copilot runtime -const serverToolRegistry: Record> = {} const logger = createLogger('ServerToolRouter') -// Register tools -serverToolRegistry[getBlocksAndToolsServerTool.name] = getBlocksAndToolsServerTool -serverToolRegistry[getBlocksMetadataServerTool.name] = getBlocksMetadataServerTool -serverToolRegistry[getBlockOptionsServerTool.name] = getBlockOptionsServerTool -serverToolRegistry[getBlockConfigServerTool.name] = getBlockConfigServerTool -serverToolRegistry[getTriggerBlocksServerTool.name] = getTriggerBlocksServerTool -serverToolRegistry[editWorkflowServerTool.name] = editWorkflowServerTool -serverToolRegistry[getWorkflowConsoleServerTool.name] = getWorkflowConsoleServerTool -serverToolRegistry[searchDocumentationServerTool.name] = searchDocumentationServerTool -serverToolRegistry[searchOnlineServerTool.name] = searchOnlineServerTool -serverToolRegistry[setEnvironmentVariablesServerTool.name] = setEnvironmentVariablesServerTool -serverToolRegistry[getCredentialsServerTool.name] = getCredentialsServerTool -serverToolRegistry[makeApiRequestServerTool.name] = makeApiRequestServerTool -serverToolRegistry[knowledgeBaseServerTool.name] = knowledgeBaseServerTool +/** Registry of all server tools. Tools self-declare their validation schemas. 
*/ +const serverToolRegistry: Record = { + [getBlocksAndToolsServerTool.name]: getBlocksAndToolsServerTool, + [getBlocksMetadataServerTool.name]: getBlocksMetadataServerTool, + [getBlockOptionsServerTool.name]: getBlockOptionsServerTool, + [getBlockConfigServerTool.name]: getBlockConfigServerTool, + [getTriggerBlocksServerTool.name]: getTriggerBlocksServerTool, + [editWorkflowServerTool.name]: editWorkflowServerTool, + [getWorkflowConsoleServerTool.name]: getWorkflowConsoleServerTool, + [searchDocumentationServerTool.name]: searchDocumentationServerTool, + [searchOnlineServerTool.name]: searchOnlineServerTool, + [setEnvironmentVariablesServerTool.name]: setEnvironmentVariablesServerTool, + [getCredentialsServerTool.name]: getCredentialsServerTool, + [makeApiRequestServerTool.name]: makeApiRequestServerTool, + [knowledgeBaseServerTool.name]: knowledgeBaseServerTool, +} +/** + * Route a tool execution request to the appropriate server tool. + * Validates input/output using the tool's declared Zod schemas if present. 
+ */ export async function routeExecution( toolName: string, payload: unknown, - context?: { userId: string } -): Promise { + context?: ServerToolContext +): Promise { const tool = serverToolRegistry[toolName] if (!tool) { throw new Error(`Unknown server tool: ${toolName}`) } - logger.debug('Routing to tool', { - toolName, - payloadPreview: (() => { - try { - return JSON.stringify(payload).slice(0, 200) - } catch { - return undefined - } - })(), - }) - let args: any = payload || {} - if (toolName === 'get_blocks_and_tools') { - args = GetBlocksAndToolsInput.parse(args) - } - if (toolName === 'get_blocks_metadata') { - args = GetBlocksMetadataInput.parse(args) - } - if (toolName === 'get_block_options') { - args = GetBlockOptionsInput.parse(args) - } - if (toolName === 'get_block_config') { - args = GetBlockConfigInput.parse(args) - } - if (toolName === 'get_trigger_blocks') { - args = GetTriggerBlocksInput.parse(args) - } - if (toolName === 'knowledge_base') { - args = KnowledgeBaseArgsSchema.parse(args) - } + logger.debug('Routing to tool', { toolName }) + // Validate input if tool declares a schema + const args = tool.inputSchema ? tool.inputSchema.parse(payload ?? {}) : (payload ?? {}) + + // Execute const result = await tool.execute(args, context) - if (toolName === 'get_blocks_and_tools') { - return GetBlocksAndToolsResult.parse(result) - } - if (toolName === 'get_blocks_metadata') { - return GetBlocksMetadataResult.parse(result) - } - if (toolName === 'get_block_options') { - return GetBlockOptionsResult.parse(result) - } - if (toolName === 'get_block_config') { - return GetBlockConfigResult.parse(result) - } - if (toolName === 'get_trigger_blocks') { - return GetTriggerBlocksResult.parse(result) - } - - return result + // Validate output if tool declares a schema + return tool.outputSchema ? 
tool.outputSchema.parse(result) : result } diff --git a/apps/sim/lib/copilot/tools/server/user/get-credentials.ts b/apps/sim/lib/copilot/tools/server/user/get-credentials.ts index 5aafc2dcb..9f0c8b411 100644 --- a/apps/sim/lib/copilot/tools/server/user/get-credentials.ts +++ b/apps/sim/lib/copilot/tools/server/user/get-credentials.ts @@ -89,7 +89,11 @@ export const getCredentialsServerTool: BaseServerTool try { const decoded = jwtDecode<{ email?: string; name?: string }>(acc.idToken) displayName = decoded.email || decoded.name || '' - } catch {} + } catch (error) { + logger.warn('Failed to decode JWT id token', { + error: error instanceof Error ? error.message : String(error), + }) + } } if (!displayName && baseProvider === 'github') displayName = `${acc.accountId} (GitHub)` if (!displayName && userEmail) displayName = userEmail @@ -107,7 +111,11 @@ export const getCredentialsServerTool: BaseServerTool acc.id ) accessToken = refreshedToken || accessToken - } catch {} + } catch (error) { + logger.warn('Failed to refresh OAuth access token', { + error: error instanceof Error ? 
error.message : String(error), + }) + } connectedCredentials.push({ id: acc.id, name: displayName, diff --git a/apps/sim/lib/copilot/tools/server/user/set-environment-variables.ts b/apps/sim/lib/copilot/tools/server/user/set-environment-variables.ts index a4f7959b5..000cb65ab 100644 --- a/apps/sim/lib/copilot/tools/server/user/set-environment-variables.ts +++ b/apps/sim/lib/copilot/tools/server/user/set-environment-variables.ts @@ -1,9 +1,8 @@ import { db } from '@sim/db' -import { workspaceEnvironment } from '@sim/db/schema' +import { environment } from '@sim/db/schema' import { createLogger } from '@sim/logger' import { eq } from 'drizzle-orm' import { z } from 'zod' -import { createPermissionError, verifyWorkflowAccess } from '@/lib/copilot/auth/permissions' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' import { decryptSecret, encryptSecret } from '@/lib/core/security/encryption' @@ -50,35 +49,16 @@ export const setEnvironmentVariablesServerTool: BaseServerTool) || {} @@ -114,36 +94,34 @@ export const setEnvironmentVariablesServerTool: BaseServerTool -} - -/** - * Logs and records a skipped item - */ -function logSkippedItem(skippedItems: SkippedItem[], item: SkippedItem): void { - validationLogger.warn(`Skipped ${item.operationType} operation: ${item.reason}`, { - type: item.type, - operationType: item.operationType, - blockId: item.blockId, - ...(item.details && { details: item.details }), - }) - skippedItems.push(item) -} - -/** - * Finds an existing block with the same normalized name. 
- */ -function findBlockWithDuplicateNormalizedName( - blocks: Record, - name: string, - excludeBlockId: string -): [string, any] | undefined { - const normalizedName = normalizeName(name) - return Object.entries(blocks).find( - ([blockId, block]: [string, any]) => - blockId !== excludeBlockId && normalizeName(block.name || '') === normalizedName - ) -} - -/** - * Result of input validation - */ -interface ValidationResult { - validInputs: Record - errors: ValidationError[] -} - -/** - * Validates and filters inputs against a block's subBlock configuration - * Returns valid inputs and any validation errors encountered - */ -function validateInputsForBlock( - blockType: string, - inputs: Record, - blockId: string -): ValidationResult { - const errors: ValidationError[] = [] - const blockConfig = getBlock(blockType) - - if (!blockConfig) { - // Unknown block type - return inputs as-is (let it fail later if invalid) - validationLogger.warn(`Unknown block type: ${blockType}, skipping validation`) - return { validInputs: inputs, errors: [] } - } - - const validatedInputs: Record = {} - const subBlockMap = new Map() - - // Build map of subBlock id -> config - for (const subBlock of blockConfig.subBlocks) { - subBlockMap.set(subBlock.id, subBlock) - } - - for (const [key, value] of Object.entries(inputs)) { - // Skip runtime subblock IDs - if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { - continue - } - - const subBlockConfig = subBlockMap.get(key) - - // If subBlock doesn't exist in config, skip it (unless it's a known dynamic field) - if (!subBlockConfig) { - // Some fields are valid but not in subBlocks (like loop/parallel config) - // Allow these through for special block types - if (blockType === 'loop' || blockType === 'parallel') { - validatedInputs[key] = value - } else { - errors.push({ - blockId, - blockType, - field: key, - value, - error: `Unknown input field "${key}" for block type "${blockType}"`, - }) - } - continue - } - - // Note: We do NOT check 
subBlockConfig.condition here. - // Conditions are for UI display logic (show/hide fields in the editor). - // For API/Copilot, any valid field in the block schema should be accepted. - // The runtime will use the relevant fields based on the actual operation. - - // Validate value based on subBlock type - const validationResult = validateValueForSubBlockType( - subBlockConfig, - value, - key, - blockType, - blockId - ) - if (validationResult.valid) { - validatedInputs[key] = validationResult.value - } else if (validationResult.error) { - errors.push(validationResult.error) - } - } - - return { validInputs: validatedInputs, errors } -} - -/** - * Result of validating a single value - */ -interface ValueValidationResult { - valid: boolean - value?: any - error?: ValidationError -} - -/** - * Validates a value against its expected subBlock type - * Returns validation result with the value or an error - */ -function validateValueForSubBlockType( - subBlockConfig: SubBlockConfig, - value: any, - fieldName: string, - blockType: string, - blockId: string -): ValueValidationResult { - const { type } = subBlockConfig - - // Handle null/undefined - allow clearing fields - if (value === null || value === undefined) { - return { valid: true, value } - } - - switch (type) { - case 'dropdown': { - // Validate against allowed options - const options = - typeof subBlockConfig.options === 'function' - ? subBlockConfig.options() - : subBlockConfig.options - if (options && Array.isArray(options)) { - const validIds = options.map((opt) => opt.id) - if (!validIds.includes(value)) { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid dropdown value "${value}" for field "${fieldName}". Valid options: ${validIds.join(', ')}`, - }, - } - } - } - return { valid: true, value } - } - - case 'slider': { - // Validate numeric range - const numValue = typeof value === 'number' ? 
value : Number(value) - if (Number.isNaN(numValue)) { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid slider value "${value}" for field "${fieldName}" - must be a number`, - }, - } - } - // Clamp to range (allow but warn) - let clampedValue = numValue - if (subBlockConfig.min !== undefined && numValue < subBlockConfig.min) { - clampedValue = subBlockConfig.min - } - if (subBlockConfig.max !== undefined && numValue > subBlockConfig.max) { - clampedValue = subBlockConfig.max - } - return { - valid: true, - value: subBlockConfig.integer ? Math.round(clampedValue) : clampedValue, - } - } - - case 'switch': { - // Must be boolean - if (typeof value !== 'boolean') { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid switch value "${value}" for field "${fieldName}" - must be true or false`, - }, - } - } - return { valid: true, value } - } - - case 'file-upload': { - // File upload should be an object with specific properties or null - if (value === null) return { valid: true, value: null } - if (typeof value !== 'object') { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid file-upload value for field "${fieldName}" - expected object with name and path properties, or null`, - }, - } - } - // Validate file object has required properties - if (value && (!value.name || !value.path)) { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid file-upload object for field "${fieldName}" - must have "name" and "path" properties`, - }, - } - } - return { valid: true, value } - } - - case 'input-format': - case 'table': { - // Should be an array - if (!Array.isArray(value)) { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid ${type} value for field "${fieldName}" - expected an array`, - }, - } - } 
- return { valid: true, value } - } - - case 'tool-input': { - // Should be an array of tool objects - if (!Array.isArray(value)) { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid tool-input value for field "${fieldName}" - expected an array of tool objects`, - }, - } - } - return { valid: true, value } - } - - case 'code': { - // Code must be a string (content can be JS, Python, JSON, SQL, HTML, etc.) - if (typeof value !== 'string') { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid code value for field "${fieldName}" - expected a string, got ${typeof value}`, - }, - } - } - return { valid: true, value } - } - - case 'response-format': { - // Allow empty/null - if (value === null || value === undefined || value === '') { - return { valid: true, value } - } - // Allow objects (will be stringified later by normalizeResponseFormat) - if (typeof value === 'object') { - return { valid: true, value } - } - // If string, must be valid JSON - if (typeof value === 'string') { - try { - JSON.parse(value) - return { valid: true, value } - } catch { - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid response-format value for field "${fieldName}" - string must be valid JSON`, - }, - } - } - } - // Reject numbers, booleans, etc. 
- return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid response-format value for field "${fieldName}" - expected a JSON string or object`, - }, - } - } - - case 'short-input': - case 'long-input': - case 'combobox': { - // Should be string (combobox allows custom values) - if (typeof value !== 'string' && typeof value !== 'number') { - // Convert to string but don't error - return { valid: true, value: String(value) } - } - return { valid: true, value } - } - - // Selector types - allow strings (IDs) or arrays of strings - case 'oauth-input': - case 'knowledge-base-selector': - case 'document-selector': - case 'file-selector': - case 'project-selector': - case 'channel-selector': - case 'folder-selector': - case 'mcp-server-selector': - case 'mcp-tool-selector': - case 'workflow-selector': { - if (subBlockConfig.multiSelect && Array.isArray(value)) { - return { valid: true, value } - } - if (typeof value === 'string') { - return { valid: true, value } - } - return { - valid: false, - error: { - blockId, - blockType, - field: fieldName, - value, - error: `Invalid selector value for field "${fieldName}" - expected a string${subBlockConfig.multiSelect ? 
' or array of strings' : ''}`, - }, - } - } - - default: - // For unknown types, pass through - return { valid: true, value } - } -} - -interface EditWorkflowOperation { - operation_type: 'add' | 'edit' | 'delete' | 'insert_into_subflow' | 'extract_from_subflow' - block_id: string - params?: Record -} - -interface EditWorkflowParams { - operations: EditWorkflowOperation[] - workflowId: string - currentUserWorkflow?: string -} - -/** - * Topologically sort insert operations to ensure parents are created before children - * Returns sorted array where parent inserts always come before child inserts - */ -function topologicalSortInserts( - inserts: EditWorkflowOperation[], - adds: EditWorkflowOperation[] -): EditWorkflowOperation[] { - if (inserts.length === 0) return [] - - // Build a map of blockId -> operation for quick lookup - const insertMap = new Map() - inserts.forEach((op) => insertMap.set(op.block_id, op)) - - // Build a set of blocks being added (potential parents) - const addedBlocks = new Set(adds.map((op) => op.block_id)) - - // Build dependency graph: block -> blocks that depend on it - const dependents = new Map>() - const dependencies = new Map>() - - inserts.forEach((op) => { - const blockId = op.block_id - const parentId = op.params?.subflowId - - dependencies.set(blockId, new Set()) - - if (parentId) { - // Track dependency if parent is being inserted OR being added - // This ensures children wait for parents regardless of operation type - const parentBeingCreated = insertMap.has(parentId) || addedBlocks.has(parentId) - - if (parentBeingCreated) { - // Only add dependency if parent is also being inserted (not added) - // Because adds run before inserts, added parents are already created - if (insertMap.has(parentId)) { - dependencies.get(blockId)!.add(parentId) - if (!dependents.has(parentId)) { - dependents.set(parentId, new Set()) - } - dependents.get(parentId)!.add(blockId) - } - } - } - }) - - // Topological sort using Kahn's algorithm - const 
sorted: EditWorkflowOperation[] = [] - const queue: string[] = [] - - // Start with nodes that have no dependencies (or depend only on added blocks) - inserts.forEach((op) => { - const deps = dependencies.get(op.block_id)! - if (deps.size === 0) { - queue.push(op.block_id) - } - }) - - while (queue.length > 0) { - const blockId = queue.shift()! - const op = insertMap.get(blockId) - if (op) { - sorted.push(op) - } - - // Remove this node from dependencies of others - const children = dependents.get(blockId) - if (children) { - children.forEach((childId) => { - const childDeps = dependencies.get(childId)! - childDeps.delete(blockId) - if (childDeps.size === 0) { - queue.push(childId) - } - }) - } - } - - // If sorted length doesn't match input, there's a cycle (shouldn't happen with valid operations) - // Just append remaining operations - if (sorted.length < inserts.length) { - inserts.forEach((op) => { - if (!sorted.includes(op)) { - sorted.push(op) - } - }) - } - - return sorted -} - -/** - * Helper to create a block state from operation params - */ -function createBlockFromParams( - blockId: string, - params: any, - parentId?: string, - errorsCollector?: ValidationError[], - permissionConfig?: PermissionGroupConfig | null, - skippedItems?: SkippedItem[] -): any { - const blockConfig = getAllBlocks().find((b) => b.type === params.type) - - // Validate inputs against block configuration - let validatedInputs: Record | undefined - if (params.inputs) { - const result = validateInputsForBlock(params.type, params.inputs, blockId) - validatedInputs = result.validInputs - if (errorsCollector && result.errors.length > 0) { - errorsCollector.push(...result.errors) - } - } - - // Determine outputs based on trigger mode - const triggerMode = params.triggerMode || false - let outputs: Record - - if (params.outputs) { - outputs = params.outputs - } else if (blockConfig) { - const subBlocks: Record = {} - if (validatedInputs) { - Object.entries(validatedInputs).forEach(([key, 
value]) => { - // Skip runtime subblock IDs when computing outputs - if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { - return - } - subBlocks[key] = { id: key, type: 'short-input', value: value } - }) - } - outputs = getBlockOutputs(params.type, subBlocks, triggerMode) - } else { - outputs = {} - } - - const blockState: any = { - id: blockId, - type: params.type, - name: params.name, - position: { x: 0, y: 0 }, - enabled: params.enabled !== undefined ? params.enabled : true, - horizontalHandles: true, - advancedMode: params.advancedMode || false, - height: 0, - triggerMode: triggerMode, - subBlocks: {}, - outputs: outputs, - data: parentId ? { parentId, extent: 'parent' as const } : {}, - locked: false, - } - - // Add validated inputs as subBlocks - if (validatedInputs) { - Object.entries(validatedInputs).forEach(([key, value]) => { - if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { - return - } - - let sanitizedValue = value - - // Normalize array subblocks with id fields (inputFormat, table rows, etc.) - if (shouldNormalizeArrayIds(key)) { - sanitizedValue = normalizeArrayWithIds(value) - } - - // Special handling for tools - normalize and filter disallowed - if (key === 'tools' && Array.isArray(value)) { - sanitizedValue = filterDisallowedTools( - normalizeTools(value), - permissionConfig ?? null, - blockId, - skippedItems ?? 
[] - ) - } - - // Special handling for responseFormat - normalize to ensure consistent format - if (key === 'responseFormat' && value) { - sanitizedValue = normalizeResponseFormat(value) - } - - blockState.subBlocks[key] = { - id: key, - type: 'short-input', - value: sanitizedValue, - } - }) - } - - // Set up subBlocks from block configuration - if (blockConfig) { - blockConfig.subBlocks.forEach((subBlock) => { - if (!blockState.subBlocks[subBlock.id]) { - blockState.subBlocks[subBlock.id] = { - id: subBlock.id, - type: subBlock.type, - value: null, - } - } - }) - - if (validatedInputs) { - updateCanonicalModesForInputs(blockState, Object.keys(validatedInputs), blockConfig) - } - } - - return blockState -} - -function updateCanonicalModesForInputs( - block: { data?: { canonicalModes?: Record } }, - inputKeys: string[], - blockConfig: BlockConfig -): void { - if (!blockConfig.subBlocks?.length) return - - const canonicalIndex = buildCanonicalIndex(blockConfig.subBlocks) - const canonicalModeUpdates: Record = {} - - for (const inputKey of inputKeys) { - const canonicalId = canonicalIndex.canonicalIdBySubBlockId[inputKey] - if (!canonicalId) continue - - const group = canonicalIndex.groupsById[canonicalId] - if (!group || !isCanonicalPair(group)) continue - - const isAdvanced = group.advancedIds.includes(inputKey) - const existingMode = canonicalModeUpdates[canonicalId] - - if (!existingMode || isAdvanced) { - canonicalModeUpdates[canonicalId] = isAdvanced ? 
'advanced' : 'basic' - } - } - - if (Object.keys(canonicalModeUpdates).length > 0) { - if (!block.data) block.data = {} - if (!block.data.canonicalModes) block.data.canonicalModes = {} - Object.assign(block.data.canonicalModes, canonicalModeUpdates) - } -} - -/** - * Normalize tools array by adding back fields that were sanitized for training - */ -function normalizeTools(tools: any[]): any[] { - return tools.map((tool) => { - if (tool.type === 'custom-tool') { - // New reference format: minimal fields only - if (tool.customToolId && !tool.schema && !tool.code) { - return { - type: tool.type, - customToolId: tool.customToolId, - usageControl: tool.usageControl || 'auto', - isExpanded: tool.isExpanded ?? true, - } - } - - // Legacy inline format: include all fields - const normalized: any = { - ...tool, - params: tool.params || {}, - isExpanded: tool.isExpanded ?? true, - } - - // Ensure schema has proper structure (for inline format) - if (normalized.schema?.function) { - normalized.schema = { - type: 'function', - function: { - name: normalized.schema.function.name || tool.title, // Preserve name or derive from title - description: normalized.schema.function.description, - parameters: normalized.schema.function.parameters, - }, - } - } - - return normalized - } - - // For other tool types, just ensure isExpanded exists - return { - ...tool, - isExpanded: tool.isExpanded ?? true, - } - }) -} - -/** UUID v4 regex pattern for validation */ -const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i - -/** - * Subblock types that store arrays of objects with `id` fields. - * The LLM may generate arbitrary IDs which need to be converted to proper UUIDs. 
- */ -const ARRAY_WITH_ID_SUBBLOCK_TYPES = new Set([ - 'inputFormat', // input-format: Fields with id, name, type, value, collapsed - 'headers', // table: Rows with id, cells (used for HTTP headers) - 'params', // table: Rows with id, cells (used for query params) - 'variables', // table or variables-input: Rows/assignments with id - 'tagFilters', // knowledge-tag-filters: Filters with id, tagName, etc. - 'documentTags', // document-tag-entry: Tags with id, tagName, etc. - 'metrics', // eval-input: Metrics with id, name, description, range -]) - -/** - * Normalizes array subblock values by ensuring each item has a valid UUID. - * The LLM may generate arbitrary IDs like "input-desc-001" or "row-1" which need - * to be converted to proper UUIDs for consistency with UI-created items. - */ -function normalizeArrayWithIds(value: unknown): any[] { - if (!Array.isArray(value)) { - return [] - } - - return value.map((item: any) => { - if (!item || typeof item !== 'object') { - return item - } - - // Check if id is missing or not a valid UUID - const hasValidUUID = typeof item.id === 'string' && UUID_REGEX.test(item.id) - if (!hasValidUUID) { - return { ...item, id: crypto.randomUUID() } - } - - return item - }) -} - -/** - * Checks if a subblock key should have its array items normalized with UUIDs. 
 */
function shouldNormalizeArrayIds(key: string): boolean {
  return ARRAY_WITH_ID_SUBBLOCK_TYPES.has(key)
}

/**
 * Normalize responseFormat to ensure consistent storage.
 * Handles both string (JSON) and object formats.
 * Returns pretty-printed JSON (2-space indent, keys sorted recursively) for better
 * UI readability; an empty/whitespace-only string normalizes to ''.
 */
function normalizeResponseFormat(value: any): string {
  try {
    let obj = value

    // If it's already a string, parse it first
    if (typeof value === 'string') {
      const trimmed = value.trim()
      if (!trimmed) {
        return ''
      }
      obj = JSON.parse(trimmed)
    }

    // If it's an object, stringify it with consistent formatting
    if (obj && typeof obj === 'object') {
      // Sort keys recursively for consistent comparison
      const sortKeys = (item: any): any => {
        if (Array.isArray(item)) {
          return item.map(sortKeys)
        }
        if (item !== null && typeof item === 'object') {
          return Object.keys(item)
            .sort()
            .reduce((result: any, key: string) => {
              result[key] = sortKeys(item[key])
              return result
            }, {})
        }
        return item
      }

      // Return pretty-printed with 2-space indentation for UI readability
      // The sanitizer will normalize it to minified format for comparison
      return JSON.stringify(sortKeys(obj), null, 2)
    }

    return String(value)
  } catch (error) {
    // If parsing fails, return the original value as string
    // NOTE(review): for non-string values this yields "[object Object]" — presumably
    // acceptable as a last-resort fallback; confirm callers never rely on it.
    return String(value)
  }
}

interface EdgeHandleValidationResult {
  valid: boolean
  error?: string
  /** The normalized handle to use (e.g., simple 'if' normalized to 'condition-{uuid}') */
  normalizedHandle?: string
}

/**
 * Validates source handle is valid for the block type.
 * The 'error' handle is accepted for every block type; other handles are checked
 * per block type (loop/parallel start/end handles, condition/router handles via
 * their dedicated validators, plain 'source' for everything else).
 */
function validateSourceHandleForBlock(
  sourceHandle: string,
  sourceBlockType: string,
  sourceBlock: any
): EdgeHandleValidationResult {
  if (sourceHandle === 'error') {
    return { valid: true }
  }

  switch (sourceBlockType) {
    case 'loop':
      if (sourceHandle === 'loop-start-source' || sourceHandle === 'loop-end-source') {
        return { valid: true }
      }
      return {
        valid: false,
        error: `Invalid source handle "${sourceHandle}" for loop block. Valid handles: loop-start-source, loop-end-source, error`,
      }

    case 'parallel':
      if (sourceHandle === 'parallel-start-source' || sourceHandle === 'parallel-end-source') {
        return { valid: true }
      }
      return {
        valid: false,
        error: `Invalid source handle "${sourceHandle}" for parallel block. Valid handles: parallel-start-source, parallel-end-source, error`,
      }

    case 'condition': {
      const conditionsValue = sourceBlock?.subBlocks?.conditions?.value
      if (!conditionsValue) {
        return {
          valid: false,
          error: `Invalid condition handle "${sourceHandle}" - no conditions defined`,
        }
      }

      // validateConditionHandle accepts simple format (if, else-if-0, else),
      // legacy format (condition-{blockId}-if), and internal ID format (condition-{uuid})
      return validateConditionHandle(sourceHandle, sourceBlock.id, conditionsValue)
    }

    case 'router':
      if (sourceHandle === 'source' || sourceHandle.startsWith(EDGE.ROUTER_PREFIX)) {
        return { valid: true }
      }
      return {
        valid: false,
        error: `Invalid source handle "${sourceHandle}" for router block. Valid handles: source, ${EDGE.ROUTER_PREFIX}{targetId}, error`,
      }

    case 'router_v2': {
      const routesValue = sourceBlock?.subBlocks?.routes?.value
      if (!routesValue) {
        return {
          valid: false,
          error: `Invalid router handle "${sourceHandle}" - no routes defined`,
        }
      }

      // validateRouterHandle accepts simple format (route-0, route-1),
      // legacy format (router-{blockId}-route-1), and internal ID format (router-{uuid})
      return validateRouterHandle(sourceHandle, sourceBlock.id, routesValue)
    }

    default:
      if (sourceHandle === 'source') {
        return { valid: true }
      }
      return {
        valid: false,
        error: `Invalid source handle "${sourceHandle}" for ${sourceBlockType} block. Valid handles: source, error`,
      }
  }
}

/**
 * Validates condition handle references a valid condition in the block.
 * Accepts multiple formats:
 * - Simple format: "if", "else-if-0", "else-if-1", "else"
 * - Legacy semantic format: "condition-{blockId}-if", "condition-{blockId}-else-if"
 * - Internal ID format: "condition-{conditionId}"
 *
 * Returns the normalized handle (condition-{conditionId}) for storage.
 */
function validateConditionHandle(
  sourceHandle: string,
  blockId: string,
  conditionsValue: string | any[]
): EdgeHandleValidationResult {
  let conditions: any[]
  if (typeof conditionsValue === 'string') {
    try {
      conditions = JSON.parse(conditionsValue)
    } catch {
      return {
        valid: false,
        error: `Cannot validate condition handle "${sourceHandle}" - conditions is not valid JSON`,
      }
    }
  } else if (Array.isArray(conditionsValue)) {
    conditions = conditionsValue
  } else {
    return {
      valid: false,
      error: `Cannot validate condition handle "${sourceHandle}" - conditions is not an array`,
    }
  }

  if (!Array.isArray(conditions) || conditions.length === 0) {
    return {
      valid: false,
      error: `Invalid condition handle "${sourceHandle}" - no conditions defined`,
    }
  }

  // Build a map of all valid handle formats -> normalized handle (condition-{conditionId})
  const handleToNormalized = new Map()
  const legacySemanticPrefix = `condition-${blockId}-`
  let elseIfIndex = 0

  for (const condition of conditions) {
    if (!condition.id) continue

    const normalizedHandle = `condition-${condition.id}`
    const title = condition.title?.toLowerCase()

    // Always accept internal ID format
    handleToNormalized.set(normalizedHandle, normalizedHandle)

    if (title === 'if') {
      // Simple format: "if"
      handleToNormalized.set('if', normalizedHandle)
      // Legacy format: "condition-{blockId}-if"
      handleToNormalized.set(`${legacySemanticPrefix}if`, normalizedHandle)
    } else if (title === 'else if') {
      // Simple format: "else-if-0", "else-if-1", etc.
      // (0-indexed)
      handleToNormalized.set(`else-if-${elseIfIndex}`, normalizedHandle)
      // Legacy format: "condition-{blockId}-else-if" for first, "condition-{blockId}-else-if-2" for second
      if (elseIfIndex === 0) {
        handleToNormalized.set(`${legacySemanticPrefix}else-if`, normalizedHandle)
      } else {
        handleToNormalized.set(
          `${legacySemanticPrefix}else-if-${elseIfIndex + 1}`,
          normalizedHandle
        )
      }
      elseIfIndex++
    } else if (title === 'else') {
      // Simple format: "else"
      handleToNormalized.set('else', normalizedHandle)
      // Legacy format: "condition-{blockId}-else"
      handleToNormalized.set(`${legacySemanticPrefix}else`, normalizedHandle)
    }
  }

  const normalizedHandle = handleToNormalized.get(sourceHandle)
  if (normalizedHandle) {
    return { valid: true, normalizedHandle }
  }

  // Build list of valid simple format options for error message
  // (second pass over conditions, reusing elseIfIndex as a fresh counter)
  const simpleOptions: string[] = []
  elseIfIndex = 0
  for (const condition of conditions) {
    const title = condition.title?.toLowerCase()
    if (title === 'if') {
      simpleOptions.push('if')
    } else if (title === 'else if') {
      simpleOptions.push(`else-if-${elseIfIndex}`)
      elseIfIndex++
    } else if (title === 'else') {
      simpleOptions.push('else')
    }
  }

  return {
    valid: false,
    error: `Invalid condition handle "${sourceHandle}". Valid handles: ${simpleOptions.join(', ')}`,
  }
}

/**
 * Validates router handle references a valid route in the block.
 * Accepts multiple formats:
 * - Simple format: "route-0", "route-1", "route-2" (0-indexed)
 * - Legacy semantic format: "router-{blockId}-route-1" (1-indexed)
 * - Internal ID format: "router-{routeId}"
 *
 * Returns the normalized handle (router-{routeId}) for storage.
 */
function validateRouterHandle(
  sourceHandle: string,
  blockId: string,
  routesValue: string | any[]
): EdgeHandleValidationResult {
  let routes: any[]
  if (typeof routesValue === 'string') {
    try {
      routes = JSON.parse(routesValue)
    } catch {
      return {
        valid: false,
        error: `Cannot validate router handle "${sourceHandle}" - routes is not valid JSON`,
      }
    }
  } else if (Array.isArray(routesValue)) {
    routes = routesValue
  } else {
    return {
      valid: false,
      error: `Cannot validate router handle "${sourceHandle}" - routes is not an array`,
    }
  }

  if (!Array.isArray(routes) || routes.length === 0) {
    return {
      valid: false,
      error: `Invalid router handle "${sourceHandle}" - no routes defined`,
    }
  }

  // Build a map of all valid handle formats -> normalized handle (router-{routeId})
  const handleToNormalized = new Map()
  const legacySemanticPrefix = `router-${blockId}-`

  for (let i = 0; i < routes.length; i++) {
    const route = routes[i]
    if (!route.id) continue

    const normalizedHandle = `router-${route.id}`

    // Always accept internal ID format: router-{uuid}
    handleToNormalized.set(normalizedHandle, normalizedHandle)

    // Simple format: route-0, route-1, etc.
    // (0-indexed)
    handleToNormalized.set(`route-${i}`, normalizedHandle)

    // Legacy 1-indexed route number format: router-{blockId}-route-1
    handleToNormalized.set(`${legacySemanticPrefix}route-${i + 1}`, normalizedHandle)

    // Accept normalized title format: router-{blockId}-{normalized-title}
    if (route.title && typeof route.title === 'string') {
      const normalizedTitle = route.title
        .toLowerCase()
        .replace(/\s+/g, '-')
        .replace(/[^a-z0-9-]/g, '')
      if (normalizedTitle) {
        handleToNormalized.set(`${legacySemanticPrefix}${normalizedTitle}`, normalizedHandle)
      }
    }
  }

  const normalizedHandle = handleToNormalized.get(sourceHandle)
  if (normalizedHandle) {
    return { valid: true, normalizedHandle }
  }

  // Build list of valid simple format options for error message
  const simpleOptions = routes.map((_, i) => `route-${i}`)

  return {
    valid: false,
    error: `Invalid router handle "${sourceHandle}". Valid handles: ${simpleOptions.join(', ')}`,
  }
}

/**
 * Validates target handle is valid (must be 'target')
 */
function validateTargetHandle(targetHandle: string): EdgeHandleValidationResult {
  if (targetHandle === 'target') {
    return { valid: true }
  }
  return {
    valid: false,
    error: `Invalid target handle "${targetHandle}". Expected "target"`,
  }
}

/**
 * Creates a validated edge between two blocks.
 * Validates that both endpoints exist, the source block has a type, and both
 * handles are valid, recording a SkippedItem and logging a warning on failure.
 * Returns true if edge was created, false if skipped due to validation errors.
 */
function createValidatedEdge(
  modifiedState: any,
  sourceBlockId: string,
  targetBlockId: string,
  sourceHandle: string,
  targetHandle: string,
  operationType: string,
  // NOTE(review): generic argument restored after mangled extraction stripped
  // angle-bracket content — confirm against the original source.
  logger: ReturnType<typeof createLogger>,
  skippedItems?: SkippedItem[]
): boolean {
  if (!modifiedState.blocks[targetBlockId]) {
    logger.warn(`Target block "${targetBlockId}" not found. Edge skipped.`, {
      sourceBlockId,
      targetBlockId,
      sourceHandle,
    })
    skippedItems?.push({
      type: 'invalid_edge_target',
      operationType,
      blockId: sourceBlockId,
      reason: `Edge from "${sourceBlockId}" to "${targetBlockId}" skipped - target block does not exist`,
      details: { sourceHandle, targetHandle, targetId: targetBlockId },
    })
    return false
  }

  const sourceBlock = modifiedState.blocks[sourceBlockId]
  if (!sourceBlock) {
    logger.warn(`Source block "${sourceBlockId}" not found. Edge skipped.`, {
      sourceBlockId,
      targetBlockId,
    })
    skippedItems?.push({
      type: 'invalid_edge_source',
      operationType,
      blockId: sourceBlockId,
      reason: `Edge from "${sourceBlockId}" to "${targetBlockId}" skipped - source block does not exist`,
      details: { sourceHandle, targetHandle, targetId: targetBlockId },
    })
    return false
  }

  const sourceBlockType = sourceBlock.type
  if (!sourceBlockType) {
    logger.warn(`Source block "${sourceBlockId}" has no type. Edge skipped.`, {
      sourceBlockId,
      targetBlockId,
    })
    skippedItems?.push({
      type: 'invalid_edge_source',
      operationType,
      blockId: sourceBlockId,
      reason: `Edge from "${sourceBlockId}" to "${targetBlockId}" skipped - source block has no type`,
      details: { sourceHandle, targetHandle, targetId: targetBlockId },
    })
    return false
  }

  const sourceValidation = validateSourceHandleForBlock(sourceHandle, sourceBlockType, sourceBlock)
  if (!sourceValidation.valid) {
    logger.warn(`Invalid source handle. Edge skipped.`, {
      sourceBlockId,
      targetBlockId,
      sourceHandle,
      error: sourceValidation.error,
    })
    skippedItems?.push({
      type: 'invalid_source_handle',
      operationType,
      blockId: sourceBlockId,
      reason: sourceValidation.error || `Invalid source handle "${sourceHandle}"`,
      details: { sourceHandle, targetHandle, targetId: targetBlockId },
    })
    return false
  }

  const targetValidation = validateTargetHandle(targetHandle)
  if (!targetValidation.valid) {
    logger.warn(`Invalid target handle. Edge skipped.`, {
      sourceBlockId,
      targetBlockId,
      targetHandle,
      error: targetValidation.error,
    })
    skippedItems?.push({
      type: 'invalid_target_handle',
      operationType,
      blockId: sourceBlockId,
      reason: targetValidation.error || `Invalid target handle "${targetHandle}"`,
      details: { sourceHandle, targetHandle, targetId: targetBlockId },
    })
    return false
  }

  // Use normalized handle if available (e.g., 'if' -> 'condition-{uuid}')
  const finalSourceHandle = sourceValidation.normalizedHandle || sourceHandle

  modifiedState.edges.push({
    id: crypto.randomUUID(),
    source: sourceBlockId,
    sourceHandle: finalSourceHandle,
    target: targetBlockId,
    targetHandle,
    type: 'default',
  })
  return true
}

/**
 * Adds connections as edges for a block.
 * Supports multiple target formats:
 * - String: "target-block-id"
 * - Object: { block: "target-block-id", handle?: "custom-target-handle" }
 * - Array of strings or objects
 */
function addConnectionsAsEdges(
  modifiedState: any,
  blockId: string,
  // NOTE(review): generic arguments restored after mangled extraction — confirm.
  connections: Record<string, any>,
  logger: ReturnType<typeof createLogger>,
  skippedItems?: SkippedItem[]
): void {
  Object.entries(connections).forEach(([sourceHandle, targets]) => {
    if (targets === null) return

    const addEdgeForTarget = (targetBlock: string, targetHandle?: string) => {
      createValidatedEdge(
        modifiedState,
        blockId,
        targetBlock,
        sourceHandle,
        targetHandle || 'target',
        'add_edge',
        logger,
        skippedItems
      )
    }

    if (typeof targets === 'string') {
      addEdgeForTarget(targets)
    } else if (Array.isArray(targets)) {
      targets.forEach((target: any) => {
        if (typeof target === 'string') {
          addEdgeForTarget(target)
        } else if (target?.block) {
          addEdgeForTarget(target.block, target.handle)
        }
      })
    } else if (typeof targets === 'object' && targets?.block) {
      addEdgeForTarget(targets.block, targets.handle)
    }
  })
}

/**
 * Merges trigger configuration values into a block's subBlocks.
 * Existing subblocks keep their shape and only get a new value when it differs
 * (objects compared via JSON.stringify); unknown keys get a new 'short-input'
 * subblock whose id matches the config key.
 */
function applyTriggerConfigToBlockSubblocks(block: any, triggerConfig: Record<string, any>) {
  if (!block?.subBlocks || !triggerConfig || typeof triggerConfig !== 'object') {
    return
  }

  Object.entries(triggerConfig).forEach(([configKey, configValue]) => {
    const existingSubblock = block.subBlocks[configKey]
    if (existingSubblock) {
      const existingValue = existingSubblock.value
      const valuesEqual =
        typeof existingValue === 'object' || typeof configValue === 'object'
          ? JSON.stringify(existingValue) === JSON.stringify(configValue)
          : existingValue === configValue

      if (valuesEqual) {
        return
      }

      block.subBlocks[configKey] = {
        ...existingSubblock,
        value: configValue,
      }
    } else {
      block.subBlocks[configKey] = {
        id: configKey,
        type: 'short-input',
        value: configValue,
      }
    }
  })
}

/**
 * Result of applying operations to workflow state
 */
interface ApplyOperationsResult {
  state: any
  validationErrors: ValidationError[]
  skippedItems: SkippedItem[]
}

/**
 * Checks if a block type is allowed by the permission group config.
 * A null config, or a config with allowedIntegrations === null, allows everything.
 */
function isBlockTypeAllowed(
  blockType: string,
  permissionConfig: PermissionGroupConfig | null
): boolean {
  if (!permissionConfig || permissionConfig.allowedIntegrations === null) {
    return true
  }
  return permissionConfig.allowedIntegrations.includes(blockType)
}

/**
 * Filters out tools that are not allowed by the permission group config
 * Returns both the allowed tools and any skipped tool items for logging
 */
function filterDisallowedTools(
  tools: any[],
  permissionConfig: PermissionGroupConfig | null,
  blockId: string,
  skippedItems: SkippedItem[]
): any[] {
  if (!permissionConfig) {
    return tools
  }

  const allowedTools: any[] = []

  for (const tool of tools) {
    if (tool.type === 'custom-tool' && permissionConfig.disableCustomTools) {
      logSkippedItem(skippedItems, {
        type: 'tool_not_allowed',
        operationType: 'add',
        blockId,
        reason: `Custom tool "${tool.title || tool.customToolId || 'unknown'}" is not allowed by permission group - tool not added`,
        details: { toolType: 'custom-tool', toolId: tool.customToolId },
      })
      continue
    }
    if (tool.type === 'mcp' && permissionConfig.disableMcpTools) {
      logSkippedItem(skippedItems, {
        type: 'tool_not_allowed',
        operationType: 'add',
        blockId,
        reason: `MCP tool "${tool.title || 'unknown'}" is not allowed by permission group - tool not added`,
        details: { toolType: 'mcp',
serverId: tool.params?.serverId }, - }) - continue - } - allowedTools.push(tool) - } - - return allowedTools -} - -/** - * Apply operations directly to the workflow JSON state - */ -function applyOperationsToWorkflowState( - workflowState: any, - operations: EditWorkflowOperation[], - permissionConfig: PermissionGroupConfig | null = null -): ApplyOperationsResult { - // Deep clone the workflow state to avoid mutations - const modifiedState = JSON.parse(JSON.stringify(workflowState)) - - // Collect validation errors across all operations - const validationErrors: ValidationError[] = [] - - // Collect skipped items across all operations - const skippedItems: SkippedItem[] = [] - - // Log initial state - const logger = createLogger('EditWorkflowServerTool') - logger.info('Applying operations to workflow:', { - totalOperations: operations.length, - operationTypes: operations.reduce((acc: any, op) => { - acc[op.operation_type] = (acc[op.operation_type] || 0) + 1 - return acc - }, {}), - initialBlockCount: Object.keys(modifiedState.blocks || {}).length, - }) - - /** - * Reorder operations to ensure correct execution sequence: - * 1. delete - Remove blocks first to free up IDs and clean state - * 2. extract_from_subflow - Extract blocks from subflows before modifications - * 3. add - Create new blocks (sorted by connection dependencies) - * 4. insert_into_subflow - Insert blocks into subflows (sorted by parent dependency) - * 5. edit - Edit existing blocks last, so connections to newly added blocks work - * - * This ordering is CRITICAL: operations may reference blocks being added/inserted - * in the same batch. Without proper ordering, target blocks wouldn't exist yet. - * - * For add operations, we use a two-pass approach: - * - Pass 1: Create all blocks (without connections) - * - Pass 2: Add all connections (now all blocks exist) - * This ensures that if block A connects to block B, and both are being added, - * B will exist when we try to create the edge from A to B. 
- */ - const deletes = operations.filter((op) => op.operation_type === 'delete') - const extracts = operations.filter((op) => op.operation_type === 'extract_from_subflow') - const adds = operations.filter((op) => op.operation_type === 'add') - const inserts = operations.filter((op) => op.operation_type === 'insert_into_subflow') - const edits = operations.filter((op) => op.operation_type === 'edit') - - // Sort insert operations to ensure parents are inserted before children - // This handles cases where a loop/parallel is being added along with its children - const sortedInserts = topologicalSortInserts(inserts, adds) - - // We'll process add operations in two passes (handled in the switch statement below) - // This is tracked via a separate flag to know which pass we're in - const orderedOperations: EditWorkflowOperation[] = [ - ...deletes, - ...extracts, - ...adds, - ...sortedInserts, - ...edits, - ] - - logger.info('Operations after reordering:', { - totalOperations: orderedOperations.length, - deleteCount: deletes.length, - extractCount: extracts.length, - addCount: adds.length, - insertCount: sortedInserts.length, - editCount: edits.length, - operationOrder: orderedOperations.map( - (op) => - `${op.operation_type}:${op.block_id}${op.params?.subflowId ? 
`(parent:${op.params.subflowId})` : ''}` - ), - }) - - // Two-pass processing for add operations: - // Pass 1: Create all blocks (without connections) - // Pass 2: Add all connections (all blocks now exist) - const addOperationsWithConnections: Array<{ - blockId: string - connections: Record - }> = [] - - for (const operation of orderedOperations) { - const { operation_type, block_id, params } = operation - - // CRITICAL: Validate block_id is a valid string and not "undefined" - // This prevents undefined keys from being set in the workflow state - if (!isValidKey(block_id)) { - logSkippedItem(skippedItems, { - type: 'missing_required_params', - operationType: operation_type, - blockId: String(block_id || 'invalid'), - reason: `Invalid block_id "${block_id}" (type: ${typeof block_id}) - operation skipped. Block IDs must be valid non-empty strings.`, - }) - logger.error('Invalid block_id detected in operation', { - operation_type, - block_id, - block_id_type: typeof block_id, - }) - continue - } - - logger.debug(`Executing operation: ${operation_type} for block ${block_id}`, { - params: params ? Object.keys(params) : [], - currentBlockCount: Object.keys(modifiedState.blocks).length, - }) - - switch (operation_type) { - case 'delete': { - if (!modifiedState.blocks[block_id]) { - logSkippedItem(skippedItems, { - type: 'block_not_found', - operationType: 'delete', - blockId: block_id, - reason: `Block "${block_id}" does not exist and cannot be deleted`, - }) - break - } - - // Check if block is locked or inside a locked container - const deleteBlock = modifiedState.blocks[block_id] - const deleteParentId = deleteBlock.data?.parentId as string | undefined - const deleteParentLocked = deleteParentId - ? modifiedState.blocks[deleteParentId]?.locked - : false - if (deleteBlock.locked || deleteParentLocked) { - logSkippedItem(skippedItems, { - type: 'block_locked', - operationType: 'delete', - blockId: block_id, - reason: deleteParentLocked - ? 
`Block "${block_id}" is inside locked container "${deleteParentId}" and cannot be deleted` - : `Block "${block_id}" is locked and cannot be deleted`, - }) - break - } - - // Find all child blocks to remove - const blocksToRemove = new Set([block_id]) - const findChildren = (parentId: string) => { - Object.entries(modifiedState.blocks).forEach(([childId, child]: [string, any]) => { - if (child.data?.parentId === parentId) { - blocksToRemove.add(childId) - findChildren(childId) - } - }) - } - findChildren(block_id) - - // Remove blocks - blocksToRemove.forEach((id) => delete modifiedState.blocks[id]) - - // Remove edges connected to deleted blocks - modifiedState.edges = modifiedState.edges.filter( - (edge: any) => !blocksToRemove.has(edge.source) && !blocksToRemove.has(edge.target) - ) - break - } - - case 'edit': { - if (!modifiedState.blocks[block_id]) { - logSkippedItem(skippedItems, { - type: 'block_not_found', - operationType: 'edit', - blockId: block_id, - reason: `Block "${block_id}" does not exist and cannot be edited`, - }) - break - } - - const block = modifiedState.blocks[block_id] - - // Check if block is locked or inside a locked container - const editParentId = block.data?.parentId as string | undefined - const editParentLocked = editParentId ? modifiedState.blocks[editParentId]?.locked : false - if (block.locked || editParentLocked) { - logSkippedItem(skippedItems, { - type: 'block_locked', - operationType: 'edit', - blockId: block_id, - reason: editParentLocked - ? 
`Block "${block_id}" is inside locked container "${editParentId}" and cannot be edited` - : `Block "${block_id}" is locked and cannot be edited`, - }) - break - } - - // Ensure block has essential properties - if (!block.type) { - logger.warn(`Block ${block_id} missing type property, skipping edit`, { - blockKeys: Object.keys(block), - blockData: JSON.stringify(block), - }) - logSkippedItem(skippedItems, { - type: 'block_not_found', - operationType: 'edit', - blockId: block_id, - reason: `Block "${block_id}" exists but has no type property`, - }) - break - } - - // Update inputs (convert to subBlocks format) - if (params?.inputs) { - if (!block.subBlocks) block.subBlocks = {} - - // Validate inputs against block configuration - const validationResult = validateInputsForBlock(block.type, params.inputs, block_id) - validationErrors.push(...validationResult.errors) - - Object.entries(validationResult.validInputs).forEach(([inputKey, value]) => { - // Normalize common field name variations (LLM may use plural/singular inconsistently) - let key = inputKey - if ( - key === 'credentials' && - !block.subBlocks.credentials && - block.subBlocks.credential - ) { - key = 'credential' - } - - if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { - return - } - let sanitizedValue = value - - // Normalize array subblocks with id fields (inputFormat, table rows, etc.) 
- if (shouldNormalizeArrayIds(key)) { - sanitizedValue = normalizeArrayWithIds(value) - } - - // Special handling for tools - normalize and filter disallowed - if (key === 'tools' && Array.isArray(value)) { - sanitizedValue = filterDisallowedTools( - normalizeTools(value), - permissionConfig, - block_id, - skippedItems - ) - } - - // Special handling for responseFormat - normalize to ensure consistent format - if (key === 'responseFormat' && value) { - sanitizedValue = normalizeResponseFormat(value) - } - - if (!block.subBlocks[key]) { - block.subBlocks[key] = { - id: key, - type: 'short-input', - value: sanitizedValue, - } - } else { - const existingValue = block.subBlocks[key].value - const valuesEqual = - typeof existingValue === 'object' || typeof sanitizedValue === 'object' - ? JSON.stringify(existingValue) === JSON.stringify(sanitizedValue) - : existingValue === sanitizedValue - - if (!valuesEqual) { - block.subBlocks[key].value = sanitizedValue - } - } - }) - - if ( - Object.hasOwn(params.inputs, 'triggerConfig') && - block.subBlocks.triggerConfig && - typeof block.subBlocks.triggerConfig.value === 'object' - ) { - applyTriggerConfigToBlockSubblocks(block, block.subBlocks.triggerConfig.value) - } - - // Update loop/parallel configuration in block.data (strict validation) - if (block.type === 'loop') { - block.data = block.data || {} - // loopType is always valid - if (params.inputs.loopType !== undefined) { - const validLoopTypes = ['for', 'forEach', 'while', 'doWhile'] - if (validLoopTypes.includes(params.inputs.loopType)) { - block.data.loopType = params.inputs.loopType - } - } - const effectiveLoopType = params.inputs.loopType ?? block.data.loopType ?? 
'for' - // iterations only valid for 'for' loopType - if (params.inputs.iterations !== undefined && effectiveLoopType === 'for') { - block.data.count = params.inputs.iterations - } - // collection only valid for 'forEach' loopType - if (params.inputs.collection !== undefined && effectiveLoopType === 'forEach') { - block.data.collection = params.inputs.collection - } - // condition only valid for 'while' or 'doWhile' loopType - if ( - params.inputs.condition !== undefined && - (effectiveLoopType === 'while' || effectiveLoopType === 'doWhile') - ) { - if (effectiveLoopType === 'doWhile') { - block.data.doWhileCondition = params.inputs.condition - } else { - block.data.whileCondition = params.inputs.condition - } - } - } else if (block.type === 'parallel') { - block.data = block.data || {} - // parallelType is always valid - if (params.inputs.parallelType !== undefined) { - const validParallelTypes = ['count', 'collection'] - if (validParallelTypes.includes(params.inputs.parallelType)) { - block.data.parallelType = params.inputs.parallelType - } - } - const effectiveParallelType = - params.inputs.parallelType ?? block.data.parallelType ?? 
'count' - // count only valid for 'count' parallelType - if (params.inputs.count !== undefined && effectiveParallelType === 'count') { - block.data.count = params.inputs.count - } - // collection only valid for 'collection' parallelType - if (params.inputs.collection !== undefined && effectiveParallelType === 'collection') { - block.data.collection = params.inputs.collection - } - } - - const editBlockConfig = getBlock(block.type) - if (editBlockConfig) { - updateCanonicalModesForInputs( - block, - Object.keys(validationResult.validInputs), - editBlockConfig - ) - } - } - - // Update basic properties - if (params?.type !== undefined) { - // Special container types (loop, parallel) are not in the block registry but are valid - const isContainerType = params.type === 'loop' || params.type === 'parallel' - - // Validate type before setting (skip validation for container types) - const blockConfig = getBlock(params.type) - if (!blockConfig && !isContainerType) { - logSkippedItem(skippedItems, { - type: 'invalid_block_type', - operationType: 'edit', - blockId: block_id, - reason: `Invalid block type "${params.type}" - type change skipped`, - details: { requestedType: params.type }, - }) - } else if (!isContainerType && !isBlockTypeAllowed(params.type, permissionConfig)) { - logSkippedItem(skippedItems, { - type: 'block_not_allowed', - operationType: 'edit', - blockId: block_id, - reason: `Block type "${params.type}" is not allowed by permission group - type change skipped`, - details: { requestedType: params.type }, - }) - } else { - block.type = params.type - } - } - if (params?.name !== undefined) { - const normalizedName = normalizeName(params.name) - if (!normalizedName) { - logSkippedItem(skippedItems, { - type: 'missing_required_params', - operationType: 'edit', - blockId: block_id, - reason: `Cannot rename to empty name`, - details: { requestedName: params.name }, - }) - } else if ((RESERVED_BLOCK_NAMES as readonly string[]).includes(normalizedName)) { - 
logSkippedItem(skippedItems, { - type: 'reserved_block_name', - operationType: 'edit', - blockId: block_id, - reason: `Cannot rename to "${params.name}" - this is a reserved name`, - details: { requestedName: params.name }, - }) - } else { - const conflictingBlock = findBlockWithDuplicateNormalizedName( - modifiedState.blocks, - params.name, - block_id - ) - - if (conflictingBlock) { - logSkippedItem(skippedItems, { - type: 'duplicate_block_name', - operationType: 'edit', - blockId: block_id, - reason: `Cannot rename to "${params.name}" - conflicts with "${conflictingBlock[1].name}"`, - details: { - requestedName: params.name, - conflictingBlockId: conflictingBlock[0], - conflictingBlockName: conflictingBlock[1].name, - }, - }) - } else { - block.name = params.name - } - } - } - - // Handle trigger mode toggle - if (typeof params?.triggerMode === 'boolean') { - block.triggerMode = params.triggerMode - - if (params.triggerMode === true) { - // Remove all incoming edges when enabling trigger mode - modifiedState.edges = modifiedState.edges.filter( - (edge: any) => edge.target !== block_id - ) - } - } - - // Handle advanced mode toggle - if (typeof params?.advancedMode === 'boolean') { - block.advancedMode = params.advancedMode - } - - // Handle nested nodes update (for loops/parallels) - if (params?.nestedNodes) { - // Remove all existing child blocks - const existingChildren = Object.keys(modifiedState.blocks).filter( - (id) => modifiedState.blocks[id].data?.parentId === block_id - ) - existingChildren.forEach((childId) => delete modifiedState.blocks[childId]) - - // Remove edges to/from removed children - modifiedState.edges = modifiedState.edges.filter( - (edge: any) => - !existingChildren.includes(edge.source) && !existingChildren.includes(edge.target) - ) - - // Add new nested blocks - Object.entries(params.nestedNodes).forEach(([childId, childBlock]: [string, any]) => { - // Validate childId is a valid string - if (!isValidKey(childId)) { - 
logSkippedItem(skippedItems, { - type: 'missing_required_params', - operationType: 'add_nested_node', - blockId: String(childId || 'invalid'), - reason: `Invalid childId "${childId}" in nestedNodes - child block skipped`, - }) - logger.error('Invalid childId detected in nestedNodes', { - parentBlockId: block_id, - childId, - childId_type: typeof childId, - }) - return - } - - if (childBlock.type === 'loop' || childBlock.type === 'parallel') { - logSkippedItem(skippedItems, { - type: 'nested_subflow_not_allowed', - operationType: 'edit_nested_node', - blockId: childId, - reason: `Cannot nest ${childBlock.type} inside ${block.type} - nested subflows are not supported`, - details: { parentType: block.type, childType: childBlock.type }, - }) - return - } - - const childBlockState = createBlockFromParams( - childId, - childBlock, - block_id, - validationErrors, - permissionConfig, - skippedItems - ) - modifiedState.blocks[childId] = childBlockState - - // Add connections for child block - if (childBlock.connections) { - addConnectionsAsEdges( - modifiedState, - childId, - childBlock.connections, - logger, - skippedItems - ) - } - }) - - // Update loop/parallel configuration based on type (strict validation) - if (block.type === 'loop') { - block.data = block.data || {} - // loopType is always valid - if (params.inputs?.loopType) { - const validLoopTypes = ['for', 'forEach', 'while', 'doWhile'] - if (validLoopTypes.includes(params.inputs.loopType)) { - block.data.loopType = params.inputs.loopType - } - } - const effectiveLoopType = params.inputs?.loopType ?? block.data.loopType ?? 
'for' - // iterations only valid for 'for' loopType - if (params.inputs?.iterations && effectiveLoopType === 'for') { - block.data.count = params.inputs.iterations - } - // collection only valid for 'forEach' loopType - if (params.inputs?.collection && effectiveLoopType === 'forEach') { - block.data.collection = params.inputs.collection - } - // condition only valid for 'while' or 'doWhile' loopType - if ( - params.inputs?.condition && - (effectiveLoopType === 'while' || effectiveLoopType === 'doWhile') - ) { - if (effectiveLoopType === 'doWhile') { - block.data.doWhileCondition = params.inputs.condition - } else { - block.data.whileCondition = params.inputs.condition - } - } - } else if (block.type === 'parallel') { - block.data = block.data || {} - // parallelType is always valid - if (params.inputs?.parallelType) { - const validParallelTypes = ['count', 'collection'] - if (validParallelTypes.includes(params.inputs.parallelType)) { - block.data.parallelType = params.inputs.parallelType - } - } - const effectiveParallelType = - params.inputs?.parallelType ?? block.data.parallelType ?? 
'count' - // count only valid for 'count' parallelType - if (params.inputs?.count && effectiveParallelType === 'count') { - block.data.count = params.inputs.count - } - // collection only valid for 'collection' parallelType - if (params.inputs?.collection && effectiveParallelType === 'collection') { - block.data.collection = params.inputs.collection - } - } - } - - // Handle connections update (convert to edges) - if (params?.connections) { - modifiedState.edges = modifiedState.edges.filter((edge: any) => edge.source !== block_id) - - Object.entries(params.connections).forEach(([connectionType, targets]) => { - if (targets === null) return - - const mapConnectionTypeToHandle = (type: string): string => { - if (type === 'success') return 'source' - if (type === 'error') return 'error' - return type - } - - const sourceHandle = mapConnectionTypeToHandle(connectionType) - - const addEdgeForTarget = (targetBlock: string, targetHandle?: string) => { - createValidatedEdge( - modifiedState, - block_id, - targetBlock, - sourceHandle, - targetHandle || 'target', - 'edit', - logger, - skippedItems - ) - } - - if (typeof targets === 'string') { - addEdgeForTarget(targets) - } else if (Array.isArray(targets)) { - targets.forEach((target: any) => { - if (typeof target === 'string') { - addEdgeForTarget(target) - } else if (target?.block) { - addEdgeForTarget(target.block, target.handle) - } - }) - } else if (typeof targets === 'object' && (targets as any)?.block) { - addEdgeForTarget((targets as any).block, (targets as any).handle) - } - }) - } - - // Handle edge removal - if (params?.removeEdges && Array.isArray(params.removeEdges)) { - params.removeEdges.forEach(({ targetBlockId, sourceHandle = 'source' }) => { - modifiedState.edges = modifiedState.edges.filter( - (edge: any) => - !( - edge.source === block_id && - edge.target === targetBlockId && - edge.sourceHandle === sourceHandle - ) - ) - }) - } - break - } - - case 'add': { - const addNormalizedName = params?.name ? 
normalizeName(params.name) : '' - if (!params?.type || !params?.name || !addNormalizedName) { - logSkippedItem(skippedItems, { - type: 'missing_required_params', - operationType: 'add', - blockId: block_id, - reason: `Missing required params (type or name) for adding block "${block_id}"`, - details: { hasType: !!params?.type, hasName: !!params?.name }, - }) - break - } - - if ((RESERVED_BLOCK_NAMES as readonly string[]).includes(addNormalizedName)) { - logSkippedItem(skippedItems, { - type: 'reserved_block_name', - operationType: 'add', - blockId: block_id, - reason: `Block name "${params.name}" is a reserved name and cannot be used`, - details: { requestedName: params.name }, - }) - break - } - - const conflictingBlock = findBlockWithDuplicateNormalizedName( - modifiedState.blocks, - params.name, - block_id - ) - - if (conflictingBlock) { - logSkippedItem(skippedItems, { - type: 'duplicate_block_name', - operationType: 'add', - blockId: block_id, - reason: `Block name "${params.name}" conflicts with existing block "${conflictingBlock[1].name}"`, - details: { - requestedName: params.name, - conflictingBlockId: conflictingBlock[0], - conflictingBlockName: conflictingBlock[1].name, - }, - }) - break - } - - // Special container types (loop, parallel) are not in the block registry but are valid - const isContainerType = params.type === 'loop' || params.type === 'parallel' - - // Validate block type before adding (skip validation for container types) - const addBlockConfig = getBlock(params.type) - if (!addBlockConfig && !isContainerType) { - logSkippedItem(skippedItems, { - type: 'invalid_block_type', - operationType: 'add', - blockId: block_id, - reason: `Invalid block type "${params.type}" - block not added`, - details: { requestedType: params.type }, - }) - break - } - - // Check if block type is allowed by permission group - if (!isContainerType && !isBlockTypeAllowed(params.type, permissionConfig)) { - logSkippedItem(skippedItems, { - type: 'block_not_allowed', - 
operationType: 'add', - blockId: block_id, - reason: `Block type "${params.type}" is not allowed by permission group - block not added`, - details: { requestedType: params.type }, - }) - break - } - - const triggerIssue = TriggerUtils.getTriggerAdditionIssue(modifiedState.blocks, params.type) - if (triggerIssue) { - logSkippedItem(skippedItems, { - type: 'duplicate_trigger', - operationType: 'add', - blockId: block_id, - reason: `Cannot add ${triggerIssue.triggerName} - a workflow can only have one`, - details: { requestedType: params.type, issue: triggerIssue.issue }, - }) - break - } - - // Check single-instance block constraints (e.g., Response block) - const singleInstanceIssue = TriggerUtils.getSingleInstanceBlockIssue( - modifiedState.blocks, - params.type - ) - if (singleInstanceIssue) { - logSkippedItem(skippedItems, { - type: 'duplicate_single_instance_block', - operationType: 'add', - blockId: block_id, - reason: `Cannot add ${singleInstanceIssue.blockName} - a workflow can only have one`, - details: { requestedType: params.type }, - }) - break - } - - // Create new block with proper structure - const newBlock = createBlockFromParams( - block_id, - params, - undefined, - validationErrors, - permissionConfig, - skippedItems - ) - - // Set loop/parallel data on parent block BEFORE adding to blocks (strict validation) - if (params.nestedNodes) { - if (params.type === 'loop') { - const validLoopTypes = ['for', 'forEach', 'while', 'doWhile'] - const loopType = - params.inputs?.loopType && validLoopTypes.includes(params.inputs.loopType) - ? 
params.inputs.loopType - : 'for' - newBlock.data = { - ...newBlock.data, - loopType, - // Only include type-appropriate fields - ...(loopType === 'forEach' && - params.inputs?.collection && { collection: params.inputs.collection }), - ...(loopType === 'for' && - params.inputs?.iterations && { count: params.inputs.iterations }), - ...(loopType === 'while' && - params.inputs?.condition && { whileCondition: params.inputs.condition }), - ...(loopType === 'doWhile' && - params.inputs?.condition && { doWhileCondition: params.inputs.condition }), - } - } else if (params.type === 'parallel') { - const validParallelTypes = ['count', 'collection'] - const parallelType = - params.inputs?.parallelType && validParallelTypes.includes(params.inputs.parallelType) - ? params.inputs.parallelType - : 'count' - newBlock.data = { - ...newBlock.data, - parallelType, - // Only include type-appropriate fields - ...(parallelType === 'collection' && - params.inputs?.collection && { collection: params.inputs.collection }), - ...(parallelType === 'count' && - params.inputs?.count && { count: params.inputs.count }), - } - } - } - - // Add parent block FIRST before adding children - // This ensures children can reference valid parentId - modifiedState.blocks[block_id] = newBlock - - // Handle nested nodes (for loops/parallels created from scratch) - if (params.nestedNodes) { - // Defensive check: verify parent is not locked before adding children - // (Parent was just created with locked: false, but check for consistency) - const parentBlock = modifiedState.blocks[block_id] - if (parentBlock?.locked) { - logSkippedItem(skippedItems, { - type: 'block_locked', - operationType: 'add_nested_nodes', - blockId: block_id, - reason: `Container "${block_id}" is locked - cannot add nested nodes`, - }) - break - } - - Object.entries(params.nestedNodes).forEach(([childId, childBlock]: [string, any]) => { - // Validate childId is a valid string - if (!isValidKey(childId)) { - logSkippedItem(skippedItems, { 
- type: 'missing_required_params', - operationType: 'add_nested_node', - blockId: String(childId || 'invalid'), - reason: `Invalid childId "${childId}" in nestedNodes - child block skipped`, - }) - logger.error('Invalid childId detected in nestedNodes', { - parentBlockId: block_id, - childId, - childId_type: typeof childId, - }) - return - } - - if (childBlock.type === 'loop' || childBlock.type === 'parallel') { - logSkippedItem(skippedItems, { - type: 'nested_subflow_not_allowed', - operationType: 'add_nested_node', - blockId: childId, - reason: `Cannot nest ${childBlock.type} inside ${params.type} - nested subflows are not supported`, - details: { parentType: params.type, childType: childBlock.type }, - }) - return - } - - const childBlockState = createBlockFromParams( - childId, - childBlock, - block_id, - validationErrors, - permissionConfig, - skippedItems - ) - modifiedState.blocks[childId] = childBlockState - - // Defer connection processing to ensure all blocks exist first - if (childBlock.connections) { - addOperationsWithConnections.push({ - blockId: childId, - connections: childBlock.connections, - }) - } - }) - } - - // Defer connection processing to ensure all blocks exist first (pass 2) - if (params.connections) { - addOperationsWithConnections.push({ - blockId: block_id, - connections: params.connections, - }) - } - break - } - - case 'insert_into_subflow': { - const subflowId = params?.subflowId - if (!subflowId || !params?.type || !params?.name) { - logSkippedItem(skippedItems, { - type: 'missing_required_params', - operationType: 'insert_into_subflow', - blockId: block_id, - reason: `Missing required params (subflowId, type, or name) for inserting block "${block_id}"`, - details: { - hasSubflowId: !!subflowId, - hasType: !!params?.type, - hasName: !!params?.name, - }, - }) - break - } - - const subflowBlock = modifiedState.blocks[subflowId] - if (!subflowBlock) { - logSkippedItem(skippedItems, { - type: 'invalid_subflow_parent', - operationType: 
'insert_into_subflow', - blockId: block_id, - reason: `Subflow block "${subflowId}" not found - block "${block_id}" not inserted`, - details: { subflowId }, - }) - break - } - - // Check if subflow is locked - if (subflowBlock.locked) { - logSkippedItem(skippedItems, { - type: 'block_locked', - operationType: 'insert_into_subflow', - blockId: block_id, - reason: `Subflow "${subflowId}" is locked - cannot insert block "${block_id}"`, - details: { subflowId }, - }) - break - } - - if (subflowBlock.type !== 'loop' && subflowBlock.type !== 'parallel') { - logger.error('Subflow block has invalid type', { - subflowId, - type: subflowBlock.type, - block_id, - }) - break - } - - if (params.type === 'loop' || params.type === 'parallel') { - logSkippedItem(skippedItems, { - type: 'nested_subflow_not_allowed', - operationType: 'insert_into_subflow', - blockId: block_id, - reason: `Cannot nest ${params.type} inside ${subflowBlock.type} - nested subflows are not supported`, - details: { parentType: subflowBlock.type, childType: params.type }, - }) - break - } - - // Get block configuration - const blockConfig = getAllBlocks().find((block) => block.type === params.type) - - // Check if block already exists (moving into subflow) or is new - const existingBlock = modifiedState.blocks[block_id] - - if (existingBlock) { - if (existingBlock.type === 'loop' || existingBlock.type === 'parallel') { - logSkippedItem(skippedItems, { - type: 'nested_subflow_not_allowed', - operationType: 'insert_into_subflow', - blockId: block_id, - reason: `Cannot move ${existingBlock.type} into ${subflowBlock.type} - nested subflows are not supported`, - details: { parentType: subflowBlock.type, childType: existingBlock.type }, - }) - break - } - - // Check if existing block is locked - if (existingBlock.locked) { - logSkippedItem(skippedItems, { - type: 'block_locked', - operationType: 'insert_into_subflow', - blockId: block_id, - reason: `Block "${block_id}" is locked and cannot be moved into a 
subflow`, - }) - break - } - - // Moving existing block into subflow - just update parent - existingBlock.data = { - ...existingBlock.data, - parentId: subflowId, - extent: 'parent' as const, - } - - // Update inputs if provided (with validation) - if (params.inputs) { - // Validate inputs against block configuration - const validationResult = validateInputsForBlock( - existingBlock.type, - params.inputs, - block_id - ) - validationErrors.push(...validationResult.errors) - - Object.entries(validationResult.validInputs).forEach(([key, value]) => { - // Skip runtime subblock IDs (webhookId, triggerPath) - if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { - return - } - - let sanitizedValue = value - - // Normalize array subblocks with id fields (inputFormat, table rows, etc.) - if (shouldNormalizeArrayIds(key)) { - sanitizedValue = normalizeArrayWithIds(value) - } - - // Special handling for tools - normalize and filter disallowed - if (key === 'tools' && Array.isArray(value)) { - sanitizedValue = filterDisallowedTools( - normalizeTools(value), - permissionConfig, - block_id, - skippedItems - ) - } - - // Special handling for responseFormat - normalize to ensure consistent format - if (key === 'responseFormat' && value) { - sanitizedValue = normalizeResponseFormat(value) - } - - if (!existingBlock.subBlocks[key]) { - existingBlock.subBlocks[key] = { - id: key, - type: 'short-input', - value: sanitizedValue, - } - } else { - existingBlock.subBlocks[key].value = sanitizedValue - } - }) - - const existingBlockConfig = getBlock(existingBlock.type) - if (existingBlockConfig) { - updateCanonicalModesForInputs( - existingBlock, - Object.keys(validationResult.validInputs), - existingBlockConfig - ) - } - } - } else { - // Special container types (loop, parallel) are not in the block registry but are valid - const isContainerType = params.type === 'loop' || params.type === 'parallel' - - // Validate block type before creating (skip validation for container types) - const 
insertBlockConfig = getBlock(params.type) - if (!insertBlockConfig && !isContainerType) { - logSkippedItem(skippedItems, { - type: 'invalid_block_type', - operationType: 'insert_into_subflow', - blockId: block_id, - reason: `Invalid block type "${params.type}" - block not inserted into subflow`, - details: { requestedType: params.type, subflowId }, - }) - break - } - - // Check if block type is allowed by permission group - if (!isContainerType && !isBlockTypeAllowed(params.type, permissionConfig)) { - logSkippedItem(skippedItems, { - type: 'block_not_allowed', - operationType: 'insert_into_subflow', - blockId: block_id, - reason: `Block type "${params.type}" is not allowed by permission group - block not inserted`, - details: { requestedType: params.type, subflowId }, - }) - break - } - - // Create new block as child of subflow - const newBlock = createBlockFromParams( - block_id, - params, - subflowId, - validationErrors, - permissionConfig, - skippedItems - ) - modifiedState.blocks[block_id] = newBlock - } - - // Defer connection processing to ensure all blocks exist first - // This is particularly important when multiple blocks are being inserted - // and they have connections to each other - if (params.connections) { - // Remove existing edges from this block first - modifiedState.edges = modifiedState.edges.filter((edge: any) => edge.source !== block_id) - - // Add to deferred connections list - addOperationsWithConnections.push({ - blockId: block_id, - connections: params.connections, - }) - } - break - } - - case 'extract_from_subflow': { - const subflowId = params?.subflowId - if (!subflowId) { - logSkippedItem(skippedItems, { - type: 'missing_required_params', - operationType: 'extract_from_subflow', - blockId: block_id, - reason: `Missing subflowId for extracting block "${block_id}"`, - }) - break - } - - const block = modifiedState.blocks[block_id] - if (!block) { - logSkippedItem(skippedItems, { - type: 'block_not_found', - operationType: 
'extract_from_subflow', - blockId: block_id, - reason: `Block "${block_id}" not found for extraction`, - }) - break - } - - // Check if block is locked - if (block.locked) { - logSkippedItem(skippedItems, { - type: 'block_locked', - operationType: 'extract_from_subflow', - blockId: block_id, - reason: `Block "${block_id}" is locked and cannot be extracted from subflow`, - }) - break - } - - // Check if parent subflow is locked - const parentSubflow = modifiedState.blocks[subflowId] - if (parentSubflow?.locked) { - logSkippedItem(skippedItems, { - type: 'block_locked', - operationType: 'extract_from_subflow', - blockId: block_id, - reason: `Subflow "${subflowId}" is locked - cannot extract block "${block_id}"`, - details: { subflowId }, - }) - break - } - - // Verify it's actually a child of this subflow - if (block.data?.parentId !== subflowId) { - logger.warn('Block is not a child of specified subflow', { - block_id, - actualParent: block.data?.parentId, - specifiedParent: subflowId, - }) - } - - // Remove parent relationship - if (block.data) { - block.data.parentId = undefined - block.data.extent = undefined - } - - // Note: We keep the block and its edges, just remove parent relationship - // The block becomes a root-level block - break - } - } - } - - // Pass 2: Add all deferred connections from add/insert operations - // Now all blocks exist (from add, insert, and edit operations), so connections can be safely created - // This ensures that if block A connects to block B, and both are being added/inserted, - // B will exist when we create the edge from A to B - if (addOperationsWithConnections.length > 0) { - logger.info('Processing deferred connections from add/insert operations', { - deferredConnectionCount: addOperationsWithConnections.length, - totalBlocks: Object.keys(modifiedState.blocks).length, - }) - - for (const { blockId, connections } of addOperationsWithConnections) { - // Verify the source block still exists (it might have been deleted by a 
// later operation), so re-check existence before wiring the deferred edge.
      if (!modifiedState.blocks[blockId]) {
        logger.warn('Source block no longer exists for deferred connection', {
          blockId,
          availableBlocks: Object.keys(modifiedState.blocks),
        })
        continue
      }

      addConnectionsAsEdges(modifiedState, blockId, connections, logger, skippedItems)
    }

    logger.info('Finished processing deferred connections', {
      totalEdges: modifiedState.edges.length,
    })
  }

  // Regenerate loops and parallels after modifications
  modifiedState.loops = generateLoopBlocks(modifiedState.blocks)
  modifiedState.parallels = generateParallelBlocks(modifiedState.blocks)

  // Validate all blocks have types before returning
  const blocksWithoutType = Object.entries(modifiedState.blocks)
    .filter(([_, block]: [string, any]) => !block.type || block.type === undefined)
    .map(([id, block]: [string, any]) => ({ id, block }))

  if (blocksWithoutType.length > 0) {
    logger.error('Blocks without type after operations:', {
      blocksWithoutType: blocksWithoutType.map(({ id, block }) => ({
        id,
        type: block.type,
        name: block.name,
        keys: Object.keys(block),
      })),
    })

    // Attempt to fix by removing type-less blocks
    blocksWithoutType.forEach(({ id }) => {
      delete modifiedState.blocks[id]
    })

    // Remove edges connected to removed blocks so no edge dangles
    const removedIds = new Set(blocksWithoutType.map(({ id }) => id))
    modifiedState.edges = modifiedState.edges.filter(
      (edge: any) => !removedIds.has(edge.source) && !removedIds.has(edge.target)
    )
  }

  return { state: modifiedState, validationErrors, skippedItems }
}

/**
 * Validates that selector IDs referenced by the workflow state exist in the database.
 *
 * Walks every block's subBlocks, collects values whose subBlock type is in
 * SELECTOR_TYPES (oauth-input is skipped — credentials are pre-validated before
 * edit application, which lets existing collaborator credentials stay untouched),
 * then checks each collected value via validateSelectorIds.
 *
 * @param workflowState - workflow state with a `blocks` map (shape is loosely typed here)
 * @param context - user scoping for the lookup; workspaceId narrows workspace-bound selectors
 * @returns one ValidationError per selector field that contains at least one invalid ID
 */
async function validateWorkflowSelectorIds(
  workflowState: any,
  context: { userId: string; workspaceId?: string }
): Promise<ValidationError[]> {
  const logger = createLogger('EditWorkflowSelectorValidation')
  const errors: ValidationError[] = []

  // Collect all selector fields from all blocks
  const selectorsToValidate: Array<{
    blockId: string
    blockType: string
    fieldName: string
    selectorType: string
    value: string | string[]
  }> = []

  for (const [blockId, block] of Object.entries(workflowState.blocks || {})) {
    const blockData = block as any
    const blockType = blockData.type
    if (!blockType) continue

    const blockConfig = getBlock(blockType)
    if (!blockConfig) continue

    // Check each subBlock for selector types
    for (const subBlockConfig of blockConfig.subBlocks) {
      if (!SELECTOR_TYPES.has(subBlockConfig.type)) continue

      // Skip oauth-input - credentials are pre-validated before edit application
      // This allows existing collaborator credentials to remain untouched
      if (subBlockConfig.type === 'oauth-input') continue

      const subBlockValue = blockData.subBlocks?.[subBlockConfig.id]?.value
      if (!subBlockValue) continue

      // Handle comma-separated values for multi-select
      // NOTE(review): any string containing a comma is split, even for
      // single-select fields — presumably selector IDs never contain commas; verify.
      let values: string | string[] = subBlockValue
      if (typeof subBlockValue === 'string' && subBlockValue.includes(',')) {
        values = subBlockValue
          .split(',')
          .map((v: string) => v.trim())
          .filter(Boolean)
      }

      selectorsToValidate.push({
        blockId,
        blockType,
        fieldName: subBlockConfig.id,
        selectorType: subBlockConfig.type,
        value: values,
      })
    }
  }

  if (selectorsToValidate.length === 0) {
    return errors
  }

  logger.info('Validating selector IDs', {
    selectorCount: selectorsToValidate.length,
    userId: context.userId,
    workspaceId: context.workspaceId,
  })

  // Validate each selector field (sequential awaits — one DB round-trip per field)
  for (const selector of selectorsToValidate) {
    const result = await validateSelectorIds(selector.selectorType, selector.value, context)

    if (result.invalid.length > 0) {
      // Include warning info (like available credentials) in the error message for better LLM feedback
      const warningInfo = result.warning ? `. ${result.warning}` : ''
      errors.push({
        blockId: selector.blockId,
        blockType: selector.blockType,
        field: selector.fieldName,
        value: selector.value,
        error: `Invalid ${selector.selectorType} ID(s): ${result.invalid.join(', ')} - ID(s) do not exist or user doesn't have access${warningInfo}`,
      })
    } else if (result.warning) {
      // Log warnings that don't have errors (shouldn't happen for credentials but may for other selectors)
      logger.warn(result.warning, {
        blockId: selector.blockId,
        fieldName: selector.fieldName,
      })
    }
  }

  if (errors.length > 0) {
    logger.warn('Found invalid selector IDs', {
      errorCount: errors.length,
      errors: errors.map((e) => ({ blockId: e.blockId, field: e.field, error: e.error })),
    })
  }

  return errors
}

/**
 * Pre-validates credential and apiKey inputs in operations before they are applied.
 * - Validates oauth-input (credential) IDs belong to the user
 * - Filters out apiKey inputs for hosted models when isHosted is true
 * - Also validates credentials and apiKeys in nestedNodes (blocks inside loop/parallel)
 * Returns validation errors for any removed inputs.
*/
async function preValidateCredentialInputs(
  operations: EditWorkflowOperation[],
  context: { userId: string },
  workflowState?: Record<string, unknown>
): Promise<{ filteredOperations: EditWorkflowOperation[]; errors: ValidationError[] }> {
  const { isHosted } = await import('@/lib/core/config/feature-flags')
  const { getHostedModels } = await import('@/providers/utils')

  const logger = createLogger('PreValidateCredentials')
  const errors: ValidationError[] = []

  // Collect credential and apiKey inputs that need validation/filtering.
  // Both lists are filled in one pass over `operations`, then processed below.
  const credentialInputs: Array<{
    operationIndex: number
    blockId: string
    blockType: string
    fieldName: string
    value: string
    nestedBlockId?: string
  }> = []

  const hostedApiKeyInputs: Array<{
    operationIndex: number
    blockId: string
    blockType: string
    model: string
    nestedBlockId?: string
  }> = []

  // null when not hosted — used as the "skip apiKey filtering" sentinel below
  const hostedModelsLower = isHosted ? new Set(getHostedModels().map((m) => m.toLowerCase())) : null

  /**
   * Collect credential inputs from a block's inputs based on its block config.
   * Only non-empty string values of oauth-input subBlocks are recorded.
   */
  function collectCredentialInputs(
    blockConfig: ReturnType<typeof getBlock>,
    inputs: Record<string, unknown>,
    opIndex: number,
    blockId: string,
    blockType: string,
    nestedBlockId?: string
  ) {
    if (!blockConfig) return

    for (const subBlockConfig of blockConfig.subBlocks) {
      if (subBlockConfig.type !== 'oauth-input') continue

      const inputValue = inputs[subBlockConfig.id]
      if (!inputValue || typeof inputValue !== 'string' || inputValue.trim() === '') continue

      credentialInputs.push({
        operationIndex: opIndex,
        blockId,
        blockType,
        fieldName: subBlockConfig.id,
        value: inputValue,
        nestedBlockId,
      })
    }
  }

  /**
   * Check if apiKey should be filtered for a block with the given model
   * (only when running hosted and the model is in the hosted set).
   */
  function collectHostedApiKeyInput(
    inputs: Record<string, unknown>,
    modelValue: string | undefined,
    opIndex: number,
    blockId: string,
    blockType: string,
    nestedBlockId?: string
  ) {
    if (!hostedModelsLower || !inputs.apiKey) return
    if (!modelValue || typeof modelValue !== 'string') return

    if (hostedModelsLower.has(modelValue.toLowerCase())) {
      hostedApiKeyInputs.push({
        operationIndex: opIndex,
        blockId,
        blockType,
        model: modelValue,
        nestedBlockId,
      })
    }
  }

  operations.forEach((op, opIndex) => {
    // Process main block inputs
    if (op.params?.inputs && op.params?.type) {
      const blockConfig = getBlock(op.params.type)
      if (blockConfig) {
        // Collect credentials from main block
        collectCredentialInputs(
          blockConfig,
          op.params.inputs as Record<string, unknown>,
          opIndex,
          op.block_id,
          op.params.type
        )

        // Check for apiKey inputs on hosted models
        let modelValue = (op.params.inputs as Record<string, unknown>).model as string | undefined

        // For edit operations, if model is not being changed, check existing block's model
        if (
          !modelValue &&
          op.operation_type === 'edit' &&
          (op.params.inputs as Record<string, unknown>).apiKey &&
          workflowState
        ) {
          const existingBlock = (workflowState.blocks as Record<string, unknown>)?.[op.block_id] as
            | Record<string, unknown>
            | undefined
          const existingSubBlocks = existingBlock?.subBlocks as Record<string, unknown> | undefined
          const existingModelSubBlock = existingSubBlocks?.model as
            | Record<string, unknown>
            | undefined
          modelValue = existingModelSubBlock?.value as string | undefined
        }

        collectHostedApiKeyInput(
          op.params.inputs as Record<string, unknown>,
          modelValue,
          opIndex,
          op.block_id,
          op.params.type
        )
      }
    }

    // Process nested nodes (blocks inside loop/parallel containers)
    const nestedNodes = op.params?.nestedNodes as
      | Record<string, Record<string, unknown>>
      | undefined
    if (nestedNodes) {
      Object.entries(nestedNodes).forEach(([childId, childBlock]) => {
        const childType = childBlock.type as string | undefined
        const childInputs = childBlock.inputs as Record<string, unknown> | undefined
        if (!childType || !childInputs) return

        const childBlockConfig = getBlock(childType)
        if (!childBlockConfig) return

        // Collect credentials from nested block
        collectCredentialInputs(
          childBlockConfig,
          childInputs,
          opIndex,
          op.block_id,
          childType,
          childId
        )

        // Check for apiKey inputs on hosted models in nested block
        const modelValue = childInputs.model as string | undefined
        collectHostedApiKeyInput(childInputs, modelValue, opIndex, op.block_id, childType, childId)
      })
    }
  })

  const hasCredentialsToValidate = credentialInputs.length > 0
  const hasHostedApiKeysToFilter = hostedApiKeyInputs.length > 0

  // Fast path: nothing to filter — return the original operations untouched
  if (!hasCredentialsToValidate && !hasHostedApiKeysToFilter) {
    return { filteredOperations: operations, errors }
  }

  // Deep clone operations so we can modify them without mutating the caller's array
  const filteredOperations = structuredClone(operations)

  // Filter out apiKey inputs for hosted models and add validation errors
  if (hasHostedApiKeysToFilter) {
    logger.info('Filtering apiKey inputs for hosted models', { count: hostedApiKeyInputs.length })

    for (const apiKeyInput of hostedApiKeyInputs) {
      const op = filteredOperations[apiKeyInput.operationIndex]

      // Handle nested block apiKey filtering
      if (apiKeyInput.nestedBlockId) {
        const nestedNodes = op.params?.nestedNodes as
          | Record<string, Record<string, unknown>>
          | undefined
        const nestedBlock = nestedNodes?.[apiKeyInput.nestedBlockId]
        const nestedInputs = nestedBlock?.inputs as Record<string, unknown> | undefined
        if (nestedInputs?.apiKey) {
          nestedInputs.apiKey = undefined
          logger.debug('Filtered apiKey for hosted model in nested block', {
            parentBlockId: apiKeyInput.blockId,
            nestedBlockId: apiKeyInput.nestedBlockId,
            model: apiKeyInput.model,
          })

          errors.push({
            blockId: apiKeyInput.nestedBlockId,
            blockType: apiKeyInput.blockType,
            field: 'apiKey',
            value: '[redacted]',
            error: `Cannot set API key for hosted model "${apiKeyInput.model}" - API keys are managed by the platform when using hosted models`,
          })
        }
      } else if (op.params?.inputs?.apiKey) {
        // Handle main block apiKey filtering
        op.params.inputs.apiKey = undefined
        logger.debug('Filtered apiKey for hosted model', {
          blockId: apiKeyInput.blockId,
          model: apiKeyInput.model,
        })

        errors.push({
          blockId: apiKeyInput.blockId,
          blockType: apiKeyInput.blockType,
          field: 'apiKey',
          value: '[redacted]',
          error: `Cannot set API key for hosted model "${apiKeyInput.model}" - API keys are managed by the platform when using hosted models`,
        })
      }
    }
  }

  // Validate credential inputs (one batched lookup for all collected IDs)
  if (hasCredentialsToValidate) {
    logger.info('Pre-validating credential inputs', {
      credentialCount: credentialInputs.length,
      userId: context.userId,
    })

    const allCredentialIds = credentialInputs.map((c) => c.value)
    const validationResult = await validateSelectorIds('oauth-input', allCredentialIds, context)
    const invalidSet = new Set(validationResult.invalid)

    if (invalidSet.size > 0) {
      for (const credInput of credentialInputs) {
        if (!invalidSet.has(credInput.value)) continue

        const op = filteredOperations[credInput.operationIndex]

        // Handle nested block credential removal
        if (credInput.nestedBlockId) {
          const nestedNodes = op.params?.nestedNodes as
            | Record<string, Record<string, unknown>>
            | undefined
          const nestedBlock = nestedNodes?.[credInput.nestedBlockId]
          const nestedInputs = nestedBlock?.inputs as Record<string, unknown> | undefined
          if (nestedInputs?.[credInput.fieldName]) {
            delete nestedInputs[credInput.fieldName]
            logger.info('Removed invalid credential from nested block', {
              parentBlockId: credInput.blockId,
              nestedBlockId: credInput.nestedBlockId,
              field: credInput.fieldName,
              invalidValue: credInput.value,
            })
          }
        } else if (op.params?.inputs?.[credInput.fieldName]) {
          // Handle main block credential removal
          delete op.params.inputs[credInput.fieldName]
          logger.info('Removed invalid credential from operation', {
            blockId: credInput.blockId,
            field: credInput.fieldName,
            invalidValue: credInput.value,
          })
        }

        const warningInfo = validationResult.warning ? `. ${validationResult.warning}` : ''
        // Attribute the error to the nested block when the credential lived there
        const errorBlockId = credInput.nestedBlockId ??
credInput.blockId
        errors.push({
          blockId: errorBlockId,
          blockType: credInput.blockType,
          field: credInput.fieldName,
          value: credInput.value,
          error: `Invalid credential ID "${credInput.value}" - credential does not exist or user doesn't have access${warningInfo}`,
        })
      }

      logger.warn('Filtered out invalid credentials', {
        invalidCount: invalidSet.size,
      })
    }
  }

  return { filteredOperations, errors }
}

/**
 * Loads a workflow's current state from the normalized tables.
 * Drops any block persisted without a `type` (plus edges touching it) and
 * extracts a blockId -> subBlockId -> value map of defined subBlock values.
 *
 * @param workflowId - id of the workflow row to load
 * @returns the sanitized workflow state and the extracted subBlock values
 * @throws if the workflow row or its normalized data is missing
 */
async function getCurrentWorkflowStateFromDb(
  workflowId: string
): Promise<{ workflowState: any; subBlockValues: Record<string, Record<string, any>> }> {
  const logger = createLogger('EditWorkflowServerTool')
  const [workflowRecord] = await db
    .select()
    .from(workflowTable)
    .where(eq(workflowTable.id, workflowId))
    .limit(1)
  if (!workflowRecord) throw new Error(`Workflow ${workflowId} not found in database`)
  const normalized = await loadWorkflowFromNormalizedTables(workflowId)
  if (!normalized) throw new Error('Workflow has no normalized data')

  // Validate and fix blocks without types
  const blocks = { ...normalized.blocks }
  const invalidBlocks: string[] = []

  Object.entries(blocks).forEach(([id, block]: [string, any]) => {
    if (!block.type) {
      logger.warn(`Block ${id} loaded without type from database`, {
        blockKeys: Object.keys(block),
        blockName: block.name,
      })
      invalidBlocks.push(id)
    }
  })

  // Remove invalid blocks
  invalidBlocks.forEach((id) => delete blocks[id])

  // Remove edges connected to invalid blocks
  const edges = normalized.edges.filter(
    (edge: any) => !invalidBlocks.includes(edge.source) && !invalidBlocks.includes(edge.target)
  )

  const workflowState: any = {
    blocks,
    edges,
    loops: normalized.loops || {},
    parallels: normalized.parallels || {},
  }
  // NOTE(review): values are read from normalized.blocks (pre-filter), so
  // subBlockValues may still contain entries for the type-less blocks removed
  // above — confirm downstream consumers key off workflowState.blocks.
  const subBlockValues: Record<string, Record<string, any>> = {}
  Object.entries(normalized.blocks).forEach(([blockId, block]) => {
    subBlockValues[blockId] = {}
    Object.entries((block as any).subBlocks || {}).forEach(([subId, sub]) => {
      if ((sub as any).value !== undefined)
        subBlockValues[blockId][subId] = (sub as any).value
    })
  })
  return { workflowState, subBlockValues }
}

// NOTE(review): type arguments of BaseServerTool and the execute() return type
// appear to have been lost in transit (generic parameters stripped) — confirm
// the originals against the BaseServerTool declaration.
export const editWorkflowServerTool: BaseServerTool = {
  name: 'edit_workflow',
  async execute(params: EditWorkflowParams, context?: { userId: string }): Promise<any> {
    const logger = createLogger('EditWorkflowServerTool')
    const { operations, workflowId, currentUserWorkflow } = params
    if (!Array.isArray(operations) || operations.length === 0) {
      throw new Error('operations are required and must be an array')
    }
    if (!workflowId) throw new Error('workflowId is required')

    logger.info('Executing edit_workflow', {
      operationCount: operations.length,
      workflowId,
      hasCurrentUserWorkflow: !!currentUserWorkflow,
    })

    // Get current workflow state: prefer the caller-supplied snapshot,
    // otherwise load (and sanitize) from the database
    let workflowState: any
    if (currentUserWorkflow) {
      try {
        workflowState = JSON.parse(currentUserWorkflow)
      } catch (error) {
        logger.error('Failed to parse currentUserWorkflow', error)
        throw new Error('Invalid currentUserWorkflow format')
      }
    } else {
      const fromDb = await getCurrentWorkflowStateFromDb(workflowId)
      workflowState = fromDb.workflowState
    }

    // Get permission config for the user
    const permissionConfig = context?.userId ?
await getUserPermissionConfig(context.userId) : null - - // Pre-validate credential and apiKey inputs before applying operations - // This filters out invalid credentials and apiKeys for hosted models - let operationsToApply = operations - const credentialErrors: ValidationError[] = [] - if (context?.userId) { - const { filteredOperations, errors: credErrors } = await preValidateCredentialInputs( - operations, - { userId: context.userId }, - workflowState - ) - operationsToApply = filteredOperations - credentialErrors.push(...credErrors) - } - - // Apply operations directly to the workflow state - const { - state: modifiedWorkflowState, - validationErrors, - skippedItems, - } = applyOperationsToWorkflowState(workflowState, operationsToApply, permissionConfig) - - // Add credential validation errors - validationErrors.push(...credentialErrors) - - // Get workspaceId for selector validation - let workspaceId: string | undefined - try { - const [workflowRecord] = await db - .select({ workspaceId: workflowTable.workspaceId }) - .from(workflowTable) - .where(eq(workflowTable.id, workflowId)) - .limit(1) - workspaceId = workflowRecord?.workspaceId ?? undefined - } catch (error) { - logger.warn('Failed to get workspaceId for selector validation', { error, workflowId }) - } - - // Validate selector IDs exist in the database - if (context?.userId) { - try { - const selectorErrors = await validateWorkflowSelectorIds(modifiedWorkflowState, { - userId: context.userId, - workspaceId, - }) - validationErrors.push(...selectorErrors) - } catch (error) { - logger.warn('Selector ID validation failed', { - error: error instanceof Error ? 
error.message : String(error), - }) - } - } - - // Validate the workflow state - const validation = validateWorkflowState(modifiedWorkflowState, { sanitize: true }) - - if (!validation.valid) { - logger.error('Edited workflow state is invalid', { - errors: validation.errors, - warnings: validation.warnings, - }) - throw new Error(`Invalid edited workflow: ${validation.errors.join('; ')}`) - } - - if (validation.warnings.length > 0) { - logger.warn('Edited workflow validation warnings', { - warnings: validation.warnings, - }) - } - - // Extract and persist custom tools to database (reuse workspaceId from selector validation) - if (context?.userId && workspaceId) { - try { - const finalWorkflowState = validation.sanitizedState || modifiedWorkflowState - const { saved, errors } = await extractAndPersistCustomTools( - finalWorkflowState, - workspaceId, - context.userId - ) - - if (saved > 0) { - logger.info(`Persisted ${saved} custom tool(s) to database`, { workflowId }) - } - - if (errors.length > 0) { - logger.warn('Some custom tools failed to persist', { errors, workflowId }) - } - } catch (error) { - logger.error('Failed to persist custom tools', { error, workflowId }) - } - } else if (context?.userId && !workspaceId) { - logger.warn('Workflow has no workspaceId, skipping custom tools persistence', { - workflowId, - }) - } else { - logger.warn('No userId in context - skipping custom tools persistence', { workflowId }) - } - - logger.info('edit_workflow successfully applied operations', { - operationCount: operations.length, - blocksCount: Object.keys(modifiedWorkflowState.blocks).length, - edgesCount: modifiedWorkflowState.edges.length, - inputValidationErrors: validationErrors.length, - skippedItemsCount: skippedItems.length, - schemaValidationErrors: validation.errors.length, - validationWarnings: validation.warnings.length, - }) - - // Format validation errors for LLM feedback - const inputErrors = - validationErrors.length > 0 - ? 
validationErrors.map((e) => `Block "${e.blockId}" (${e.blockType}): ${e.error}`) - : undefined - - // Format skipped items for LLM feedback - const skippedMessages = - skippedItems.length > 0 ? skippedItems.map((item) => item.reason) : undefined - - // Return the modified workflow state for the client to convert to YAML if needed - return { - success: true, - workflowState: validation.sanitizedState || modifiedWorkflowState, - // Include input validation errors so the LLM can see what was rejected - ...(inputErrors && { - inputValidationErrors: inputErrors, - inputValidationMessage: `${inputErrors.length} input(s) were rejected due to validation errors. The workflow was still updated with valid inputs only. Errors: ${inputErrors.join('; ')}`, - }), - // Include skipped items so the LLM can see what operations were skipped - ...(skippedMessages && { - skippedItems: skippedMessages, - skippedItemsMessage: `${skippedItems.length} operation(s) were skipped due to invalid references. Details: ${skippedMessages.join('; ')}`, - }), - } - }, -} diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/builders.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/builders.ts new file mode 100644 index 000000000..935e7bcee --- /dev/null +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/builders.ts @@ -0,0 +1,633 @@ +import crypto from 'crypto' +import { createLogger } from '@sim/logger' +import type { PermissionGroupConfig } from '@/lib/permission-groups/types' +import { getBlockOutputs } from '@/lib/workflows/blocks/block-outputs' +import { buildCanonicalIndex, isCanonicalPair } from '@/lib/workflows/subblocks/visibility' +import { getAllBlocks } from '@/blocks/registry' +import type { BlockConfig } from '@/blocks/types' +import { TRIGGER_RUNTIME_SUBBLOCK_IDS } from '@/triggers/constants' +import type { EditWorkflowOperation, SkippedItem, ValidationError } from './types' +import { logSkippedItem, UUID_REGEX } from './types' +import { + 
validateInputsForBlock, + validateSourceHandleForBlock, + validateTargetHandle, +} from './validation' + +/** + * Helper to create a block state from operation params + */ +export function createBlockFromParams( + blockId: string, + params: any, + parentId?: string, + errorsCollector?: ValidationError[], + permissionConfig?: PermissionGroupConfig | null, + skippedItems?: SkippedItem[] +): any { + const blockConfig = getAllBlocks().find((b) => b.type === params.type) + + // Validate inputs against block configuration + let validatedInputs: Record | undefined + if (params.inputs) { + const result = validateInputsForBlock(params.type, params.inputs, blockId) + validatedInputs = result.validInputs + if (errorsCollector && result.errors.length > 0) { + errorsCollector.push(...result.errors) + } + } + + // Determine outputs based on trigger mode + const triggerMode = params.triggerMode || false + let outputs: Record + + if (params.outputs) { + outputs = params.outputs + } else if (blockConfig) { + const subBlocks: Record = {} + if (validatedInputs) { + Object.entries(validatedInputs).forEach(([key, value]) => { + // Skip runtime subblock IDs when computing outputs + if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { + return + } + subBlocks[key] = { id: key, type: 'short-input', value: value } + }) + } + outputs = getBlockOutputs(params.type, subBlocks, triggerMode) + } else { + outputs = {} + } + + const blockState: any = { + id: blockId, + type: params.type, + name: params.name, + position: { x: 0, y: 0 }, + enabled: params.enabled !== undefined ? params.enabled : true, + horizontalHandles: true, + advancedMode: params.advancedMode || false, + height: 0, + triggerMode: triggerMode, + subBlocks: {}, + outputs: outputs, + data: parentId ? 
{ parentId, extent: 'parent' as const } : {}, + locked: false, + } + + // Add validated inputs as subBlocks + if (validatedInputs) { + Object.entries(validatedInputs).forEach(([key, value]) => { + if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { + return + } + + let sanitizedValue = value + + // Normalize array subblocks with id fields (inputFormat, table rows, etc.) + if (shouldNormalizeArrayIds(key)) { + sanitizedValue = normalizeArrayWithIds(value) + } + + // Special handling for tools - normalize and filter disallowed + if (key === 'tools' && Array.isArray(value)) { + sanitizedValue = filterDisallowedTools( + normalizeTools(value), + permissionConfig ?? null, + blockId, + skippedItems ?? [] + ) + } + + // Special handling for responseFormat - normalize to ensure consistent format + if (key === 'responseFormat' && value) { + sanitizedValue = normalizeResponseFormat(value) + } + + blockState.subBlocks[key] = { + id: key, + type: 'short-input', + value: sanitizedValue, + } + }) + } + + // Set up subBlocks from block configuration + if (blockConfig) { + blockConfig.subBlocks.forEach((subBlock) => { + if (!blockState.subBlocks[subBlock.id]) { + blockState.subBlocks[subBlock.id] = { + id: subBlock.id, + type: subBlock.type, + value: null, + } + } + }) + + if (validatedInputs) { + updateCanonicalModesForInputs(blockState, Object.keys(validatedInputs), blockConfig) + } + } + + return blockState +} + +export function updateCanonicalModesForInputs( + block: { data?: { canonicalModes?: Record } }, + inputKeys: string[], + blockConfig: BlockConfig +): void { + if (!blockConfig.subBlocks?.length) return + + const canonicalIndex = buildCanonicalIndex(blockConfig.subBlocks) + const canonicalModeUpdates: Record = {} + + for (const inputKey of inputKeys) { + const canonicalId = canonicalIndex.canonicalIdBySubBlockId[inputKey] + if (!canonicalId) continue + + const group = canonicalIndex.groupsById[canonicalId] + if (!group || !isCanonicalPair(group)) continue + + const 
isAdvanced = group.advancedIds.includes(inputKey) + const existingMode = canonicalModeUpdates[canonicalId] + + if (!existingMode || isAdvanced) { + canonicalModeUpdates[canonicalId] = isAdvanced ? 'advanced' : 'basic' + } + } + + if (Object.keys(canonicalModeUpdates).length > 0) { + if (!block.data) block.data = {} + if (!block.data.canonicalModes) block.data.canonicalModes = {} + Object.assign(block.data.canonicalModes, canonicalModeUpdates) + } +} + +/** + * Normalize tools array by adding back fields that were sanitized for training + */ +export function normalizeTools(tools: any[]): any[] { + return tools.map((tool) => { + if (tool.type === 'custom-tool') { + // New reference format: minimal fields only + if (tool.customToolId && !tool.schema && !tool.code) { + return { + type: tool.type, + customToolId: tool.customToolId, + usageControl: tool.usageControl || 'auto', + isExpanded: tool.isExpanded ?? true, + } + } + + // Legacy inline format: include all fields + const normalized: any = { + ...tool, + params: tool.params || {}, + isExpanded: tool.isExpanded ?? true, + } + + // Ensure schema has proper structure (for inline format) + if (normalized.schema?.function) { + normalized.schema = { + type: 'function', + function: { + name: normalized.schema.function.name || tool.title, // Preserve name or derive from title + description: normalized.schema.function.description, + parameters: normalized.schema.function.parameters, + }, + } + } + + return normalized + } + + // For other tool types, just ensure isExpanded exists + return { + ...tool, + isExpanded: tool.isExpanded ?? true, + } + }) +} + +/** + * Subblock types that store arrays of objects with `id` fields. + * The LLM may generate arbitrary IDs which need to be converted to proper UUIDs. 
+ */ +const ARRAY_WITH_ID_SUBBLOCK_TYPES = new Set([ + 'inputFormat', // input-format: Fields with id, name, type, value, collapsed + 'headers', // table: Rows with id, cells (used for HTTP headers) + 'params', // table: Rows with id, cells (used for query params) + 'variables', // table or variables-input: Rows/assignments with id + 'tagFilters', // knowledge-tag-filters: Filters with id, tagName, etc. + 'documentTags', // document-tag-entry: Tags with id, tagName, etc. + 'metrics', // eval-input: Metrics with id, name, description, range +]) + +/** + * Normalizes array subblock values by ensuring each item has a valid UUID. + * The LLM may generate arbitrary IDs like "input-desc-001" or "row-1" which need + * to be converted to proper UUIDs for consistency with UI-created items. + */ +export function normalizeArrayWithIds(value: unknown): any[] { + if (!Array.isArray(value)) { + return [] + } + + return value.map((item: any) => { + if (!item || typeof item !== 'object') { + return item + } + + // Check if id is missing or not a valid UUID + const hasValidUUID = typeof item.id === 'string' && UUID_REGEX.test(item.id) + if (!hasValidUUID) { + return { ...item, id: crypto.randomUUID() } + } + + return item + }) +} + +/** + * Checks if a subblock key should have its array items normalized with UUIDs. 
+ */ +export function shouldNormalizeArrayIds(key: string): boolean { + return ARRAY_WITH_ID_SUBBLOCK_TYPES.has(key) +} + +/** + * Normalize responseFormat to ensure consistent storage + * Handles both string (JSON) and object formats + * Returns pretty-printed JSON for better UI readability + */ +export function normalizeResponseFormat(value: any): string { + try { + let obj = value + + // If it's already a string, parse it first + if (typeof value === 'string') { + const trimmed = value.trim() + if (!trimmed) { + return '' + } + obj = JSON.parse(trimmed) + } + + // If it's an object, stringify it with consistent formatting + if (obj && typeof obj === 'object') { + // Sort keys recursively for consistent comparison + const sortKeys = (item: any): any => { + if (Array.isArray(item)) { + return item.map(sortKeys) + } + if (item !== null && typeof item === 'object') { + return Object.keys(item) + .sort() + .reduce((result: any, key: string) => { + result[key] = sortKeys(item[key]) + return result + }, {}) + } + return item + } + + // Return pretty-printed with 2-space indentation for UI readability + // The sanitizer will normalize it to minified format for comparison + return JSON.stringify(sortKeys(obj), null, 2) + } + + return String(value) + } catch { + // If parsing fails, return the original value as string + return String(value) + } +} + +/** + * Creates a validated edge between two blocks. + * Returns true if edge was created, false if skipped due to validation errors. + */ +export function createValidatedEdge( + modifiedState: any, + sourceBlockId: string, + targetBlockId: string, + sourceHandle: string, + targetHandle: string, + operationType: string, + logger: ReturnType, + skippedItems?: SkippedItem[] +): boolean { + if (!modifiedState.blocks[targetBlockId]) { + logger.warn(`Target block "${targetBlockId}" not found. 
Edge skipped.`, { + sourceBlockId, + targetBlockId, + sourceHandle, + }) + skippedItems?.push({ + type: 'invalid_edge_target', + operationType, + blockId: sourceBlockId, + reason: `Edge from "${sourceBlockId}" to "${targetBlockId}" skipped - target block does not exist`, + details: { sourceHandle, targetHandle, targetId: targetBlockId }, + }) + return false + } + + const sourceBlock = modifiedState.blocks[sourceBlockId] + if (!sourceBlock) { + logger.warn(`Source block "${sourceBlockId}" not found. Edge skipped.`, { + sourceBlockId, + targetBlockId, + }) + skippedItems?.push({ + type: 'invalid_edge_source', + operationType, + blockId: sourceBlockId, + reason: `Edge from "${sourceBlockId}" to "${targetBlockId}" skipped - source block does not exist`, + details: { sourceHandle, targetHandle, targetId: targetBlockId }, + }) + return false + } + + const sourceBlockType = sourceBlock.type + if (!sourceBlockType) { + logger.warn(`Source block "${sourceBlockId}" has no type. Edge skipped.`, { + sourceBlockId, + targetBlockId, + }) + skippedItems?.push({ + type: 'invalid_edge_source', + operationType, + blockId: sourceBlockId, + reason: `Edge from "${sourceBlockId}" to "${targetBlockId}" skipped - source block has no type`, + details: { sourceHandle, targetHandle, targetId: targetBlockId }, + }) + return false + } + + const sourceValidation = validateSourceHandleForBlock(sourceHandle, sourceBlockType, sourceBlock) + if (!sourceValidation.valid) { + logger.warn(`Invalid source handle. 
Edge skipped.`, { + sourceBlockId, + targetBlockId, + sourceHandle, + error: sourceValidation.error, + }) + skippedItems?.push({ + type: 'invalid_source_handle', + operationType, + blockId: sourceBlockId, + reason: sourceValidation.error || `Invalid source handle "${sourceHandle}"`, + details: { sourceHandle, targetHandle, targetId: targetBlockId }, + }) + return false + } + + const targetValidation = validateTargetHandle(targetHandle) + if (!targetValidation.valid) { + logger.warn(`Invalid target handle. Edge skipped.`, { + sourceBlockId, + targetBlockId, + targetHandle, + error: targetValidation.error, + }) + skippedItems?.push({ + type: 'invalid_target_handle', + operationType, + blockId: sourceBlockId, + reason: targetValidation.error || `Invalid target handle "${targetHandle}"`, + details: { sourceHandle, targetHandle, targetId: targetBlockId }, + }) + return false + } + + // Use normalized handle if available (e.g., 'if' -> 'condition-{uuid}') + const finalSourceHandle = sourceValidation.normalizedHandle || sourceHandle + + modifiedState.edges.push({ + id: crypto.randomUUID(), + source: sourceBlockId, + sourceHandle: finalSourceHandle, + target: targetBlockId, + targetHandle, + type: 'default', + }) + return true +} + +/** + * Adds connections as edges for a block. 
+ * Supports multiple target formats: + * - String: "target-block-id" + * - Object: { block: "target-block-id", handle?: "custom-target-handle" } + * - Array of strings or objects + */ +export function addConnectionsAsEdges( + modifiedState: any, + blockId: string, + connections: Record, + logger: ReturnType, + skippedItems?: SkippedItem[] +): void { + Object.entries(connections).forEach(([sourceHandle, targets]) => { + if (targets === null) return + + const addEdgeForTarget = (targetBlock: string, targetHandle?: string) => { + createValidatedEdge( + modifiedState, + blockId, + targetBlock, + sourceHandle, + targetHandle || 'target', + 'add_edge', + logger, + skippedItems + ) + } + + if (typeof targets === 'string') { + addEdgeForTarget(targets) + } else if (Array.isArray(targets)) { + targets.forEach((target: any) => { + if (typeof target === 'string') { + addEdgeForTarget(target) + } else if (target?.block) { + addEdgeForTarget(target.block, target.handle) + } + }) + } else if (typeof targets === 'object' && targets?.block) { + addEdgeForTarget(targets.block, targets.handle) + } + }) +} + +export function applyTriggerConfigToBlockSubblocks(block: any, triggerConfig: Record) { + if (!block?.subBlocks || !triggerConfig || typeof triggerConfig !== 'object') { + return + } + + Object.entries(triggerConfig).forEach(([configKey, configValue]) => { + const existingSubblock = block.subBlocks[configKey] + if (existingSubblock) { + const existingValue = existingSubblock.value + const valuesEqual = + typeof existingValue === 'object' || typeof configValue === 'object' + ? 
JSON.stringify(existingValue) === JSON.stringify(configValue) + : existingValue === configValue + + if (valuesEqual) { + return + } + + block.subBlocks[configKey] = { + ...existingSubblock, + value: configValue, + } + } else { + block.subBlocks[configKey] = { + id: configKey, + type: 'short-input', + value: configValue, + } + } + }) +} + +/** + * Filters out tools that are not allowed by the permission group config + * Returns both the allowed tools and any skipped tool items for logging + */ +export function filterDisallowedTools( + tools: any[], + permissionConfig: PermissionGroupConfig | null, + blockId: string, + skippedItems: SkippedItem[] +): any[] { + if (!permissionConfig) { + return tools + } + + const allowedTools: any[] = [] + + for (const tool of tools) { + if (tool.type === 'custom-tool' && permissionConfig.disableCustomTools) { + logSkippedItem(skippedItems, { + type: 'tool_not_allowed', + operationType: 'add', + blockId, + reason: `Custom tool "${tool.title || tool.customToolId || 'unknown'}" is not allowed by permission group - tool not added`, + details: { toolType: 'custom-tool', toolId: tool.customToolId }, + }) + continue + } + if (tool.type === 'mcp' && permissionConfig.disableMcpTools) { + logSkippedItem(skippedItems, { + type: 'tool_not_allowed', + operationType: 'add', + blockId, + reason: `MCP tool "${tool.title || 'unknown'}" is not allowed by permission group - tool not added`, + details: { toolType: 'mcp', serverId: tool.params?.serverId }, + }) + continue + } + allowedTools.push(tool) + } + + return allowedTools +} + +/** + * Normalizes block IDs in operations to ensure they are valid UUIDs. + * The LLM may generate human-readable IDs like "web_search" or "research_agent" + * which need to be converted to proper UUIDs for database compatibility. + * + * Returns the normalized operations and a mapping from old IDs to new UUIDs. 
+ */ +export function normalizeBlockIdsInOperations(operations: EditWorkflowOperation[]): { + normalizedOperations: EditWorkflowOperation[] + idMapping: Map +} { + const logger = createLogger('EditWorkflowServerTool') + const idMapping = new Map() + + // First pass: collect all non-UUID block_ids from add/insert operations + for (const op of operations) { + if (op.operation_type === 'add' || op.operation_type === 'insert_into_subflow') { + if (op.block_id && !UUID_REGEX.test(op.block_id)) { + const newId = crypto.randomUUID() + idMapping.set(op.block_id, newId) + logger.debug('Normalizing block ID', { oldId: op.block_id, newId }) + } + } + } + + if (idMapping.size === 0) { + return { normalizedOperations: operations, idMapping } + } + + logger.info('Normalizing block IDs in operations', { + normalizedCount: idMapping.size, + mappings: Object.fromEntries(idMapping), + }) + + // Helper to replace an ID if it's in the mapping + const replaceId = (id: string | undefined): string | undefined => { + if (!id) return id + return idMapping.get(id) ?? id + } + + // Second pass: update all references to use new UUIDs + const normalizedOperations = operations.map((op) => { + const normalized: EditWorkflowOperation = { + ...op, + block_id: replaceId(op.block_id) ?? 
op.block_id, + } + + if (op.params) { + normalized.params = { ...op.params } + + // Update subflowId references (for insert_into_subflow) + if (normalized.params.subflowId) { + normalized.params.subflowId = replaceId(normalized.params.subflowId) + } + + // Update connection references + if (normalized.params.connections) { + const normalizedConnections: Record = {} + for (const [handle, targets] of Object.entries(normalized.params.connections)) { + if (typeof targets === 'string') { + normalizedConnections[handle] = replaceId(targets) + } else if (Array.isArray(targets)) { + normalizedConnections[handle] = targets.map((t) => { + if (typeof t === 'string') return replaceId(t) + if (t && typeof t === 'object' && t.block) { + return { ...t, block: replaceId(t.block) } + } + return t + }) + } else if (targets && typeof targets === 'object' && (targets as any).block) { + normalizedConnections[handle] = { ...targets, block: replaceId((targets as any).block) } + } else { + normalizedConnections[handle] = targets + } + } + normalized.params.connections = normalizedConnections + } + + // Update nestedNodes block IDs + if (normalized.params.nestedNodes) { + const normalizedNestedNodes: Record = {} + for (const [childId, childBlock] of Object.entries(normalized.params.nestedNodes)) { + const newChildId = replaceId(childId) ?? 
childId + normalizedNestedNodes[newChildId] = childBlock + } + normalized.params.nestedNodes = normalizedNestedNodes + } + } + + return normalized + }) + + return { normalizedOperations, idMapping } +} diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/engine.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/engine.ts new file mode 100644 index 000000000..7bb5d4c0d --- /dev/null +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/engine.ts @@ -0,0 +1,274 @@ +import { createLogger } from '@sim/logger' +import type { PermissionGroupConfig } from '@/lib/permission-groups/types' +import { isValidKey } from '@/lib/workflows/sanitization/key-validation' +import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils' +import { addConnectionsAsEdges, normalizeBlockIdsInOperations } from './builders' +import { + handleAddOperation, + handleDeleteOperation, + handleEditOperation, + handleExtractFromSubflowOperation, + handleInsertIntoSubflowOperation, +} from './operations' +import type { + ApplyOperationsResult, + EditWorkflowOperation, + OperationContext, + ValidationError, +} from './types' +import { logSkippedItem, type SkippedItem } from './types' + +const logger = createLogger('EditWorkflowServerTool') + +type OperationHandler = (op: EditWorkflowOperation, ctx: OperationContext) => void + +const OPERATION_HANDLERS: Record = { + delete: handleDeleteOperation, + extract_from_subflow: handleExtractFromSubflowOperation, + add: handleAddOperation, + insert_into_subflow: handleInsertIntoSubflowOperation, + edit: handleEditOperation, +} + +/** + * Topologically sort insert operations to ensure parents are created before children + * Returns sorted array where parent inserts always come before child inserts + */ +export function topologicalSortInserts( + inserts: EditWorkflowOperation[], + adds: EditWorkflowOperation[] +): EditWorkflowOperation[] { + if (inserts.length === 0) return [] + + // Build a map 
of blockId -> operation for quick lookup + const insertMap = new Map() + inserts.forEach((op) => insertMap.set(op.block_id, op)) + + // Build a set of blocks being added (potential parents) + const addedBlocks = new Set(adds.map((op) => op.block_id)) + + // Build dependency graph: block -> blocks that depend on it + const dependents = new Map>() + const dependencies = new Map>() + + inserts.forEach((op) => { + const blockId = op.block_id + const parentId = op.params?.subflowId + + dependencies.set(blockId, new Set()) + + if (parentId) { + // Track dependency if parent is being inserted OR being added + // This ensures children wait for parents regardless of operation type + const parentBeingCreated = insertMap.has(parentId) || addedBlocks.has(parentId) + + if (parentBeingCreated) { + // Only add dependency if parent is also being inserted (not added) + // Because adds run before inserts, added parents are already created + if (insertMap.has(parentId)) { + dependencies.get(blockId)!.add(parentId) + if (!dependents.has(parentId)) { + dependents.set(parentId, new Set()) + } + dependents.get(parentId)!.add(blockId) + } + } + } + }) + + // Topological sort using Kahn's algorithm + const sorted: EditWorkflowOperation[] = [] + const queue: string[] = [] + + // Start with nodes that have no dependencies (or depend only on added blocks) + inserts.forEach((op) => { + const deps = dependencies.get(op.block_id)! + if (deps.size === 0) { + queue.push(op.block_id) + } + }) + + while (queue.length > 0) { + const blockId = queue.shift()! + const op = insertMap.get(blockId) + if (op) { + sorted.push(op) + } + + // Remove this node from dependencies of others + const children = dependents.get(blockId) + if (children) { + children.forEach((childId) => { + const childDeps = dependencies.get(childId)! 
+ childDeps.delete(blockId) + if (childDeps.size === 0) { + queue.push(childId) + } + }) + } + } + + // If sorted length doesn't match input, there's a cycle (shouldn't happen with valid operations) + // Just append remaining operations + if (sorted.length < inserts.length) { + inserts.forEach((op) => { + if (!sorted.includes(op)) { + sorted.push(op) + } + }) + } + + return sorted +} + +function orderOperations(operations: EditWorkflowOperation[]): EditWorkflowOperation[] { + /** + * Reorder operations to ensure correct execution sequence: + * 1. delete - Remove blocks first to free up IDs and clean state + * 2. extract_from_subflow - Extract blocks from subflows before modifications + * 3. add - Create new blocks (sorted by connection dependencies) + * 4. insert_into_subflow - Insert blocks into subflows (sorted by parent dependency) + * 5. edit - Edit existing blocks last, so connections to newly added blocks work + */ + const deletes = operations.filter((op) => op.operation_type === 'delete') + const extracts = operations.filter((op) => op.operation_type === 'extract_from_subflow') + const adds = operations.filter((op) => op.operation_type === 'add') + const inserts = operations.filter((op) => op.operation_type === 'insert_into_subflow') + const edits = operations.filter((op) => op.operation_type === 'edit') + + // Sort insert operations to ensure parents are inserted before children + const sortedInserts = topologicalSortInserts(inserts, adds) + + return [...deletes, ...extracts, ...adds, ...sortedInserts, ...edits] +} + +/** + * Apply operations directly to the workflow JSON state + */ +export function applyOperationsToWorkflowState( + workflowState: Record, + operations: EditWorkflowOperation[], + permissionConfig: PermissionGroupConfig | null = null +): ApplyOperationsResult { + // Deep clone the workflow state to avoid mutations + const modifiedState = JSON.parse(JSON.stringify(workflowState)) + + // Collect validation errors across all operations + const 
validationErrors: ValidationError[] = [] + + // Collect skipped items across all operations + const skippedItems: SkippedItem[] = [] + + // Normalize block IDs to UUIDs before processing + const { normalizedOperations } = normalizeBlockIdsInOperations(operations) + + // Order operations for deterministic application + const orderedOperations = orderOperations(normalizedOperations) + + logger.info('Applying operations to workflow:', { + totalOperations: orderedOperations.length, + operationTypes: orderedOperations.reduce((acc: Record, op) => { + acc[op.operation_type] = (acc[op.operation_type] || 0) + 1 + return acc + }, {}), + initialBlockCount: Object.keys((modifiedState as any).blocks || {}).length, + }) + + const ctx: OperationContext = { + modifiedState, + skippedItems, + validationErrors, + permissionConfig, + deferredConnections: [], + } + + for (const operation of orderedOperations) { + const { operation_type, block_id } = operation + + // CRITICAL: Validate block_id is a valid string and not "undefined" + // This prevents undefined keys from being set in the workflow state + if (!isValidKey(block_id)) { + logSkippedItem(skippedItems, { + type: 'missing_required_params', + operationType: operation_type, + blockId: String(block_id || 'invalid'), + reason: `Invalid block_id "${block_id}" (type: ${typeof block_id}) - operation skipped. Block IDs must be valid non-empty strings.`, + }) + logger.error('Invalid block_id detected in operation', { + operation_type, + block_id, + block_id_type: typeof block_id, + }) + continue + } + + const handler = OPERATION_HANDLERS[operation_type] + if (!handler) continue + + logger.debug(`Executing operation: ${operation_type} for block ${block_id}`, { + params: operation.params ? 
Object.keys(operation.params) : [], + currentBlockCount: Object.keys((modifiedState as any).blocks || {}).length, + }) + + handler(operation, ctx) + } + + // Pass 2: Add all deferred connections from add/insert operations + // Now all blocks exist, so connections can be safely created + if (ctx.deferredConnections.length > 0) { + logger.info('Processing deferred connections from add/insert operations', { + deferredConnectionCount: ctx.deferredConnections.length, + totalBlocks: Object.keys((modifiedState as any).blocks || {}).length, + }) + + for (const { blockId, connections } of ctx.deferredConnections) { + // Verify the source block still exists (it might have been deleted by a later operation) + if (!(modifiedState as any).blocks[blockId]) { + logger.warn('Source block no longer exists for deferred connection', { + blockId, + availableBlocks: Object.keys((modifiedState as any).blocks || {}), + }) + continue + } + + addConnectionsAsEdges(modifiedState, blockId, connections, logger, skippedItems) + } + + logger.info('Finished processing deferred connections', { + totalEdges: (modifiedState as any).edges?.length, + }) + } + // Regenerate loops and parallels after modifications + + ;(modifiedState as any).loops = generateLoopBlocks((modifiedState as any).blocks) + ;(modifiedState as any).parallels = generateParallelBlocks((modifiedState as any).blocks) + + // Validate all blocks have types before returning + const blocksWithoutType = Object.entries((modifiedState as any).blocks || {}) + .filter(([_, block]: [string, any]) => !block.type || block.type === undefined) + .map(([id, block]: [string, any]) => ({ id, block })) + + if (blocksWithoutType.length > 0) { + logger.error('Blocks without type after operations:', { + blocksWithoutType: blocksWithoutType.map(({ id, block }) => ({ + id, + type: block.type, + name: block.name, + keys: Object.keys(block), + })), + }) + + // Attempt to fix by removing type-less blocks + blocksWithoutType.forEach(({ id }) => { + delete 
(modifiedState as any).blocks[id] + }) + + // Remove edges connected to removed blocks + const removedIds = new Set(blocksWithoutType.map(({ id }) => id)) + ;(modifiedState as any).edges = ((modifiedState as any).edges || []).filter( + (edge: any) => !removedIds.has(edge.source) && !removedIds.has(edge.target) + ) + } + + return { state: modifiedState, validationErrors, skippedItems } +} diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/index.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/index.ts new file mode 100644 index 000000000..5532c404a --- /dev/null +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/index.ts @@ -0,0 +1,285 @@ +import { db } from '@sim/db' +import { workflow as workflowTable } from '@sim/db/schema' +import { createLogger } from '@sim/logger' +import { eq } from 'drizzle-orm' +import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' +import { applyAutoLayout } from '@/lib/workflows/autolayout' +import { extractAndPersistCustomTools } from '@/lib/workflows/persistence/custom-tools-persistence' +import { + loadWorkflowFromNormalizedTables, + saveWorkflowToNormalizedTables, +} from '@/lib/workflows/persistence/utils' +import { validateWorkflowState } from '@/lib/workflows/sanitization/validation' +import { getUserPermissionConfig } from '@/ee/access-control/utils/permission-check' +import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils' +import { applyOperationsToWorkflowState } from './engine' +import type { EditWorkflowParams, ValidationError } from './types' +import { preValidateCredentialInputs, validateWorkflowSelectorIds } from './validation' + +async function getCurrentWorkflowStateFromDb( + workflowId: string +): Promise<{ workflowState: any; subBlockValues: Record> }> { + const logger = createLogger('EditWorkflowServerTool') + const [workflowRecord] = await db + .select() + .from(workflowTable) + .where(eq(workflowTable.id, 
workflowId)) + .limit(1) + if (!workflowRecord) throw new Error(`Workflow ${workflowId} not found in database`) + const normalized = await loadWorkflowFromNormalizedTables(workflowId) + if (!normalized) throw new Error('Workflow has no normalized data') + + // Validate and fix blocks without types + const blocks = { ...normalized.blocks } + const invalidBlocks: string[] = [] + + Object.entries(blocks).forEach(([id, block]: [string, any]) => { + if (!block.type) { + logger.warn(`Block ${id} loaded without type from database`, { + blockKeys: Object.keys(block), + blockName: block.name, + }) + invalidBlocks.push(id) + } + }) + + // Remove invalid blocks + invalidBlocks.forEach((id) => delete blocks[id]) + + // Remove edges connected to invalid blocks + const edges = normalized.edges.filter( + (edge: any) => !invalidBlocks.includes(edge.source) && !invalidBlocks.includes(edge.target) + ) + + const workflowState: any = { + blocks, + edges, + loops: normalized.loops || {}, + parallels: normalized.parallels || {}, + } + const subBlockValues: Record> = {} + Object.entries(normalized.blocks).forEach(([blockId, block]) => { + subBlockValues[blockId] = {} + Object.entries((block as any).subBlocks || {}).forEach(([subId, sub]) => { + if ((sub as any).value !== undefined) subBlockValues[blockId][subId] = (sub as any).value + }) + }) + return { workflowState, subBlockValues } +} + +export const editWorkflowServerTool: BaseServerTool = { + name: 'edit_workflow', + async execute(params: EditWorkflowParams, context?: { userId: string }): Promise { + const logger = createLogger('EditWorkflowServerTool') + const { operations, workflowId, currentUserWorkflow } = params + if (!Array.isArray(operations) || operations.length === 0) { + throw new Error('operations are required and must be an array') + } + if (!workflowId) throw new Error('workflowId is required') + + logger.info('Executing edit_workflow', { + operationCount: operations.length, + workflowId, + hasCurrentUserWorkflow: 
!!currentUserWorkflow, + }) + + // Get current workflow state + let workflowState: any + if (currentUserWorkflow) { + try { + workflowState = JSON.parse(currentUserWorkflow) + } catch (error) { + logger.error('Failed to parse currentUserWorkflow', error) + throw new Error('Invalid currentUserWorkflow format') + } + } else { + const fromDb = await getCurrentWorkflowStateFromDb(workflowId) + workflowState = fromDb.workflowState + } + + // Get permission config for the user + const permissionConfig = context?.userId ? await getUserPermissionConfig(context.userId) : null + + // Pre-validate credential and apiKey inputs before applying operations + // This filters out invalid credentials and apiKeys for hosted models + let operationsToApply = operations + const credentialErrors: ValidationError[] = [] + if (context?.userId) { + const { filteredOperations, errors: credErrors } = await preValidateCredentialInputs( + operations, + { userId: context.userId }, + workflowState + ) + operationsToApply = filteredOperations + credentialErrors.push(...credErrors) + } + + // Apply operations directly to the workflow state + const { + state: modifiedWorkflowState, + validationErrors, + skippedItems, + } = applyOperationsToWorkflowState(workflowState, operationsToApply, permissionConfig) + + // Add credential validation errors + validationErrors.push(...credentialErrors) + + // Get workspaceId for selector validation + let workspaceId: string | undefined + try { + const [workflowRecord] = await db + .select({ workspaceId: workflowTable.workspaceId }) + .from(workflowTable) + .where(eq(workflowTable.id, workflowId)) + .limit(1) + workspaceId = workflowRecord?.workspaceId ?? 
undefined + } catch (error) { + logger.warn('Failed to get workspaceId for selector validation', { error, workflowId }) + } + + // Validate selector IDs exist in the database + if (context?.userId) { + try { + const selectorErrors = await validateWorkflowSelectorIds(modifiedWorkflowState, { + userId: context.userId, + workspaceId, + }) + validationErrors.push(...selectorErrors) + } catch (error) { + logger.warn('Selector ID validation failed', { + error: error instanceof Error ? error.message : String(error), + }) + } + } + + // Validate the workflow state + const validation = validateWorkflowState(modifiedWorkflowState, { sanitize: true }) + + if (!validation.valid) { + logger.error('Edited workflow state is invalid', { + errors: validation.errors, + warnings: validation.warnings, + }) + throw new Error(`Invalid edited workflow: ${validation.errors.join('; ')}`) + } + + if (validation.warnings.length > 0) { + logger.warn('Edited workflow validation warnings', { + warnings: validation.warnings, + }) + } + + // Extract and persist custom tools to database (reuse workspaceId from selector validation) + if (context?.userId && workspaceId) { + try { + const finalWorkflowState = validation.sanitizedState || modifiedWorkflowState + const { saved, errors } = await extractAndPersistCustomTools( + finalWorkflowState, + workspaceId, + context.userId + ) + + if (saved > 0) { + logger.info(`Persisted ${saved} custom tool(s) to database`, { workflowId }) + } + + if (errors.length > 0) { + logger.warn('Some custom tools failed to persist', { errors, workflowId }) + } + } catch (error) { + logger.error('Failed to persist custom tools', { error, workflowId }) + } + } else if (context?.userId && !workspaceId) { + logger.warn('Workflow has no workspaceId, skipping custom tools persistence', { + workflowId, + }) + } else { + logger.warn('No userId in context - skipping custom tools persistence', { workflowId }) + } + + logger.info('edit_workflow successfully applied operations', { + 
operationCount: operations.length, + blocksCount: Object.keys(modifiedWorkflowState.blocks).length, + edgesCount: modifiedWorkflowState.edges.length, + inputValidationErrors: validationErrors.length, + skippedItemsCount: skippedItems.length, + schemaValidationErrors: validation.errors.length, + validationWarnings: validation.warnings.length, + }) + + // Format validation errors for LLM feedback + const inputErrors = + validationErrors.length > 0 + ? validationErrors.map((e) => `Block "${e.blockId}" (${e.blockType}): ${e.error}`) + : undefined + + // Format skipped items for LLM feedback + const skippedMessages = + skippedItems.length > 0 ? skippedItems.map((item) => item.reason) : undefined + + // Persist the workflow state to the database + const finalWorkflowState = validation.sanitizedState || modifiedWorkflowState + + // Apply autolayout to position blocks properly + const layoutResult = applyAutoLayout(finalWorkflowState.blocks, finalWorkflowState.edges, { + horizontalSpacing: 250, + verticalSpacing: 100, + padding: { x: 100, y: 100 }, + }) + + const layoutedBlocks = + layoutResult.success && layoutResult.blocks ? 
layoutResult.blocks : finalWorkflowState.blocks + + if (!layoutResult.success) { + logger.warn('Autolayout failed, using default positions', { + workflowId, + error: layoutResult.error, + }) + } + + const workflowStateForDb = { + blocks: layoutedBlocks, + edges: finalWorkflowState.edges, + loops: generateLoopBlocks(layoutedBlocks as any), + parallels: generateParallelBlocks(layoutedBlocks as any), + lastSaved: Date.now(), + isDeployed: false, + } + + const saveResult = await saveWorkflowToNormalizedTables(workflowId, workflowStateForDb as any) + if (!saveResult.success) { + logger.error('Failed to persist workflow state to database', { + workflowId, + error: saveResult.error, + }) + throw new Error(`Failed to save workflow: ${saveResult.error}`) + } + + // Update workflow's lastSynced timestamp + await db + .update(workflowTable) + .set({ + lastSynced: new Date(), + updatedAt: new Date(), + }) + .where(eq(workflowTable.id, workflowId)) + + logger.info('Workflow state persisted to database', { workflowId }) + + // Return the modified workflow state with autolayout applied + return { + success: true, + workflowState: { ...finalWorkflowState, blocks: layoutedBlocks }, + // Include input validation errors so the LLM can see what was rejected + ...(inputErrors && { + inputValidationErrors: inputErrors, + inputValidationMessage: `${inputErrors.length} input(s) were rejected due to validation errors. The workflow was still updated with valid inputs only. Errors: ${inputErrors.join('; ')}`, + }), + // Include skipped items so the LLM can see what operations were skipped + ...(skippedMessages && { + skippedItems: skippedMessages, + skippedItemsMessage: `${skippedItems.length} operation(s) were skipped due to invalid references. 
Details: ${skippedMessages.join('; ')}`, + }), + } + }, +} diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/operations.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/operations.ts new file mode 100644 index 000000000..58b3b1ab5 --- /dev/null +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/operations.ts @@ -0,0 +1,1017 @@ +import { createLogger } from '@sim/logger' +import { isValidKey } from '@/lib/workflows/sanitization/key-validation' +import { TriggerUtils } from '@/lib/workflows/triggers/triggers' +import { getBlock } from '@/blocks/registry' +import { normalizeName, RESERVED_BLOCK_NAMES } from '@/executor/constants' +import { TRIGGER_RUNTIME_SUBBLOCK_IDS } from '@/triggers/constants' +import { + addConnectionsAsEdges, + applyTriggerConfigToBlockSubblocks, + createBlockFromParams, + createValidatedEdge, + filterDisallowedTools, + normalizeArrayWithIds, + normalizeResponseFormat, + normalizeTools, + shouldNormalizeArrayIds, + updateCanonicalModesForInputs, +} from './builders' +import type { EditWorkflowOperation, OperationContext } from './types' +import { logSkippedItem } from './types' +import { + findBlockWithDuplicateNormalizedName, + isBlockTypeAllowed, + validateInputsForBlock, +} from './validation' + +const logger = createLogger('EditWorkflowServerTool') + +export function handleDeleteOperation(op: EditWorkflowOperation, ctx: OperationContext): void { + const { modifiedState, skippedItems } = ctx + const { block_id } = op + + if (!modifiedState.blocks[block_id]) { + logSkippedItem(skippedItems, { + type: 'block_not_found', + operationType: 'delete', + blockId: block_id, + reason: `Block "${block_id}" does not exist and cannot be deleted`, + }) + return + } + + // Check if block is locked or inside a locked container + const deleteBlock = modifiedState.blocks[block_id] + const deleteParentId = deleteBlock.data?.parentId as string | undefined + const deleteParentLocked = deleteParentId ? 
modifiedState.blocks[deleteParentId]?.locked : false + if (deleteBlock.locked || deleteParentLocked) { + logSkippedItem(skippedItems, { + type: 'block_locked', + operationType: 'delete', + blockId: block_id, + reason: deleteParentLocked + ? `Block "${block_id}" is inside locked container "${deleteParentId}" and cannot be deleted` + : `Block "${block_id}" is locked and cannot be deleted`, + }) + return + } + + // Find all child blocks to remove + const blocksToRemove = new Set([block_id]) + const findChildren = (parentId: string) => { + Object.entries(modifiedState.blocks).forEach(([childId, child]: [string, any]) => { + if (child.data?.parentId === parentId) { + blocksToRemove.add(childId) + findChildren(childId) + } + }) + } + findChildren(block_id) + + // Remove blocks + blocksToRemove.forEach((id) => delete modifiedState.blocks[id]) + + // Remove edges connected to deleted blocks + modifiedState.edges = modifiedState.edges.filter( + (edge: any) => !blocksToRemove.has(edge.source) && !blocksToRemove.has(edge.target) + ) +} + +export function handleEditOperation(op: EditWorkflowOperation, ctx: OperationContext): void { + const { modifiedState, skippedItems, validationErrors, permissionConfig } = ctx + const { block_id, params } = op + + if (!modifiedState.blocks[block_id]) { + logSkippedItem(skippedItems, { + type: 'block_not_found', + operationType: 'edit', + blockId: block_id, + reason: `Block "${block_id}" does not exist and cannot be edited`, + }) + return + } + + const block = modifiedState.blocks[block_id] + + // Check if block is locked or inside a locked container + const editParentId = block.data?.parentId as string | undefined + const editParentLocked = editParentId ? modifiedState.blocks[editParentId]?.locked : false + if (block.locked || editParentLocked) { + logSkippedItem(skippedItems, { + type: 'block_locked', + operationType: 'edit', + blockId: block_id, + reason: editParentLocked + ? 
`Block "${block_id}" is inside locked container "${editParentId}" and cannot be edited` + : `Block "${block_id}" is locked and cannot be edited`, + }) + return + } + + // Ensure block has essential properties + if (!block.type) { + logger.warn(`Block ${block_id} missing type property, skipping edit`, { + blockKeys: Object.keys(block), + blockData: JSON.stringify(block), + }) + logSkippedItem(skippedItems, { + type: 'block_not_found', + operationType: 'edit', + blockId: block_id, + reason: `Block "${block_id}" exists but has no type property`, + }) + return + } + + // Update inputs (convert to subBlocks format) + if (params?.inputs) { + if (!block.subBlocks) block.subBlocks = {} + + // Validate inputs against block configuration + const validationResult = validateInputsForBlock(block.type, params.inputs, block_id) + validationErrors.push(...validationResult.errors) + + Object.entries(validationResult.validInputs).forEach(([inputKey, value]) => { + // Normalize common field name variations (LLM may use plural/singular inconsistently) + let key = inputKey + if (key === 'credentials' && !block.subBlocks.credentials && block.subBlocks.credential) { + key = 'credential' + } + + if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { + return + } + let sanitizedValue = value + + // Normalize array subblocks with id fields (inputFormat, table rows, etc.) 
+ if (shouldNormalizeArrayIds(key)) { + sanitizedValue = normalizeArrayWithIds(value) + } + + // Special handling for tools - normalize and filter disallowed + if (key === 'tools' && Array.isArray(value)) { + sanitizedValue = filterDisallowedTools( + normalizeTools(value), + permissionConfig, + block_id, + skippedItems + ) + } + + // Special handling for responseFormat - normalize to ensure consistent format + if (key === 'responseFormat' && value) { + sanitizedValue = normalizeResponseFormat(value) + } + + if (!block.subBlocks[key]) { + block.subBlocks[key] = { + id: key, + type: 'short-input', + value: sanitizedValue, + } + } else { + const existingValue = block.subBlocks[key].value + const valuesEqual = + typeof existingValue === 'object' || typeof sanitizedValue === 'object' + ? JSON.stringify(existingValue) === JSON.stringify(sanitizedValue) + : existingValue === sanitizedValue + + if (!valuesEqual) { + block.subBlocks[key].value = sanitizedValue + } + } + }) + + if ( + Object.hasOwn(params.inputs, 'triggerConfig') && + block.subBlocks.triggerConfig && + typeof block.subBlocks.triggerConfig.value === 'object' + ) { + applyTriggerConfigToBlockSubblocks(block, block.subBlocks.triggerConfig.value) + } + + // Update loop/parallel configuration in block.data (strict validation) + if (block.type === 'loop') { + block.data = block.data || {} + // loopType is always valid + if (params.inputs.loopType !== undefined) { + const validLoopTypes = ['for', 'forEach', 'while', 'doWhile'] + if (validLoopTypes.includes(params.inputs.loopType)) { + block.data.loopType = params.inputs.loopType + } + } + const effectiveLoopType = params.inputs.loopType ?? block.data.loopType ?? 
'for' + // iterations only valid for 'for' loopType + if (params.inputs.iterations !== undefined && effectiveLoopType === 'for') { + block.data.count = params.inputs.iterations + } + // collection only valid for 'forEach' loopType + if (params.inputs.collection !== undefined && effectiveLoopType === 'forEach') { + block.data.collection = params.inputs.collection + } + // condition only valid for 'while' or 'doWhile' loopType + if ( + params.inputs.condition !== undefined && + (effectiveLoopType === 'while' || effectiveLoopType === 'doWhile') + ) { + if (effectiveLoopType === 'doWhile') { + block.data.doWhileCondition = params.inputs.condition + } else { + block.data.whileCondition = params.inputs.condition + } + } + } else if (block.type === 'parallel') { + block.data = block.data || {} + // parallelType is always valid + if (params.inputs.parallelType !== undefined) { + const validParallelTypes = ['count', 'collection'] + if (validParallelTypes.includes(params.inputs.parallelType)) { + block.data.parallelType = params.inputs.parallelType + } + } + const effectiveParallelType = params.inputs.parallelType ?? block.data.parallelType ?? 
'count' + // count only valid for 'count' parallelType + if (params.inputs.count !== undefined && effectiveParallelType === 'count') { + block.data.count = params.inputs.count + } + // collection only valid for 'collection' parallelType + if (params.inputs.collection !== undefined && effectiveParallelType === 'collection') { + block.data.collection = params.inputs.collection + } + } + + const editBlockConfig = getBlock(block.type) + if (editBlockConfig) { + updateCanonicalModesForInputs( + block, + Object.keys(validationResult.validInputs), + editBlockConfig + ) + } + } + + // Update basic properties + if (params?.type !== undefined) { + // Special container types (loop, parallel) are not in the block registry but are valid + const isContainerType = params.type === 'loop' || params.type === 'parallel' + + // Validate type before setting (skip validation for container types) + const blockConfig = getBlock(params.type) + if (!blockConfig && !isContainerType) { + logSkippedItem(skippedItems, { + type: 'invalid_block_type', + operationType: 'edit', + blockId: block_id, + reason: `Invalid block type "${params.type}" - type change skipped`, + details: { requestedType: params.type }, + }) + } else if (!isContainerType && !isBlockTypeAllowed(params.type, permissionConfig)) { + logSkippedItem(skippedItems, { + type: 'block_not_allowed', + operationType: 'edit', + blockId: block_id, + reason: `Block type "${params.type}" is not allowed by permission group - type change skipped`, + details: { requestedType: params.type }, + }) + } else { + block.type = params.type + } + } + if (params?.name !== undefined) { + const normalizedName = normalizeName(params.name) + if (!normalizedName) { + logSkippedItem(skippedItems, { + type: 'missing_required_params', + operationType: 'edit', + blockId: block_id, + reason: `Cannot rename to empty name`, + details: { requestedName: params.name }, + }) + } else if ((RESERVED_BLOCK_NAMES as readonly string[]).includes(normalizedName)) { + 
logSkippedItem(skippedItems, { + type: 'reserved_block_name', + operationType: 'edit', + blockId: block_id, + reason: `Cannot rename to "${params.name}" - this is a reserved name`, + details: { requestedName: params.name }, + }) + } else { + const conflictingBlock = findBlockWithDuplicateNormalizedName( + modifiedState.blocks, + params.name, + block_id + ) + + if (conflictingBlock) { + logSkippedItem(skippedItems, { + type: 'duplicate_block_name', + operationType: 'edit', + blockId: block_id, + reason: `Cannot rename to "${params.name}" - conflicts with "${conflictingBlock[1].name}"`, + details: { + requestedName: params.name, + conflictingBlockId: conflictingBlock[0], + conflictingBlockName: conflictingBlock[1].name, + }, + }) + } else { + block.name = params.name + } + } + } + + // Handle trigger mode toggle + if (typeof params?.triggerMode === 'boolean') { + block.triggerMode = params.triggerMode + + if (params.triggerMode === true) { + // Remove all incoming edges when enabling trigger mode + modifiedState.edges = modifiedState.edges.filter((edge: any) => edge.target !== block_id) + } + } + + // Handle advanced mode toggle + if (typeof params?.advancedMode === 'boolean') { + block.advancedMode = params.advancedMode + } + + // Handle nested nodes update (for loops/parallels) + if (params?.nestedNodes) { + // Remove all existing child blocks + const existingChildren = Object.keys(modifiedState.blocks).filter( + (id) => modifiedState.blocks[id].data?.parentId === block_id + ) + existingChildren.forEach((childId) => delete modifiedState.blocks[childId]) + + // Remove edges to/from removed children + modifiedState.edges = modifiedState.edges.filter( + (edge: any) => + !existingChildren.includes(edge.source) && !existingChildren.includes(edge.target) + ) + + // Add new nested blocks + Object.entries(params.nestedNodes).forEach(([childId, childBlock]: [string, any]) => { + // Validate childId is a valid string + if (!isValidKey(childId)) { + 
logSkippedItem(skippedItems, { + type: 'missing_required_params', + operationType: 'add_nested_node', + blockId: String(childId || 'invalid'), + reason: `Invalid childId "${childId}" in nestedNodes - child block skipped`, + }) + logger.error('Invalid childId detected in nestedNodes', { + parentBlockId: block_id, + childId, + childId_type: typeof childId, + }) + return + } + + if (childBlock.type === 'loop' || childBlock.type === 'parallel') { + logSkippedItem(skippedItems, { + type: 'nested_subflow_not_allowed', + operationType: 'edit_nested_node', + blockId: childId, + reason: `Cannot nest ${childBlock.type} inside ${block.type} - nested subflows are not supported`, + details: { parentType: block.type, childType: childBlock.type }, + }) + return + } + + const childBlockState = createBlockFromParams( + childId, + childBlock, + block_id, + validationErrors, + permissionConfig, + skippedItems + ) + modifiedState.blocks[childId] = childBlockState + + // Add connections for child block + if (childBlock.connections) { + addConnectionsAsEdges(modifiedState, childId, childBlock.connections, logger, skippedItems) + } + }) + + // Update loop/parallel configuration based on type (strict validation) + if (block.type === 'loop') { + block.data = block.data || {} + // loopType is always valid + if (params.inputs?.loopType) { + const validLoopTypes = ['for', 'forEach', 'while', 'doWhile'] + if (validLoopTypes.includes(params.inputs.loopType)) { + block.data.loopType = params.inputs.loopType + } + } + const effectiveLoopType = params.inputs?.loopType ?? block.data.loopType ?? 
'for' + // iterations only valid for 'for' loopType + if (params.inputs?.iterations && effectiveLoopType === 'for') { + block.data.count = params.inputs.iterations + } + // collection only valid for 'forEach' loopType + if (params.inputs?.collection && effectiveLoopType === 'forEach') { + block.data.collection = params.inputs.collection + } + // condition only valid for 'while' or 'doWhile' loopType + if ( + params.inputs?.condition && + (effectiveLoopType === 'while' || effectiveLoopType === 'doWhile') + ) { + if (effectiveLoopType === 'doWhile') { + block.data.doWhileCondition = params.inputs.condition + } else { + block.data.whileCondition = params.inputs.condition + } + } + } else if (block.type === 'parallel') { + block.data = block.data || {} + // parallelType is always valid + if (params.inputs?.parallelType) { + const validParallelTypes = ['count', 'collection'] + if (validParallelTypes.includes(params.inputs.parallelType)) { + block.data.parallelType = params.inputs.parallelType + } + } + const effectiveParallelType = + params.inputs?.parallelType ?? block.data.parallelType ?? 
'count' + // count only valid for 'count' parallelType + if (params.inputs?.count && effectiveParallelType === 'count') { + block.data.count = params.inputs.count + } + // collection only valid for 'collection' parallelType + if (params.inputs?.collection && effectiveParallelType === 'collection') { + block.data.collection = params.inputs.collection + } + } + } + + // Handle connections update (convert to edges) + if (params?.connections) { + modifiedState.edges = modifiedState.edges.filter((edge: any) => edge.source !== block_id) + + Object.entries(params.connections).forEach(([connectionType, targets]) => { + if (targets === null) return + + const mapConnectionTypeToHandle = (type: string): string => { + if (type === 'success') return 'source' + if (type === 'error') return 'error' + return type + } + + const sourceHandle = mapConnectionTypeToHandle(connectionType) + + const addEdgeForTarget = (targetBlock: string, targetHandle?: string) => { + createValidatedEdge( + modifiedState, + block_id, + targetBlock, + sourceHandle, + targetHandle || 'target', + 'edit', + logger, + skippedItems + ) + } + + if (typeof targets === 'string') { + addEdgeForTarget(targets) + } else if (Array.isArray(targets)) { + targets.forEach((target: any) => { + if (typeof target === 'string') { + addEdgeForTarget(target) + } else if (target?.block) { + addEdgeForTarget(target.block, target.handle) + } + }) + } else if (typeof targets === 'object' && (targets as any)?.block) { + addEdgeForTarget((targets as any).block, (targets as any).handle) + } + }) + } + + // Handle edge removal + if (params?.removeEdges && Array.isArray(params.removeEdges)) { + params.removeEdges.forEach(({ targetBlockId, sourceHandle = 'source' }) => { + modifiedState.edges = modifiedState.edges.filter( + (edge: any) => + !( + edge.source === block_id && + edge.target === targetBlockId && + edge.sourceHandle === sourceHandle + ) + ) + }) + } +} + +export function handleAddOperation(op: EditWorkflowOperation, ctx: 
OperationContext): void { + const { modifiedState, skippedItems, validationErrors, permissionConfig, deferredConnections } = + ctx + const { block_id, params } = op + + const addNormalizedName = params?.name ? normalizeName(params.name) : '' + if (!params?.type || !params?.name || !addNormalizedName) { + logSkippedItem(skippedItems, { + type: 'missing_required_params', + operationType: 'add', + blockId: block_id, + reason: `Missing required params (type or name) for adding block "${block_id}"`, + details: { hasType: !!params?.type, hasName: !!params?.name }, + }) + return + } + + if ((RESERVED_BLOCK_NAMES as readonly string[]).includes(addNormalizedName)) { + logSkippedItem(skippedItems, { + type: 'reserved_block_name', + operationType: 'add', + blockId: block_id, + reason: `Block name "${params.name}" is a reserved name and cannot be used`, + details: { requestedName: params.name }, + }) + return + } + + const conflictingBlock = findBlockWithDuplicateNormalizedName( + modifiedState.blocks, + params.name, + block_id + ) + + if (conflictingBlock) { + logSkippedItem(skippedItems, { + type: 'duplicate_block_name', + operationType: 'add', + blockId: block_id, + reason: `Block name "${params.name}" conflicts with existing block "${conflictingBlock[1].name}"`, + details: { + requestedName: params.name, + conflictingBlockId: conflictingBlock[0], + conflictingBlockName: conflictingBlock[1].name, + }, + }) + return + } + + // Special container types (loop, parallel) are not in the block registry but are valid + const isContainerType = params.type === 'loop' || params.type === 'parallel' + + // Validate block type before adding (skip validation for container types) + const addBlockConfig = getBlock(params.type) + if (!addBlockConfig && !isContainerType) { + logSkippedItem(skippedItems, { + type: 'invalid_block_type', + operationType: 'add', + blockId: block_id, + reason: `Invalid block type "${params.type}" - block not added`, + details: { requestedType: params.type }, + }) 
+ return + } + + // Check if block type is allowed by permission group + if (!isContainerType && !isBlockTypeAllowed(params.type, permissionConfig)) { + logSkippedItem(skippedItems, { + type: 'block_not_allowed', + operationType: 'add', + blockId: block_id, + reason: `Block type "${params.type}" is not allowed by permission group - block not added`, + details: { requestedType: params.type }, + }) + return + } + + const triggerIssue = TriggerUtils.getTriggerAdditionIssue(modifiedState.blocks, params.type) + if (triggerIssue) { + logSkippedItem(skippedItems, { + type: 'duplicate_trigger', + operationType: 'add', + blockId: block_id, + reason: `Cannot add ${triggerIssue.triggerName} - a workflow can only have one`, + details: { requestedType: params.type, issue: triggerIssue.issue }, + }) + return + } + + // Check single-instance block constraints (e.g., Response block) + const singleInstanceIssue = TriggerUtils.getSingleInstanceBlockIssue( + modifiedState.blocks, + params.type + ) + if (singleInstanceIssue) { + logSkippedItem(skippedItems, { + type: 'duplicate_single_instance_block', + operationType: 'add', + blockId: block_id, + reason: `Cannot add ${singleInstanceIssue.blockName} - a workflow can only have one`, + details: { requestedType: params.type }, + }) + return + } + + // Create new block with proper structure + const newBlock = createBlockFromParams( + block_id, + params, + undefined, + validationErrors, + permissionConfig, + skippedItems + ) + + // Set loop/parallel data on parent block BEFORE adding to blocks (strict validation) + if (params.nestedNodes) { + if (params.type === 'loop') { + const validLoopTypes = ['for', 'forEach', 'while', 'doWhile'] + const loopType = + params.inputs?.loopType && validLoopTypes.includes(params.inputs.loopType) + ? 
params.inputs.loopType + : 'for' + newBlock.data = { + ...newBlock.data, + loopType, + // Only include type-appropriate fields + ...(loopType === 'forEach' && + params.inputs?.collection && { collection: params.inputs.collection }), + ...(loopType === 'for' && params.inputs?.iterations && { count: params.inputs.iterations }), + ...(loopType === 'while' && + params.inputs?.condition && { whileCondition: params.inputs.condition }), + ...(loopType === 'doWhile' && + params.inputs?.condition && { doWhileCondition: params.inputs.condition }), + } + } else if (params.type === 'parallel') { + const validParallelTypes = ['count', 'collection'] + const parallelType = + params.inputs?.parallelType && validParallelTypes.includes(params.inputs.parallelType) + ? params.inputs.parallelType + : 'count' + newBlock.data = { + ...newBlock.data, + parallelType, + // Only include type-appropriate fields + ...(parallelType === 'collection' && + params.inputs?.collection && { collection: params.inputs.collection }), + ...(parallelType === 'count' && params.inputs?.count && { count: params.inputs.count }), + } + } + } + + // Add parent block FIRST before adding children + // This ensures children can reference valid parentId + modifiedState.blocks[block_id] = newBlock + + // Handle nested nodes (for loops/parallels created from scratch) + if (params.nestedNodes) { + // Defensive check: verify parent is not locked before adding children + // (Parent was just created with locked: false, but check for consistency) + const parentBlock = modifiedState.blocks[block_id] + if (parentBlock?.locked) { + logSkippedItem(skippedItems, { + type: 'block_locked', + operationType: 'add_nested_nodes', + blockId: block_id, + reason: `Container "${block_id}" is locked - cannot add nested nodes`, + }) + return + } + + Object.entries(params.nestedNodes).forEach(([childId, childBlock]: [string, any]) => { + // Validate childId is a valid string + if (!isValidKey(childId)) { + logSkippedItem(skippedItems, { + 
type: 'missing_required_params', + operationType: 'add_nested_node', + blockId: String(childId || 'invalid'), + reason: `Invalid childId "${childId}" in nestedNodes - child block skipped`, + }) + logger.error('Invalid childId detected in nestedNodes', { + parentBlockId: block_id, + childId, + childId_type: typeof childId, + }) + return + } + + if (childBlock.type === 'loop' || childBlock.type === 'parallel') { + logSkippedItem(skippedItems, { + type: 'nested_subflow_not_allowed', + operationType: 'add_nested_node', + blockId: childId, + reason: `Cannot nest ${childBlock.type} inside ${params.type} - nested subflows are not supported`, + details: { parentType: params.type, childType: childBlock.type }, + }) + return + } + + const childBlockState = createBlockFromParams( + childId, + childBlock, + block_id, + validationErrors, + permissionConfig, + skippedItems + ) + modifiedState.blocks[childId] = childBlockState + + // Defer connection processing to ensure all blocks exist first + if (childBlock.connections) { + deferredConnections.push({ + blockId: childId, + connections: childBlock.connections, + }) + } + }) + } + + // Defer connection processing to ensure all blocks exist first (pass 2) + if (params.connections) { + deferredConnections.push({ + blockId: block_id, + connections: params.connections, + }) + } +} + +export function handleInsertIntoSubflowOperation( + op: EditWorkflowOperation, + ctx: OperationContext +): void { + const { modifiedState, skippedItems, validationErrors, permissionConfig, deferredConnections } = + ctx + const { block_id, params } = op + + const subflowId = params?.subflowId + if (!subflowId || !params?.type || !params?.name) { + logSkippedItem(skippedItems, { + type: 'missing_required_params', + operationType: 'insert_into_subflow', + blockId: block_id, + reason: `Missing required params (subflowId, type, or name) for inserting block "${block_id}"`, + details: { + hasSubflowId: !!subflowId, + hasType: !!params?.type, + hasName: 
!!params?.name, + }, + }) + return + } + + const subflowBlock = modifiedState.blocks[subflowId] + if (!subflowBlock) { + logSkippedItem(skippedItems, { + type: 'invalid_subflow_parent', + operationType: 'insert_into_subflow', + blockId: block_id, + reason: `Subflow block "${subflowId}" not found - block "${block_id}" not inserted`, + details: { subflowId }, + }) + return + } + + // Check if subflow is locked + if (subflowBlock.locked) { + logSkippedItem(skippedItems, { + type: 'block_locked', + operationType: 'insert_into_subflow', + blockId: block_id, + reason: `Subflow "${subflowId}" is locked - cannot insert block "${block_id}"`, + details: { subflowId }, + }) + return + } + + if (subflowBlock.type !== 'loop' && subflowBlock.type !== 'parallel') { + logger.error('Subflow block has invalid type', { + subflowId, + type: subflowBlock.type, + block_id, + }) + return + } + + if (params.type === 'loop' || params.type === 'parallel') { + logSkippedItem(skippedItems, { + type: 'nested_subflow_not_allowed', + operationType: 'insert_into_subflow', + blockId: block_id, + reason: `Cannot nest ${params.type} inside ${subflowBlock.type} - nested subflows are not supported`, + details: { parentType: subflowBlock.type, childType: params.type }, + }) + return + } + + // Check if block already exists (moving into subflow) or is new + const existingBlock = modifiedState.blocks[block_id] + + if (existingBlock) { + if (existingBlock.type === 'loop' || existingBlock.type === 'parallel') { + logSkippedItem(skippedItems, { + type: 'nested_subflow_not_allowed', + operationType: 'insert_into_subflow', + blockId: block_id, + reason: `Cannot move ${existingBlock.type} into ${subflowBlock.type} - nested subflows are not supported`, + details: { parentType: subflowBlock.type, childType: existingBlock.type }, + }) + return + } + + // Check if existing block is locked + if (existingBlock.locked) { + logSkippedItem(skippedItems, { + type: 'block_locked', + operationType: 'insert_into_subflow', 
+ blockId: block_id, + reason: `Block "${block_id}" is locked and cannot be moved into a subflow`, + }) + return + } + + // Moving existing block into subflow - just update parent + existingBlock.data = { + ...existingBlock.data, + parentId: subflowId, + extent: 'parent' as const, + } + + // Update inputs if provided (with validation) + if (params.inputs) { + // Validate inputs against block configuration + const validationResult = validateInputsForBlock(existingBlock.type, params.inputs, block_id) + validationErrors.push(...validationResult.errors) + + Object.entries(validationResult.validInputs).forEach(([key, value]) => { + // Skip runtime subblock IDs (webhookId, triggerPath) + if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) { + return + } + + let sanitizedValue = value + + // Normalize array subblocks with id fields (inputFormat, table rows, etc.) + if (shouldNormalizeArrayIds(key)) { + sanitizedValue = normalizeArrayWithIds(value) + } + + // Special handling for tools - normalize and filter disallowed + if (key === 'tools' && Array.isArray(value)) { + sanitizedValue = filterDisallowedTools( + normalizeTools(value), + permissionConfig, + block_id, + skippedItems + ) + } + + // Special handling for responseFormat - normalize to ensure consistent format + if (key === 'responseFormat' && value) { + sanitizedValue = normalizeResponseFormat(value) + } + + if (!existingBlock.subBlocks[key]) { + existingBlock.subBlocks[key] = { + id: key, + type: 'short-input', + value: sanitizedValue, + } + } else { + existingBlock.subBlocks[key].value = sanitizedValue + } + }) + + const existingBlockConfig = getBlock(existingBlock.type) + if (existingBlockConfig) { + updateCanonicalModesForInputs( + existingBlock, + Object.keys(validationResult.validInputs), + existingBlockConfig + ) + } + } + } else { + // Special container types (loop, parallel) are not in the block registry but are valid + const isContainerType = params.type === 'loop' || params.type === 'parallel' + + // 
Validate block type before creating (skip validation for container types) + const insertBlockConfig = getBlock(params.type) + if (!insertBlockConfig && !isContainerType) { + logSkippedItem(skippedItems, { + type: 'invalid_block_type', + operationType: 'insert_into_subflow', + blockId: block_id, + reason: `Invalid block type "${params.type}" - block not inserted into subflow`, + details: { requestedType: params.type, subflowId }, + }) + return + } + + // Check if block type is allowed by permission group + if (!isContainerType && !isBlockTypeAllowed(params.type, permissionConfig)) { + logSkippedItem(skippedItems, { + type: 'block_not_allowed', + operationType: 'insert_into_subflow', + blockId: block_id, + reason: `Block type "${params.type}" is not allowed by permission group - block not inserted`, + details: { requestedType: params.type, subflowId }, + }) + return + } + + // Create new block as child of subflow + const newBlock = createBlockFromParams( + block_id, + params, + subflowId, + validationErrors, + permissionConfig, + skippedItems + ) + modifiedState.blocks[block_id] = newBlock + } + + // Defer connection processing to ensure all blocks exist first + // This is particularly important when multiple blocks are being inserted + // and they have connections to each other + if (params.connections) { + // Remove existing edges from this block first + modifiedState.edges = modifiedState.edges.filter((edge: any) => edge.source !== block_id) + + // Add to deferred connections list + deferredConnections.push({ + blockId: block_id, + connections: params.connections, + }) + } +} + +export function handleExtractFromSubflowOperation( + op: EditWorkflowOperation, + ctx: OperationContext +): void { + const { modifiedState, skippedItems } = ctx + const { block_id, params } = op + + const subflowId = params?.subflowId + if (!subflowId) { + logSkippedItem(skippedItems, { + type: 'missing_required_params', + operationType: 'extract_from_subflow', + blockId: block_id, + 
reason: `Missing subflowId for extracting block "${block_id}"`, + }) + return + } + + const block = modifiedState.blocks[block_id] + if (!block) { + logSkippedItem(skippedItems, { + type: 'block_not_found', + operationType: 'extract_from_subflow', + blockId: block_id, + reason: `Block "${block_id}" not found for extraction`, + }) + return + } + + // Check if block is locked + if (block.locked) { + logSkippedItem(skippedItems, { + type: 'block_locked', + operationType: 'extract_from_subflow', + blockId: block_id, + reason: `Block "${block_id}" is locked and cannot be extracted from subflow`, + }) + return + } + + // Check if parent subflow is locked + const parentSubflow = modifiedState.blocks[subflowId] + if (parentSubflow?.locked) { + logSkippedItem(skippedItems, { + type: 'block_locked', + operationType: 'extract_from_subflow', + blockId: block_id, + reason: `Subflow "${subflowId}" is locked - cannot extract block "${block_id}"`, + details: { subflowId }, + }) + return + } + + // Verify it's actually a child of this subflow + if (block.data?.parentId !== subflowId) { + logger.warn('Block is not a child of specified subflow', { + block_id, + actualParent: block.data?.parentId, + specifiedParent: subflowId, + }) + } + + // Remove parent relationship + if (block.data) { + block.data.parentId = undefined + block.data.extent = undefined + } + + // Note: We keep the block and its edges, just remove parent relationship + // The block becomes a root-level block +} diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/types.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/types.ts new file mode 100644 index 000000000..09b766e06 --- /dev/null +++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/types.ts @@ -0,0 +1,134 @@ +import { createLogger } from '@sim/logger' +import type { PermissionGroupConfig } from '@/lib/permission-groups/types' + +/** Selector subblock types that can be validated */ +export const SELECTOR_TYPES = new Set([ 
+  'oauth-input',
+  'knowledge-base-selector',
+  'document-selector',
+  'file-selector',
+  'project-selector',
+  'channel-selector',
+  'folder-selector',
+  'mcp-server-selector',
+  'mcp-tool-selector',
+  'workflow-selector',
+])
+
+const validationLogger = createLogger('EditWorkflowValidation')
+
+/** UUID v4 regex pattern for validation */
+export const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i
+
+/**
+ * Validation error for a specific field
+ */
+export interface ValidationError {
+  blockId: string
+  blockType: string
+  field: string
+  value: any
+  error: string
+}
+
+/**
+ * Types of items that can be skipped during operation application
+ */
+export type SkippedItemType =
+  | 'block_not_found'
+  | 'invalid_block_type'
+  | 'block_not_allowed'
+  | 'block_locked'
+  | 'tool_not_allowed'
+  | 'invalid_edge_target'
+  | 'invalid_edge_source'
+  | 'invalid_source_handle'
+  | 'invalid_target_handle'
+  | 'invalid_subblock_field'
+  | 'missing_required_params'
+  | 'invalid_subflow_parent'
+  | 'nested_subflow_not_allowed'
+  | 'duplicate_block_name'
+  | 'reserved_block_name'
+  | 'duplicate_trigger'
+  | 'duplicate_single_instance_block'
+
+/**
+ * Represents an item that was skipped during operation application
+ */
+export interface SkippedItem {
+  type: SkippedItemType
+  operationType: string
+  blockId: string
+  reason: string
+  details?: Record<string, any>
+}
+
+/**
+ * Logs and records a skipped item
+ */
+export function logSkippedItem(skippedItems: SkippedItem[], item: SkippedItem): void {
+  validationLogger.warn(`Skipped ${item.operationType} operation: ${item.reason}`, {
+    type: item.type,
+    operationType: item.operationType,
+    blockId: item.blockId,
+    ...(item.details && { details: item.details }),
+  })
+  skippedItems.push(item)
+}
+
+/**
+ * Result of input validation
+ */
+export interface ValidationResult {
+  validInputs: Record<string, any>
+  errors: ValidationError[]
+}
+
+/**
+ * Result of validating a single value
+ */
+export interface
ValueValidationResult {
+  valid: boolean
+  value?: any
+  error?: ValidationError
+}
+
+export interface EditWorkflowOperation {
+  operation_type: 'add' | 'edit' | 'delete' | 'insert_into_subflow' | 'extract_from_subflow'
+  block_id: string
+  params?: Record<string, any>
+}
+
+export interface EditWorkflowParams {
+  operations: EditWorkflowOperation[]
+  workflowId: string
+  currentUserWorkflow?: string
+}
+
+export interface EdgeHandleValidationResult {
+  valid: boolean
+  error?: string
+  /** The normalized handle to use (e.g., simple 'if' normalized to 'condition-{uuid}') */
+  normalizedHandle?: string
+}
+
+/**
+ * Result of applying operations to workflow state
+ */
+export interface ApplyOperationsResult {
+  state: any
+  validationErrors: ValidationError[]
+  skippedItems: SkippedItem[]
+}
+
+export interface OperationContext {
+  modifiedState: any
+  skippedItems: SkippedItem[]
+  validationErrors: ValidationError[]
+  permissionConfig: PermissionGroupConfig | null
+  deferredConnections: Array<{
+    blockId: string
+    connections: Record<string, any>
+  }>
+}
diff --git a/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/validation.ts b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/validation.ts
new file mode 100644
index 000000000..424be9d25
--- /dev/null
+++ b/apps/sim/lib/copilot/tools/server/workflow/edit-workflow/validation.ts
@@ -0,0 +1,1051 @@
+import { createLogger } from '@sim/logger'
+import { validateSelectorIds } from '@/lib/copilot/validation/selector-validator'
+import type { PermissionGroupConfig } from '@/lib/permission-groups/types'
+import { getBlock } from '@/blocks/registry'
+import type { SubBlockConfig } from '@/blocks/types'
+import { EDGE, normalizeName } from '@/executor/constants'
+import { TRIGGER_RUNTIME_SUBBLOCK_IDS } from '@/triggers/constants'
+import type {
+  EdgeHandleValidationResult,
+  EditWorkflowOperation,
+  ValidationError,
+  ValidationResult,
+  ValueValidationResult,
+} from './types'
+import { SELECTOR_TYPES } from './types'
+
+const
validationLogger = createLogger('EditWorkflowValidation')
+
+/**
+ * Finds an existing block with the same normalized name.
+ */
+export function findBlockWithDuplicateNormalizedName(
+  blocks: Record<string, any>,
+  name: string,
+  excludeBlockId: string
+): [string, any] | undefined {
+  const normalizedName = normalizeName(name)
+  return Object.entries(blocks).find(
+    ([blockId, block]: [string, any]) =>
+      blockId !== excludeBlockId && normalizeName(block.name || '') === normalizedName
+  )
+}
+
+/**
+ * Validates and filters inputs against a block's subBlock configuration
+ * Returns valid inputs and any validation errors encountered
+ */
+export function validateInputsForBlock(
+  blockType: string,
+  inputs: Record<string, any>,
+  blockId: string
+): ValidationResult {
+  const errors: ValidationError[] = []
+  const blockConfig = getBlock(blockType)
+
+  if (!blockConfig) {
+    // Unknown block type - return inputs as-is (let it fail later if invalid)
+    validationLogger.warn(`Unknown block type: ${blockType}, skipping validation`)
+    return { validInputs: inputs, errors: [] }
+  }
+
+  const validatedInputs: Record<string, any> = {}
+  const subBlockMap = new Map<string, SubBlockConfig>()
+
+  // Build map of subBlock id -> config
+  for (const subBlock of blockConfig.subBlocks) {
+    subBlockMap.set(subBlock.id, subBlock)
+  }
+
+  for (const [key, value] of Object.entries(inputs)) {
+    // Skip runtime subblock IDs
+    if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(key)) {
+      continue
+    }
+
+    const subBlockConfig = subBlockMap.get(key)
+
+    // If subBlock doesn't exist in config, skip it (unless it's a known dynamic field)
+    if (!subBlockConfig) {
+      // Some fields are valid but not in subBlocks (like loop/parallel config)
+      // Allow these through for special block types
+      if (blockType === 'loop' || blockType === 'parallel') {
+        validatedInputs[key] = value
+      } else {
+        errors.push({
+          blockId,
+          blockType,
+          field: key,
+          value,
+          error: `Unknown input field "${key}" for block type "${blockType}"`,
+        })
+      }
+      continue
+    }
+
+    // Note: We do NOT
check subBlockConfig.condition here. + // Conditions are for UI display logic (show/hide fields in the editor). + // For API/Copilot, any valid field in the block schema should be accepted. + // The runtime will use the relevant fields based on the actual operation. + + // Validate value based on subBlock type + const validationResult = validateValueForSubBlockType( + subBlockConfig, + value, + key, + blockType, + blockId + ) + if (validationResult.valid) { + validatedInputs[key] = validationResult.value + } else if (validationResult.error) { + errors.push(validationResult.error) + } + } + + return { validInputs: validatedInputs, errors } +} + +/** + * Validates a value against its expected subBlock type + * Returns validation result with the value or an error + */ +export function validateValueForSubBlockType( + subBlockConfig: SubBlockConfig, + value: any, + fieldName: string, + blockType: string, + blockId: string +): ValueValidationResult { + const { type } = subBlockConfig + + // Handle null/undefined - allow clearing fields + if (value === null || value === undefined) { + return { valid: true, value } + } + + switch (type) { + case 'dropdown': { + // Validate against allowed options + const options = + typeof subBlockConfig.options === 'function' + ? subBlockConfig.options() + : subBlockConfig.options + if (options && Array.isArray(options)) { + const validIds = options.map((opt) => opt.id) + if (!validIds.includes(value)) { + return { + valid: false, + error: { + blockId, + blockType, + field: fieldName, + value, + error: `Invalid dropdown value "${value}" for field "${fieldName}". Valid options: ${validIds.join(', ')}`, + }, + } + } + } + return { valid: true, value } + } + + case 'slider': { + // Validate numeric range + const numValue = typeof value === 'number' ? 
value : Number(value) + if (Number.isNaN(numValue)) { + return { + valid: false, + error: { + blockId, + blockType, + field: fieldName, + value, + error: `Invalid slider value "${value}" for field "${fieldName}" - must be a number`, + }, + } + } + // Clamp to range (allow but warn) + let clampedValue = numValue + if (subBlockConfig.min !== undefined && numValue < subBlockConfig.min) { + clampedValue = subBlockConfig.min + } + if (subBlockConfig.max !== undefined && numValue > subBlockConfig.max) { + clampedValue = subBlockConfig.max + } + return { + valid: true, + value: subBlockConfig.integer ? Math.round(clampedValue) : clampedValue, + } + } + + case 'switch': { + // Must be boolean + if (typeof value !== 'boolean') { + return { + valid: false, + error: { + blockId, + blockType, + field: fieldName, + value, + error: `Invalid switch value "${value}" for field "${fieldName}" - must be true or false`, + }, + } + } + return { valid: true, value } + } + + case 'file-upload': { + // File upload should be an object with specific properties or null + if (value === null) return { valid: true, value: null } + if (typeof value !== 'object') { + return { + valid: false, + error: { + blockId, + blockType, + field: fieldName, + value, + error: `Invalid file-upload value for field "${fieldName}" - expected object with name and path properties, or null`, + }, + } + } + // Validate file object has required properties + if (value && (!value.name || !value.path)) { + return { + valid: false, + error: { + blockId, + blockType, + field: fieldName, + value, + error: `Invalid file-upload object for field "${fieldName}" - must have "name" and "path" properties`, + }, + } + } + return { valid: true, value } + } + + case 'input-format': + case 'table': { + // Should be an array + if (!Array.isArray(value)) { + return { + valid: false, + error: { + blockId, + blockType, + field: fieldName, + value, + error: `Invalid ${type} value for field "${fieldName}" - expected an array`, + }, + } + } 
+ return { valid: true, value } + } + + case 'tool-input': { + // Should be an array of tool objects + if (!Array.isArray(value)) { + return { + valid: false, + error: { + blockId, + blockType, + field: fieldName, + value, + error: `Invalid tool-input value for field "${fieldName}" - expected an array of tool objects`, + }, + } + } + return { valid: true, value } + } + + case 'code': { + // Code must be a string (content can be JS, Python, JSON, SQL, HTML, etc.) + if (typeof value !== 'string') { + return { + valid: false, + error: { + blockId, + blockType, + field: fieldName, + value, + error: `Invalid code value for field "${fieldName}" - expected a string, got ${typeof value}`, + }, + } + } + return { valid: true, value } + } + + case 'response-format': { + // Allow empty/null + if (value === null || value === undefined || value === '') { + return { valid: true, value } + } + // Allow objects (will be stringified later by normalizeResponseFormat) + if (typeof value === 'object') { + return { valid: true, value } + } + // If string, must be valid JSON + if (typeof value === 'string') { + try { + JSON.parse(value) + return { valid: true, value } + } catch { + return { + valid: false, + error: { + blockId, + blockType, + field: fieldName, + value, + error: `Invalid response-format value for field "${fieldName}" - string must be valid JSON`, + }, + } + } + } + // Reject numbers, booleans, etc. 
+ return { + valid: false, + error: { + blockId, + blockType, + field: fieldName, + value, + error: `Invalid response-format value for field "${fieldName}" - expected a JSON string or object`, + }, + } + } + + case 'short-input': + case 'long-input': + case 'combobox': { + // Should be string (combobox allows custom values) + if (typeof value !== 'string' && typeof value !== 'number') { + // Convert to string but don't error + return { valid: true, value: String(value) } + } + return { valid: true, value } + } + + // Selector types - allow strings (IDs) or arrays of strings + case 'oauth-input': + case 'knowledge-base-selector': + case 'document-selector': + case 'file-selector': + case 'project-selector': + case 'channel-selector': + case 'folder-selector': + case 'mcp-server-selector': + case 'mcp-tool-selector': + case 'workflow-selector': { + if (subBlockConfig.multiSelect && Array.isArray(value)) { + return { valid: true, value } + } + if (typeof value === 'string') { + return { valid: true, value } + } + return { + valid: false, + error: { + blockId, + blockType, + field: fieldName, + value, + error: `Invalid selector value for field "${fieldName}" - expected a string${subBlockConfig.multiSelect ? ' or array of strings' : ''}`, + }, + } + } + + default: + // For unknown types, pass through + return { valid: true, value } + } +} + +/** + * Validates source handle is valid for the block type + */ +export function validateSourceHandleForBlock( + sourceHandle: string, + sourceBlockType: string, + sourceBlock: any +): EdgeHandleValidationResult { + if (sourceHandle === 'error') { + return { valid: true } + } + + switch (sourceBlockType) { + case 'loop': + if (sourceHandle === 'loop-start-source' || sourceHandle === 'loop-end-source') { + return { valid: true } + } + return { + valid: false, + error: `Invalid source handle "${sourceHandle}" for loop block. 
Valid handles: loop-start-source, loop-end-source, error`, + } + + case 'parallel': + if (sourceHandle === 'parallel-start-source' || sourceHandle === 'parallel-end-source') { + return { valid: true } + } + return { + valid: false, + error: `Invalid source handle "${sourceHandle}" for parallel block. Valid handles: parallel-start-source, parallel-end-source, error`, + } + + case 'condition': { + const conditionsValue = sourceBlock?.subBlocks?.conditions?.value + if (!conditionsValue) { + return { + valid: false, + error: `Invalid condition handle "${sourceHandle}" - no conditions defined`, + } + } + + // validateConditionHandle accepts simple format (if, else-if-0, else), + // legacy format (condition-{blockId}-if), and internal ID format (condition-{uuid}) + return validateConditionHandle(sourceHandle, sourceBlock.id, conditionsValue) + } + + case 'router': + if (sourceHandle === 'source' || sourceHandle.startsWith(EDGE.ROUTER_PREFIX)) { + return { valid: true } + } + return { + valid: false, + error: `Invalid source handle "${sourceHandle}" for router block. Valid handles: source, ${EDGE.ROUTER_PREFIX}{targetId}, error`, + } + + case 'router_v2': { + const routesValue = sourceBlock?.subBlocks?.routes?.value + if (!routesValue) { + return { + valid: false, + error: `Invalid router handle "${sourceHandle}" - no routes defined`, + } + } + + // validateRouterHandle accepts simple format (route-0, route-1), + // legacy format (router-{blockId}-route-1), and internal ID format (router-{uuid}) + return validateRouterHandle(sourceHandle, sourceBlock.id, routesValue) + } + + default: + if (sourceHandle === 'source') { + return { valid: true } + } + return { + valid: false, + error: `Invalid source handle "${sourceHandle}" for ${sourceBlockType} block. Valid handles: source, error`, + } + } +} + +/** + * Validates condition handle references a valid condition in the block. 
+ * Accepts multiple formats: + * - Simple format: "if", "else-if-0", "else-if-1", "else" + * - Legacy semantic format: "condition-{blockId}-if", "condition-{blockId}-else-if" + * - Internal ID format: "condition-{conditionId}" + * + * Returns the normalized handle (condition-{conditionId}) for storage. + */ +export function validateConditionHandle( + sourceHandle: string, + blockId: string, + conditionsValue: string | any[] +): EdgeHandleValidationResult { + let conditions: any[] + if (typeof conditionsValue === 'string') { + try { + conditions = JSON.parse(conditionsValue) + } catch { + return { + valid: false, + error: `Cannot validate condition handle "${sourceHandle}" - conditions is not valid JSON`, + } + } + } else if (Array.isArray(conditionsValue)) { + conditions = conditionsValue + } else { + return { + valid: false, + error: `Cannot validate condition handle "${sourceHandle}" - conditions is not an array`, + } + } + + if (!Array.isArray(conditions) || conditions.length === 0) { + return { + valid: false, + error: `Invalid condition handle "${sourceHandle}" - no conditions defined`, + } + } + + // Build a map of all valid handle formats -> normalized handle (condition-{conditionId}) + const handleToNormalized = new Map() + const legacySemanticPrefix = `condition-${blockId}-` + let elseIfIndex = 0 + + for (const condition of conditions) { + if (!condition.id) continue + + const normalizedHandle = `condition-${condition.id}` + const title = condition.title?.toLowerCase() + + // Always accept internal ID format + handleToNormalized.set(normalizedHandle, normalizedHandle) + + if (title === 'if') { + // Simple format: "if" + handleToNormalized.set('if', normalizedHandle) + // Legacy format: "condition-{blockId}-if" + handleToNormalized.set(`${legacySemanticPrefix}if`, normalizedHandle) + } else if (title === 'else if') { + // Simple format: "else-if-0", "else-if-1", etc. 
(0-indexed) + handleToNormalized.set(`else-if-${elseIfIndex}`, normalizedHandle) + // Legacy format: "condition-{blockId}-else-if" for first, "condition-{blockId}-else-if-2" for second + if (elseIfIndex === 0) { + handleToNormalized.set(`${legacySemanticPrefix}else-if`, normalizedHandle) + } else { + handleToNormalized.set( + `${legacySemanticPrefix}else-if-${elseIfIndex + 1}`, + normalizedHandle + ) + } + elseIfIndex++ + } else if (title === 'else') { + // Simple format: "else" + handleToNormalized.set('else', normalizedHandle) + // Legacy format: "condition-{blockId}-else" + handleToNormalized.set(`${legacySemanticPrefix}else`, normalizedHandle) + } + } + + const normalizedHandle = handleToNormalized.get(sourceHandle) + if (normalizedHandle) { + return { valid: true, normalizedHandle } + } + + // Build list of valid simple format options for error message + const simpleOptions: string[] = [] + elseIfIndex = 0 + for (const condition of conditions) { + const title = condition.title?.toLowerCase() + if (title === 'if') { + simpleOptions.push('if') + } else if (title === 'else if') { + simpleOptions.push(`else-if-${elseIfIndex}`) + elseIfIndex++ + } else if (title === 'else') { + simpleOptions.push('else') + } + } + + return { + valid: false, + error: `Invalid condition handle "${sourceHandle}". Valid handles: ${simpleOptions.join(', ')}`, + } +} + +/** + * Validates router handle references a valid route in the block. + * Accepts multiple formats: + * - Simple format: "route-0", "route-1", "route-2" (0-indexed) + * - Legacy semantic format: "router-{blockId}-route-1" (1-indexed) + * - Internal ID format: "router-{routeId}" + * + * Returns the normalized handle (router-{routeId}) for storage. 
+ */ +export function validateRouterHandle( + sourceHandle: string, + blockId: string, + routesValue: string | any[] +): EdgeHandleValidationResult { + let routes: any[] + if (typeof routesValue === 'string') { + try { + routes = JSON.parse(routesValue) + } catch { + return { + valid: false, + error: `Cannot validate router handle "${sourceHandle}" - routes is not valid JSON`, + } + } + } else if (Array.isArray(routesValue)) { + routes = routesValue + } else { + return { + valid: false, + error: `Cannot validate router handle "${sourceHandle}" - routes is not an array`, + } + } + + if (!Array.isArray(routes) || routes.length === 0) { + return { + valid: false, + error: `Invalid router handle "${sourceHandle}" - no routes defined`, + } + } + + // Build a map of all valid handle formats -> normalized handle (router-{routeId}) + const handleToNormalized = new Map() + const legacySemanticPrefix = `router-${blockId}-` + + for (let i = 0; i < routes.length; i++) { + const route = routes[i] + if (!route.id) continue + + const normalizedHandle = `router-${route.id}` + + // Always accept internal ID format: router-{uuid} + handleToNormalized.set(normalizedHandle, normalizedHandle) + + // Simple format: route-0, route-1, etc. 
(0-indexed) + handleToNormalized.set(`route-${i}`, normalizedHandle) + + // Legacy 1-indexed route number format: router-{blockId}-route-1 + handleToNormalized.set(`${legacySemanticPrefix}route-${i + 1}`, normalizedHandle) + + // Accept normalized title format: router-{blockId}-{normalized-title} + if (route.title && typeof route.title === 'string') { + const normalizedTitle = route.title + .toLowerCase() + .replace(/\s+/g, '-') + .replace(/[^a-z0-9-]/g, '') + if (normalizedTitle) { + handleToNormalized.set(`${legacySemanticPrefix}${normalizedTitle}`, normalizedHandle) + } + } + } + + const normalizedHandle = handleToNormalized.get(sourceHandle) + if (normalizedHandle) { + return { valid: true, normalizedHandle } + } + + // Build list of valid simple format options for error message + const simpleOptions = routes.map((_, i) => `route-${i}`) + + return { + valid: false, + error: `Invalid router handle "${sourceHandle}". Valid handles: ${simpleOptions.join(', ')}`, + } +} + +/** + * Validates target handle is valid (must be 'target') + */ +export function validateTargetHandle(targetHandle: string): EdgeHandleValidationResult { + if (targetHandle === 'target') { + return { valid: true } + } + return { + valid: false, + error: `Invalid target handle "${targetHandle}". 
Expected "target"`, + } +} + +/** + * Checks if a block type is allowed by the permission group config + */ +export function isBlockTypeAllowed( + blockType: string, + permissionConfig: PermissionGroupConfig | null +): boolean { + if (!permissionConfig || permissionConfig.allowedIntegrations === null) { + return true + } + return permissionConfig.allowedIntegrations.includes(blockType) +} + +/** + * Validates selector IDs in the workflow state exist in the database + * Returns validation errors for any invalid selector IDs + */ +export async function validateWorkflowSelectorIds( + workflowState: any, + context: { userId: string; workspaceId?: string } +): Promise { + const logger = createLogger('EditWorkflowSelectorValidation') + const errors: ValidationError[] = [] + + // Collect all selector fields from all blocks + const selectorsToValidate: Array<{ + blockId: string + blockType: string + fieldName: string + selectorType: string + value: string | string[] + }> = [] + + for (const [blockId, block] of Object.entries(workflowState.blocks || {})) { + const blockData = block as any + const blockType = blockData.type + if (!blockType) continue + + const blockConfig = getBlock(blockType) + if (!blockConfig) continue + + // Check each subBlock for selector types + for (const subBlockConfig of blockConfig.subBlocks) { + if (!SELECTOR_TYPES.has(subBlockConfig.type)) continue + + // Skip oauth-input - credentials are pre-validated before edit application + // This allows existing collaborator credentials to remain untouched + if (subBlockConfig.type === 'oauth-input') continue + + const subBlockValue = blockData.subBlocks?.[subBlockConfig.id]?.value + if (!subBlockValue) continue + + // Handle comma-separated values for multi-select + let values: string | string[] = subBlockValue + if (typeof subBlockValue === 'string' && subBlockValue.includes(',')) { + values = subBlockValue + .split(',') + .map((v: string) => v.trim()) + .filter(Boolean) + } + + 
selectorsToValidate.push({ + blockId, + blockType, + fieldName: subBlockConfig.id, + selectorType: subBlockConfig.type, + value: values, + }) + } + } + + if (selectorsToValidate.length === 0) { + return errors + } + + logger.info('Validating selector IDs', { + selectorCount: selectorsToValidate.length, + userId: context.userId, + workspaceId: context.workspaceId, + }) + + // Validate each selector field + for (const selector of selectorsToValidate) { + const result = await validateSelectorIds(selector.selectorType, selector.value, context) + + if (result.invalid.length > 0) { + // Include warning info (like available credentials) in the error message for better LLM feedback + const warningInfo = result.warning ? `. ${result.warning}` : '' + errors.push({ + blockId: selector.blockId, + blockType: selector.blockType, + field: selector.fieldName, + value: selector.value, + error: `Invalid ${selector.selectorType} ID(s): ${result.invalid.join(', ')} - ID(s) do not exist or user doesn't have access${warningInfo}`, + }) + } else if (result.warning) { + // Log warnings that don't have errors (shouldn't happen for credentials but may for other selectors) + logger.warn(result.warning, { + blockId: selector.blockId, + fieldName: selector.fieldName, + }) + } + } + + if (errors.length > 0) { + logger.warn('Found invalid selector IDs', { + errorCount: errors.length, + errors: errors.map((e) => ({ blockId: e.blockId, field: e.field, error: e.error })), + }) + } + + return errors +} + +/** + * Pre-validates credential and apiKey inputs in operations before they are applied. + * - Validates oauth-input (credential) IDs belong to the user + * - Filters out apiKey inputs for hosted models when isHosted is true + * - Also validates credentials and apiKeys in nestedNodes (blocks inside loop/parallel) + * Returns validation errors for any removed inputs. 
+ */ +export async function preValidateCredentialInputs( + operations: EditWorkflowOperation[], + context: { userId: string }, + workflowState?: Record +): Promise<{ filteredOperations: EditWorkflowOperation[]; errors: ValidationError[] }> { + const { isHosted } = await import('@/lib/core/config/feature-flags') + const { getHostedModels } = await import('@/providers/utils') + + const logger = createLogger('PreValidateCredentials') + const errors: ValidationError[] = [] + + // Collect credential and apiKey inputs that need validation/filtering + const credentialInputs: Array<{ + operationIndex: number + blockId: string + blockType: string + fieldName: string + value: string + nestedBlockId?: string + }> = [] + + const hostedApiKeyInputs: Array<{ + operationIndex: number + blockId: string + blockType: string + model: string + nestedBlockId?: string + }> = [] + + const hostedModelsLower = isHosted ? new Set(getHostedModels().map((m) => m.toLowerCase())) : null + + /** + * Collect credential inputs from a block's inputs based on its block config + */ + function collectCredentialInputs( + blockConfig: ReturnType, + inputs: Record, + opIndex: number, + blockId: string, + blockType: string, + nestedBlockId?: string + ) { + if (!blockConfig) return + + for (const subBlockConfig of blockConfig.subBlocks) { + if (subBlockConfig.type !== 'oauth-input') continue + + const inputValue = inputs[subBlockConfig.id] + if (!inputValue || typeof inputValue !== 'string' || inputValue.trim() === '') continue + + credentialInputs.push({ + operationIndex: opIndex, + blockId, + blockType, + fieldName: subBlockConfig.id, + value: inputValue, + nestedBlockId, + }) + } + } + + /** + * Check if apiKey should be filtered for a block with the given model + */ + function collectHostedApiKeyInput( + inputs: Record, + modelValue: string | undefined, + opIndex: number, + blockId: string, + blockType: string, + nestedBlockId?: string + ) { + if (!hostedModelsLower || !inputs.apiKey) return + if 
(!modelValue || typeof modelValue !== 'string') return + + if (hostedModelsLower.has(modelValue.toLowerCase())) { + hostedApiKeyInputs.push({ + operationIndex: opIndex, + blockId, + blockType, + model: modelValue, + nestedBlockId, + }) + } + } + + operations.forEach((op, opIndex) => { + // Process main block inputs + if (op.params?.inputs && op.params?.type) { + const blockConfig = getBlock(op.params.type) + if (blockConfig) { + // Collect credentials from main block + collectCredentialInputs( + blockConfig, + op.params.inputs as Record, + opIndex, + op.block_id, + op.params.type + ) + + // Check for apiKey inputs on hosted models + let modelValue = (op.params.inputs as Record).model as string | undefined + + // For edit operations, if model is not being changed, check existing block's model + if ( + !modelValue && + op.operation_type === 'edit' && + (op.params.inputs as Record).apiKey && + workflowState + ) { + const existingBlock = (workflowState.blocks as Record)?.[op.block_id] as + | Record + | undefined + const existingSubBlocks = existingBlock?.subBlocks as Record | undefined + const existingModelSubBlock = existingSubBlocks?.model as + | Record + | undefined + modelValue = existingModelSubBlock?.value as string | undefined + } + + collectHostedApiKeyInput( + op.params.inputs as Record, + modelValue, + opIndex, + op.block_id, + op.params.type + ) + } + } + + // Process nested nodes (blocks inside loop/parallel containers) + const nestedNodes = op.params?.nestedNodes as + | Record> + | undefined + if (nestedNodes) { + Object.entries(nestedNodes).forEach(([childId, childBlock]) => { + const childType = childBlock.type as string | undefined + const childInputs = childBlock.inputs as Record | undefined + if (!childType || !childInputs) return + + const childBlockConfig = getBlock(childType) + if (!childBlockConfig) return + + // Collect credentials from nested block + collectCredentialInputs( + childBlockConfig, + childInputs, + opIndex, + op.block_id, + 
childType, + childId + ) + + // Check for apiKey inputs on hosted models in nested block + const modelValue = childInputs.model as string | undefined + collectHostedApiKeyInput(childInputs, modelValue, opIndex, op.block_id, childType, childId) + }) + } + }) + + const hasCredentialsToValidate = credentialInputs.length > 0 + const hasHostedApiKeysToFilter = hostedApiKeyInputs.length > 0 + + if (!hasCredentialsToValidate && !hasHostedApiKeysToFilter) { + return { filteredOperations: operations, errors } + } + + // Deep clone operations so we can modify them + const filteredOperations = structuredClone(operations) + + // Filter out apiKey inputs for hosted models and add validation errors + if (hasHostedApiKeysToFilter) { + logger.info('Filtering apiKey inputs for hosted models', { count: hostedApiKeyInputs.length }) + + for (const apiKeyInput of hostedApiKeyInputs) { + const op = filteredOperations[apiKeyInput.operationIndex] + + // Handle nested block apiKey filtering + if (apiKeyInput.nestedBlockId) { + const nestedNodes = op.params?.nestedNodes as + | Record> + | undefined + const nestedBlock = nestedNodes?.[apiKeyInput.nestedBlockId] + const nestedInputs = nestedBlock?.inputs as Record | undefined + if (nestedInputs?.apiKey) { + nestedInputs.apiKey = undefined + logger.debug('Filtered apiKey for hosted model in nested block', { + parentBlockId: apiKeyInput.blockId, + nestedBlockId: apiKeyInput.nestedBlockId, + model: apiKeyInput.model, + }) + + errors.push({ + blockId: apiKeyInput.nestedBlockId, + blockType: apiKeyInput.blockType, + field: 'apiKey', + value: '[redacted]', + error: `Cannot set API key for hosted model "${apiKeyInput.model}" - API keys are managed by the platform when using hosted models`, + }) + } + } else if (op.params?.inputs?.apiKey) { + // Handle main block apiKey filtering + op.params.inputs.apiKey = undefined + logger.debug('Filtered apiKey for hosted model', { + blockId: apiKeyInput.blockId, + model: apiKeyInput.model, + }) + + errors.push({ 
+ blockId: apiKeyInput.blockId, + blockType: apiKeyInput.blockType, + field: 'apiKey', + value: '[redacted]', + error: `Cannot set API key for hosted model "${apiKeyInput.model}" - API keys are managed by the platform when using hosted models`, + }) + } + } + } + + // Validate credential inputs + if (hasCredentialsToValidate) { + logger.info('Pre-validating credential inputs', { + credentialCount: credentialInputs.length, + userId: context.userId, + }) + + const allCredentialIds = credentialInputs.map((c) => c.value) + const validationResult = await validateSelectorIds('oauth-input', allCredentialIds, context) + const invalidSet = new Set(validationResult.invalid) + + if (invalidSet.size > 0) { + for (const credInput of credentialInputs) { + if (!invalidSet.has(credInput.value)) continue + + const op = filteredOperations[credInput.operationIndex] + + // Handle nested block credential removal + if (credInput.nestedBlockId) { + const nestedNodes = op.params?.nestedNodes as + | Record> + | undefined + const nestedBlock = nestedNodes?.[credInput.nestedBlockId] + const nestedInputs = nestedBlock?.inputs as Record | undefined + if (nestedInputs?.[credInput.fieldName]) { + delete nestedInputs[credInput.fieldName] + logger.info('Removed invalid credential from nested block', { + parentBlockId: credInput.blockId, + nestedBlockId: credInput.nestedBlockId, + field: credInput.fieldName, + invalidValue: credInput.value, + }) + } + } else if (op.params?.inputs?.[credInput.fieldName]) { + // Handle main block credential removal + delete op.params.inputs[credInput.fieldName] + logger.info('Removed invalid credential from operation', { + blockId: credInput.blockId, + field: credInput.fieldName, + invalidValue: credInput.value, + }) + } + + const warningInfo = validationResult.warning ? `. ${validationResult.warning}` : '' + const errorBlockId = credInput.nestedBlockId ?? 
credInput.blockId + errors.push({ + blockId: errorBlockId, + blockType: credInput.blockType, + field: credInput.fieldName, + value: credInput.value, + error: `Invalid credential ID "${credInput.value}" - credential does not exist or user doesn't have access${warningInfo}`, + }) + } + + logger.warn('Filtered out invalid credentials', { + invalidCount: invalidSet.size, + }) + } + } + + return { filteredOperations, errors } +} diff --git a/apps/sim/lib/copilot/tools/server/workflow/get-workflow-console.ts b/apps/sim/lib/copilot/tools/server/workflow/get-workflow-console.ts index 601a17c0a..080e33969 100644 --- a/apps/sim/lib/copilot/tools/server/workflow/get-workflow-console.ts +++ b/apps/sim/lib/copilot/tools/server/workflow/get-workflow-console.ts @@ -4,6 +4,8 @@ import { createLogger } from '@sim/logger' import { desc, eq } from 'drizzle-orm' import type { BaseServerTool } from '@/lib/copilot/tools/server/base-tool' +const logger = createLogger('GetWorkflowConsoleServerTool') + interface GetWorkflowConsoleArgs { workflowId: string limit?: number @@ -87,11 +89,16 @@ function normalizeErrorMessage(errorValue: unknown): string | undefined { if (typeof errorValue === 'object') { try { return JSON.stringify(errorValue) - } catch {} + } catch (error) { + logger.warn('Failed to stringify error value', { + error: error instanceof Error ? 
error.message : String(error), + }) + } } try { return String(errorValue) } catch { + // JSON.stringify failed for error value; fall back to undefined return undefined } } @@ -217,7 +224,6 @@ function deriveExecutionErrorSummary(params: { export const getWorkflowConsoleServerTool: BaseServerTool = { name: 'get_workflow_console', async execute(rawArgs: GetWorkflowConsoleArgs): Promise { - const logger = createLogger('GetWorkflowConsoleServerTool') const { workflowId, limit = 2, diff --git a/apps/sim/lib/copilot/tools/shared/schemas.ts b/apps/sim/lib/copilot/tools/shared/schemas.ts index 2377aecf7..804a1a48a 100644 --- a/apps/sim/lib/copilot/tools/shared/schemas.ts +++ b/apps/sim/lib/copilot/tools/shared/schemas.ts @@ -72,7 +72,17 @@ export type GetBlockConfigResultType = z.infer // knowledge_base - shared schema used by client tool, server tool, and registry export const KnowledgeBaseArgsSchema = z.object({ - operation: z.enum(['create', 'list', 'get', 'query']), + operation: z.enum([ + 'create', + 'list', + 'get', + 'query', + 'list_tags', + 'create_tag', + 'update_tag', + 'delete_tag', + 'get_tag_usage', + ]), args: z .object({ /** Name of the knowledge base (required for create) */ @@ -81,7 +91,7 @@ export const KnowledgeBaseArgsSchema = z.object({ description: z.string().optional(), /** Workspace ID to associate with (required for create, optional for list) */ workspaceId: z.string().optional(), - /** Knowledge base ID (required for get, query) */ + /** Knowledge base ID (required for get, query, list_tags, create_tag, get_tag_usage) */ knowledgeBaseId: z.string().optional(), /** Search query text (required for query) */ query: z.string().optional(), @@ -95,6 +105,12 @@ export const KnowledgeBaseArgsSchema = z.object({ overlap: z.number().min(0).max(500).default(200), }) .optional(), + /** Tag definition ID (required for update_tag, delete_tag) */ + tagDefinitionId: z.string().optional(), + /** Tag display name (required for create_tag, optional for update_tag) 
*/ + tagDisplayName: z.string().optional(), + /** Tag field type: text, number, date, boolean (optional for create_tag, defaults to text) */ + tagFieldType: z.enum(['text', 'number', 'date', 'boolean']).optional(), }) .optional(), }) diff --git a/apps/sim/lib/copilot/tools/shared/workflow-utils.ts b/apps/sim/lib/copilot/tools/shared/workflow-utils.ts new file mode 100644 index 000000000..2f033a883 --- /dev/null +++ b/apps/sim/lib/copilot/tools/shared/workflow-utils.ts @@ -0,0 +1,38 @@ +import { sanitizeForCopilot } from '@/lib/workflows/sanitization/json-sanitizer' + +type CopilotWorkflowState = { + blocks?: Record + edges?: any[] + loops?: Record + parallels?: Record +} + +export function formatWorkflowStateForCopilot(state: CopilotWorkflowState): string { + const workflowState = { + blocks: state.blocks || {}, + edges: state.edges || [], + loops: state.loops || {}, + parallels: state.parallels || {}, + } + const sanitized = sanitizeForCopilot(workflowState) + return JSON.stringify(sanitized, null, 2) +} + +export function formatNormalizedWorkflowForCopilot( + normalized: CopilotWorkflowState | null | undefined +): string | null { + if (!normalized) return null + return formatWorkflowStateForCopilot(normalized) +} + +export function normalizeWorkflowName(name?: string | null): string { + return String(name || '') + .trim() + .toLowerCase() +} + +export function extractWorkflowNames(workflows: Array<{ name?: string | null }>): string[] { + return workflows + .map((workflow) => (typeof workflow?.name === 'string' ? 
workflow.name : null)) + .filter((name): name is string => Boolean(name)) +} diff --git a/apps/sim/lib/copilot/types.ts b/apps/sim/lib/copilot/types.ts index 68e097039..b9742f335 100644 --- a/apps/sim/lib/copilot/types.ts +++ b/apps/sim/lib/copilot/types.ts @@ -1,58 +1,4 @@ -/** - * Copilot Types - Consolidated from various locations - * This file contains all copilot-related type definitions - */ - -// Tool call state types (from apps/sim/types/tool-call.ts) -export interface ToolCallState { - id: string - name: string - displayName?: string - parameters?: Record - state: - | 'detecting' - | 'pending' - | 'executing' - | 'completed' - | 'error' - | 'rejected' - | 'applied' - | 'ready_for_review' - | 'aborted' - | 'skipped' - | 'background' - startTime?: number - endTime?: number - duration?: number - result?: any - error?: string - progress?: string -} - -export interface ToolCallGroup { - id: string - toolCalls: ToolCallState[] - status: 'pending' | 'in_progress' | 'completed' | 'error' - startTime?: number - endTime?: number - summary?: string -} - -export interface InlineContent { - type: 'text' | 'tool_call' - content: string - toolCall?: ToolCallState -} - -export interface ParsedMessageContent { - textContent: string - toolCalls: ToolCallState[] - toolGroups: ToolCallGroup[] - inlineContent?: InlineContent[] -} - import type { ProviderId } from '@/providers/types' -// Copilot Tools Type Definitions (from workspace copilot lib) import type { CopilotToolCall, ToolState } from '@/stores/panel' export type NotificationStatus = @@ -63,82 +9,10 @@ export type NotificationStatus = | 'rejected' | 'background' -// Export the consolidated types export type { CopilotToolCall, ToolState } -// Display configuration for different states -export interface StateDisplayConfig { - displayName: string - icon?: string - className?: string -} - -// Complete display configuration for a tool -export interface ToolDisplayConfig { - states: { - [K in ToolState]?: StateDisplayConfig 
- } - getDynamicDisplayName?: (state: ToolState, params: Record) => string | null -} - -// Schema for tool parameters (OpenAI function calling format) -export interface ToolSchema { - name: string - description: string - parameters?: { - type: 'object' - properties: Record - required?: string[] - } -} - -// Tool metadata - all the static configuration -export interface ToolMetadata { - id: string - displayConfig: ToolDisplayConfig - schema: ToolSchema - requiresInterrupt: boolean - allowBackgroundExecution?: boolean - stateMessages?: Partial> -} - -// Result from executing a tool -export interface ToolExecuteResult { - success: boolean - data?: any - error?: string -} - -// Response from the confirmation API -export interface ToolConfirmResponse { - success: boolean - message?: string -} - -// Options for tool execution -export interface ToolExecutionOptions { - onStateChange?: (state: ToolState) => void - beforeExecute?: () => Promise - afterExecute?: (result: ToolExecuteResult) => Promise - context?: Record -} - -// The main tool interface that all tools must implement -export interface Tool { - metadata: ToolMetadata - execute(toolCall: CopilotToolCall, options?: ToolExecutionOptions): Promise - getDisplayName(toolCall: CopilotToolCall): string - getIcon(toolCall: CopilotToolCall): string - handleUserAction( - toolCall: CopilotToolCall, - action: 'run' | 'skip' | 'background', - options?: ToolExecutionOptions - ): Promise - requiresConfirmation(toolCall: CopilotToolCall): boolean -} - -// Provider configuration for Sim Agent requests -// This type is only for the `provider` field in requests sent to the Sim Agent +// Provider configuration for Sim Agent requests. +// This type is only for the `provider` field in requests sent to the Sim Agent. 
export type CopilotProviderConfig = | { provider: 'azure-openai' diff --git a/apps/sim/lib/core/config/env.ts b/apps/sim/lib/core/config/env.ts index 685cf0e9d..0299ade0e 100644 --- a/apps/sim/lib/core/config/env.ts +++ b/apps/sim/lib/core/config/env.ts @@ -35,6 +35,11 @@ export const env = createEnv({ SIM_AGENT_API_URL: z.string().url().optional(), // URL for internal sim agent API AGENT_INDEXER_URL: z.string().url().optional(), // URL for agent training data indexer AGENT_INDEXER_API_KEY: z.string().min(1).optional(), // API key for agent indexer authentication + COPILOT_STREAM_TTL_SECONDS: z.number().optional(), // Redis TTL for copilot SSE buffer + COPILOT_STREAM_EVENT_LIMIT: z.number().optional(), // Max events retained per stream + COPILOT_STREAM_RESERVE_BATCH: z.number().optional(), // Event ID reservation batch size + COPILOT_STREAM_FLUSH_INTERVAL_MS: z.number().optional(), // Buffer flush interval in ms + COPILOT_STREAM_FLUSH_MAX_BATCH: z.number().optional(), // Max events per flush batch // Database & Storage REDIS_URL: z.string().url().optional(), // Redis connection string for caching/sessions diff --git a/apps/sim/lib/logs/execution/logger.ts b/apps/sim/lib/logs/execution/logger.ts index 0fc47fa73..3c8fa4224 100644 --- a/apps/sim/lib/logs/execution/logger.ts +++ b/apps/sim/lib/logs/execution/logger.ts @@ -34,6 +34,7 @@ import type { WorkflowState, } from '@/lib/logs/types' import { getWorkspaceBilledAccountUserId } from '@/lib/workspaces/utils' +import type { SerializableExecutionState } from '@/executor/execution/types' export interface ToolCall { name: string @@ -188,6 +189,7 @@ export class ExecutionLogger implements IExecutionLoggerService { finalOutput: BlockOutputData traceSpans?: TraceSpan[] workflowInput?: any + executionState?: SerializableExecutionState isResume?: boolean level?: 'info' | 'error' status?: 'completed' | 'failed' | 'cancelled' | 'pending' @@ -200,6 +202,7 @@ export class ExecutionLogger implements IExecutionLoggerService { 
finalOutput, traceSpans, workflowInput, + executionState, isResume, level: levelOverride, status: statusOverride, @@ -287,6 +290,7 @@ export class ExecutionLogger implements IExecutionLoggerService { total: executionCost.tokens.total, }, models: executionCost.models, + ...(executionState ? { executionState } : {}), }, cost: executionCost, }) diff --git a/apps/sim/lib/logs/execution/logging-session.ts b/apps/sim/lib/logs/execution/logging-session.ts index be1515686..9ab710dc1 100644 --- a/apps/sim/lib/logs/execution/logging-session.ts +++ b/apps/sim/lib/logs/execution/logging-session.ts @@ -17,6 +17,7 @@ import type { TraceSpan, WorkflowState, } from '@/lib/logs/types' +import type { SerializableExecutionState } from '@/executor/execution/types' const logger = createLogger('LoggingSession') @@ -35,6 +36,7 @@ export interface SessionCompleteParams { finalOutput?: any traceSpans?: TraceSpan[] workflowInput?: any + executionState?: SerializableExecutionState } export interface SessionErrorCompleteParams { @@ -269,7 +271,8 @@ export class LoggingSession { return } - const { endedAt, totalDurationMs, finalOutput, traceSpans, workflowInput } = params + const { endedAt, totalDurationMs, finalOutput, traceSpans, workflowInput, executionState } = + params try { const costSummary = calculateCostSummary(traceSpans || []) @@ -284,6 +287,7 @@ export class LoggingSession { finalOutput: finalOutput || {}, traceSpans: traceSpans || [], workflowInput, + executionState, isResume: this.isResume, }) diff --git a/apps/sim/lib/logs/types.ts b/apps/sim/lib/logs/types.ts index 8d65fbd55..1b93e64e1 100644 --- a/apps/sim/lib/logs/types.ts +++ b/apps/sim/lib/logs/types.ts @@ -1,4 +1,5 @@ import type { Edge } from 'reactflow' +import type { SerializableExecutionState } from '@/executor/execution/types' import type { BlockLog, NormalizedBlockOutput } from '@/executor/types' import type { DeploymentStatus } from '@/stores/workflows/registry/types' import type { Loop, Parallel, WorkflowState } 
from '@/stores/workflows/workflow/types' @@ -111,6 +112,7 @@ export interface WorkflowExecutionLog { tokens?: { input?: number; output?: number; total?: number } } > + executionState?: SerializableExecutionState finalOutput?: any errorDetails?: { blockId: string diff --git a/apps/sim/lib/mcp/client.test.ts b/apps/sim/lib/mcp/client.test.ts new file mode 100644 index 000000000..386769d41 --- /dev/null +++ b/apps/sim/lib/mcp/client.test.ts @@ -0,0 +1,109 @@ +/** + * @vitest-environment node + */ +import { loggerMock } from '@sim/testing' +import { beforeEach, describe, expect, it, vi } from 'vitest' + +vi.mock('@sim/logger', () => loggerMock) + +/** + * Capture the notification handler registered via `client.setNotificationHandler()`. + * This lets us simulate the MCP SDK delivering a `tools/list_changed` notification. + */ +let capturedNotificationHandler: (() => Promise) | null = null + +vi.mock('@modelcontextprotocol/sdk/client/index.js', () => ({ + Client: vi.fn().mockImplementation(() => ({ + connect: vi.fn().mockResolvedValue(undefined), + close: vi.fn().mockResolvedValue(undefined), + getServerVersion: vi.fn().mockReturnValue('2025-06-18'), + getServerCapabilities: vi.fn().mockReturnValue({ tools: { listChanged: true } }), + setNotificationHandler: vi + .fn() + .mockImplementation((_schema: unknown, handler: () => Promise) => { + capturedNotificationHandler = handler + }), + listTools: vi.fn().mockResolvedValue({ tools: [] }), + })), +})) + +vi.mock('@modelcontextprotocol/sdk/client/streamableHttp.js', () => ({ + StreamableHTTPClientTransport: vi.fn().mockImplementation(() => ({ + onclose: null, + sessionId: 'test-session', + })), +})) + +vi.mock('@modelcontextprotocol/sdk/types.js', () => ({ + ToolListChangedNotificationSchema: { method: 'notifications/tools/list_changed' }, +})) + +vi.mock('@/lib/core/execution-limits', () => ({ + getMaxExecutionTimeout: vi.fn().mockReturnValue(30000), +})) + +import { McpClient } from './client' +import type { 
McpServerConfig } from './types' + +function createConfig(): McpServerConfig { + return { + id: 'server-1', + name: 'Test Server', + transport: 'streamable-http', + url: 'https://test.example.com/mcp', + } +} + +describe('McpClient notification handler', () => { + beforeEach(() => { + capturedNotificationHandler = null + }) + + it('fires onToolsChanged when a notification arrives while connected', async () => { + const onToolsChanged = vi.fn() + + const client = new McpClient({ + config: createConfig(), + securityPolicy: { requireConsent: false, auditLevel: 'basic' }, + onToolsChanged, + }) + + await client.connect() + + expect(capturedNotificationHandler).not.toBeNull() + + await capturedNotificationHandler!() + + expect(onToolsChanged).toHaveBeenCalledTimes(1) + expect(onToolsChanged).toHaveBeenCalledWith('server-1') + }) + + it('suppresses notifications after disconnect', async () => { + const onToolsChanged = vi.fn() + + const client = new McpClient({ + config: createConfig(), + securityPolicy: { requireConsent: false, auditLevel: 'basic' }, + onToolsChanged, + }) + + await client.connect() + expect(capturedNotificationHandler).not.toBeNull() + + await client.disconnect() + await capturedNotificationHandler!() + + expect(onToolsChanged).not.toHaveBeenCalled() + }) + + it('does not register a notification handler when onToolsChanged is not provided', async () => { + const client = new McpClient({ + config: createConfig(), + securityPolicy: { requireConsent: false, auditLevel: 'basic' }, + }) + + await client.connect() + + expect(capturedNotificationHandler).toBeNull() + }) +}) diff --git a/apps/sim/lib/mcp/client.ts b/apps/sim/lib/mcp/client.ts index 56375613f..f26adf33b 100644 --- a/apps/sim/lib/mcp/client.ts +++ b/apps/sim/lib/mcp/client.ts @@ -10,10 +10,15 @@ import { Client } from '@modelcontextprotocol/sdk/client/index.js' import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js' -import type { ListToolsResult, Tool 
} from '@modelcontextprotocol/sdk/types.js' +import { + type ListToolsResult, + type Tool, + ToolListChangedNotificationSchema, +} from '@modelcontextprotocol/sdk/types.js' import { createLogger } from '@sim/logger' import { getMaxExecutionTimeout } from '@/lib/core/execution-limits' import { + type McpClientOptions, McpConnectionError, type McpConnectionStatus, type McpConsentRequest, @@ -24,6 +29,7 @@ import { type McpTool, type McpToolCall, type McpToolResult, + type McpToolsChangedCallback, type McpVersionInfo, } from '@/lib/mcp/types' @@ -35,6 +41,7 @@ export class McpClient { private config: McpServerConfig private connectionStatus: McpConnectionStatus private securityPolicy: McpSecurityPolicy + private onToolsChanged?: McpToolsChangedCallback private isConnected = false private static readonly SUPPORTED_VERSIONS = [ @@ -44,23 +51,36 @@ export class McpClient { ] /** - * Creates a new MCP client + * Creates a new MCP client. * - * No session ID parameter (we disconnect after each operation). - * The SDK handles session management automatically via Mcp-Session-Id header. - * - * @param config - Server configuration - * @param securityPolicy - Optional security policy + * Accepts either the legacy (config, securityPolicy?) signature + * or a single McpClientOptions object with an optional onToolsChanged callback. */ - constructor(config: McpServerConfig, securityPolicy?: McpSecurityPolicy) { - this.config = config - this.connectionStatus = { connected: false } - this.securityPolicy = securityPolicy ?? { - requireConsent: true, - auditLevel: 'basic', - maxToolExecutionsPerHour: 1000, + constructor(config: McpServerConfig, securityPolicy?: McpSecurityPolicy) + constructor(options: McpClientOptions) + constructor( + configOrOptions: McpServerConfig | McpClientOptions, + securityPolicy?: McpSecurityPolicy + ) { + if ('config' in configOrOptions) { + this.config = configOrOptions.config + this.securityPolicy = configOrOptions.securityPolicy ?? 
{ + requireConsent: true, + auditLevel: 'basic', + maxToolExecutionsPerHour: 1000, + } + this.onToolsChanged = configOrOptions.onToolsChanged + } else { + this.config = configOrOptions + this.securityPolicy = securityPolicy ?? { + requireConsent: true, + auditLevel: 'basic', + maxToolExecutionsPerHour: 1000, + } } + this.connectionStatus = { connected: false } + if (!this.config.url) { throw new McpError('URL required for Streamable HTTP transport') } @@ -79,16 +99,15 @@ export class McpClient { { capabilities: { tools: {}, - // Resources and prompts can be added later - // resources: {}, - // prompts: {}, }, } ) } /** - * Initialize connection to MCP server + * Initialize connection to MCP server. + * If an `onToolsChanged` callback was provided, registers a notification handler + * for `notifications/tools/list_changed` after connecting. */ async connect(): Promise { logger.info(`Connecting to MCP server: ${this.config.name} (${this.config.transport})`) @@ -100,6 +119,15 @@ export class McpClient { this.connectionStatus.connected = true this.connectionStatus.lastConnected = new Date() + if (this.onToolsChanged) { + this.client.setNotificationHandler(ToolListChangedNotificationSchema, async () => { + if (!this.isConnected) return + logger.info(`[${this.config.name}] Received tools/list_changed notification`) + this.onToolsChanged?.(this.config.id) + }) + logger.info(`[${this.config.name}] Registered tools/list_changed notification handler`) + } + const serverVersion = this.client.getServerVersion() logger.info(`Successfully connected to MCP server: ${this.config.name}`, { protocolVersion: serverVersion, @@ -241,6 +269,28 @@ export class McpClient { return !!serverCapabilities?.[capability] } + /** + * Check if the server declared `capabilities.tools.listChanged: true` during initialization. 
+ */ + hasListChangedCapability(): boolean { + const caps = this.client.getServerCapabilities() + const toolsCap = caps?.tools as Record | undefined + return !!toolsCap?.listChanged + } + + /** + * Register a callback to be invoked when the underlying transport closes. + * Used by the connection manager for reconnection logic. + * Chains with the SDK's internal onclose handler so it still performs its cleanup. + */ + onClose(callback: () => void): void { + const existingHandler = this.transport.onclose + this.transport.onclose = () => { + existingHandler?.() + callback() + } + } + /** * Get server configuration */ diff --git a/apps/sim/lib/mcp/connection-manager.test.ts b/apps/sim/lib/mcp/connection-manager.test.ts new file mode 100644 index 000000000..4badbdde5 --- /dev/null +++ b/apps/sim/lib/mcp/connection-manager.test.ts @@ -0,0 +1,180 @@ +/** + * @vitest-environment node + */ +import { loggerMock } from '@sim/testing' +import { afterEach, describe, expect, it, vi } from 'vitest' + +interface MockMcpClient { + connect: ReturnType + disconnect: ReturnType + hasListChangedCapability: ReturnType + onClose: ReturnType +} + +/** Deferred promise to control when `client.connect()` resolves. */ +function createDeferred() { + let resolve!: (value: T) => void + const promise = new Promise((res) => { + resolve = res + }) + return { promise, resolve } +} + +function serverConfig(id: string, name = `Server ${id}`) { + return { + id, + name, + transport: 'streamable-http' as const, + url: `https://${id}.example.com/mcp`, + } +} + +/** Shared setup: resets modules and applies base mocks. 
*/ +function setupBaseMocks() { + vi.resetModules() + vi.doMock('@sim/logger', () => loggerMock) + vi.doMock('@/lib/core/config/feature-flags', () => ({ isTest: false })) + vi.doMock('@/lib/mcp/pubsub', () => ({ + mcpPubSub: { onToolsChanged: vi.fn(() => vi.fn()), publishToolsChanged: vi.fn() }, + })) +} + +describe('McpConnectionManager', () => { + let manager: { + connect: (...args: unknown[]) => Promise<{ supportsListChanged: boolean }> + dispose: () => void + } | null = null + + afterEach(() => { + manager?.dispose() + manager = null + }) + + describe('concurrent connect() guard', () => { + it('creates only one client when two connect() calls race for the same serverId', async () => { + setupBaseMocks() + + const deferred = createDeferred() + const instances: MockMcpClient[] = [] + + vi.doMock('./client', () => ({ + McpClient: vi.fn().mockImplementation(() => { + const instance: MockMcpClient = { + connect: vi.fn().mockImplementation(() => deferred.promise), + disconnect: vi.fn().mockResolvedValue(undefined), + hasListChangedCapability: vi.fn().mockReturnValue(true), + onClose: vi.fn(), + } + instances.push(instance) + return instance + }), + })) + + const { mcpConnectionManager: mgr } = await import('./connection-manager') + manager = mgr + + const config = serverConfig('server-1') + + const p1 = mgr.connect(config, 'user-1', 'ws-1') + const p2 = mgr.connect(config, 'user-1', 'ws-1') + + deferred.resolve() + const [r1, r2] = await Promise.all([p1, p2]) + + expect(instances).toHaveLength(1) + expect(r1.supportsListChanged).toBe(true) + expect(r2.supportsListChanged).toBe(false) + }) + + it('allows a new connect() after a previous one completes', async () => { + setupBaseMocks() + + const instances: MockMcpClient[] = [] + + vi.doMock('./client', () => ({ + McpClient: vi.fn().mockImplementation(() => { + const instance: MockMcpClient = { + connect: vi.fn().mockResolvedValue(undefined), + disconnect: vi.fn().mockResolvedValue(undefined), + 
hasListChangedCapability: vi.fn().mockReturnValue(false), + onClose: vi.fn(), + } + instances.push(instance) + return instance + }), + })) + + const { mcpConnectionManager: mgr } = await import('./connection-manager') + manager = mgr + + const config = serverConfig('server-2') + + const r1 = await mgr.connect(config, 'user-1', 'ws-1') + expect(r1.supportsListChanged).toBe(false) + + const r2 = await mgr.connect(config, 'user-1', 'ws-1') + expect(r2.supportsListChanged).toBe(false) + + expect(instances).toHaveLength(2) + }) + + it('cleans up connectingServers when connect() throws', async () => { + setupBaseMocks() + + let callCount = 0 + const instances: MockMcpClient[] = [] + + vi.doMock('./client', () => ({ + McpClient: vi.fn().mockImplementation(() => { + callCount++ + const instance: MockMcpClient = { + connect: + callCount === 1 + ? vi.fn().mockRejectedValue(new Error('Connection refused')) + : vi.fn().mockResolvedValue(undefined), + disconnect: vi.fn().mockResolvedValue(undefined), + hasListChangedCapability: vi.fn().mockReturnValue(true), + onClose: vi.fn(), + } + instances.push(instance) + return instance + }), + })) + + const { mcpConnectionManager: mgr } = await import('./connection-manager') + manager = mgr + + const config = serverConfig('server-3') + + const r1 = await mgr.connect(config, 'user-1', 'ws-1') + expect(r1.supportsListChanged).toBe(false) + + const r2 = await mgr.connect(config, 'user-1', 'ws-1') + expect(r2.supportsListChanged).toBe(true) + expect(instances).toHaveLength(2) + }) + }) + + describe('dispose', () => { + it('rejects new connections after dispose', async () => { + setupBaseMocks() + + vi.doMock('./client', () => ({ + McpClient: vi.fn().mockImplementation(() => ({ + connect: vi.fn().mockResolvedValue(undefined), + disconnect: vi.fn().mockResolvedValue(undefined), + hasListChangedCapability: vi.fn().mockReturnValue(true), + onClose: vi.fn(), + })), + })) + + const { mcpConnectionManager: mgr } = await 
import('./connection-manager') + manager = mgr + + mgr.dispose() + + const result = await mgr.connect(serverConfig('server-4'), 'user-1', 'ws-1') + expect(result.supportsListChanged).toBe(false) + }) + }) +}) diff --git a/apps/sim/lib/mcp/connection-manager.ts b/apps/sim/lib/mcp/connection-manager.ts new file mode 100644 index 000000000..d25670e9d --- /dev/null +++ b/apps/sim/lib/mcp/connection-manager.ts @@ -0,0 +1,395 @@ +/** + * MCP Connection Manager + * + * Maintains persistent connections to MCP servers that support + * `notifications/tools/list_changed`. When a notification arrives, + * the manager invalidates the tools cache and emits a ToolsChangedEvent + * so the frontend SSE endpoint can push updates to browsers. + * + * Servers that do not support `listChanged` fall back to the existing + * stale-time cache approach — no persistent connection is kept. + */ + +import { createLogger } from '@sim/logger' +import { isTest } from '@/lib/core/config/feature-flags' +import { McpClient } from '@/lib/mcp/client' +import { mcpPubSub } from '@/lib/mcp/pubsub' +import type { + ManagedConnectionState, + McpServerConfig, + McpToolsChangedCallback, + ToolsChangedEvent, +} from '@/lib/mcp/types' + +const logger = createLogger('McpConnectionManager') + +const MAX_CONNECTIONS = 50 +const MAX_RECONNECT_ATTEMPTS = 10 +const BASE_RECONNECT_DELAY_MS = 1000 +const IDLE_TIMEOUT_MS = 30 * 60 * 1000 // 30 minutes +const IDLE_CHECK_INTERVAL_MS = 5 * 60 * 1000 // 5 minutes + +type ToolsChangedListener = (event: ToolsChangedEvent) => void + +class McpConnectionManager { + private connections = new Map() + private states = new Map() + private reconnectTimers = new Map>() + private listeners = new Set() + private connectingServers = new Set() + private idleCheckTimer: ReturnType | null = null + private disposed = false + private unsubscribePubSub?: () => void + + constructor() { + if (mcpPubSub) { + this.unsubscribePubSub = mcpPubSub.onToolsChanged((event) => { + 
this.notifyLocalListeners(event) + }) + } + } + + /** + * Subscribe to tools-changed events from any managed connection. + * Returns an unsubscribe function. + */ + subscribe(listener: ToolsChangedListener): () => void { + this.listeners.add(listener) + return () => { + this.listeners.delete(listener) + } + } + + /** + * Establish a persistent connection to an MCP server. + * If the server supports `listChanged`, the connection is kept alive + * and notifications are forwarded to subscribers. + * + * If the server does NOT support `listChanged`, the client is disconnected + * immediately — there's nothing to listen for. + */ + async connect( + config: McpServerConfig, + userId: string, + workspaceId: string + ): Promise<{ supportsListChanged: boolean }> { + if (this.disposed) { + logger.warn('Connection manager is disposed, ignoring connect request') + return { supportsListChanged: false } + } + + const serverId = config.id + + if (this.connections.has(serverId) || this.connectingServers.has(serverId)) { + logger.info(`[${config.name}] Already has a managed connection or is connecting, skipping`) + const state = this.states.get(serverId) + return { supportsListChanged: state?.supportsListChanged ?? 
false } + } + + if (this.connections.size >= MAX_CONNECTIONS) { + logger.warn(`Max connections (${MAX_CONNECTIONS}) reached, cannot connect to ${config.name}`) + return { supportsListChanged: false } + } + + this.connectingServers.add(serverId) + + try { + const onToolsChanged: McpToolsChangedCallback = (sid) => { + this.handleToolsChanged(sid) + } + + const client = new McpClient({ + config, + securityPolicy: { + requireConsent: false, + auditLevel: 'basic', + maxToolExecutionsPerHour: 1000, + }, + onToolsChanged, + }) + + try { + await client.connect() + } catch (error) { + logger.error(`[${config.name}] Failed to connect for persistent monitoring:`, error) + return { supportsListChanged: false } + } + + const supportsListChanged = client.hasListChangedCapability() + + if (!supportsListChanged) { + logger.info( + `[${config.name}] Server does not support listChanged — disconnecting (fallback to cache)` + ) + await client.disconnect() + return { supportsListChanged: false } + } + + this.clearReconnectTimer(serverId) + + this.connections.set(serverId, client) + this.states.set(serverId, { + serverId, + serverName: config.name, + workspaceId, + userId, + connected: true, + supportsListChanged: true, + reconnectAttempts: 0, + lastActivity: Date.now(), + }) + + client.onClose(() => { + this.handleDisconnect(config, userId, workspaceId) + }) + + this.ensureIdleCheck() + + logger.info(`[${config.name}] Persistent connection established (listChanged supported)`) + return { supportsListChanged: true } + } finally { + this.connectingServers.delete(serverId) + } + } + + /** + * Disconnect a managed connection. 
+ */ + async disconnect(serverId: string): Promise { + this.clearReconnectTimer(serverId) + + const client = this.connections.get(serverId) + if (client) { + try { + await client.disconnect() + } catch (error) { + logger.warn(`Error disconnecting managed client ${serverId}:`, error) + } + this.connections.delete(serverId) + } + + this.states.delete(serverId) + logger.info(`Managed connection removed: ${serverId}`) + } + + /** + * Check whether a managed connection exists for the given server. + */ + hasConnection(serverId: string): boolean { + return this.connections.has(serverId) + } + + /** + * Get connection state for a server. + */ + getState(serverId: string): ManagedConnectionState | undefined { + return this.states.get(serverId) + } + + /** + * Get all managed connection states (for diagnostics). + */ + getAllStates(): ManagedConnectionState[] { + return [...this.states.values()] + } + + /** + * Dispose all connections and timers. + */ + dispose(): void { + this.disposed = true + + this.unsubscribePubSub?.() + + for (const timer of this.reconnectTimers.values()) { + clearTimeout(timer) + } + this.reconnectTimers.clear() + + if (this.idleCheckTimer) { + clearInterval(this.idleCheckTimer) + this.idleCheckTimer = null + } + + const disconnects = [...this.connections.entries()].map(async ([id, client]) => { + try { + await client.disconnect() + } catch (error) { + logger.warn(`Error disconnecting ${id} during dispose:`, error) + } + }) + + Promise.allSettled(disconnects).then(() => { + logger.info('Connection manager disposed') + }) + + this.connections.clear() + this.states.clear() + this.listeners.clear() + this.connectingServers.clear() + } + + /** + * Notify only process-local listeners. + * Called by the pub/sub subscription (receives events from all processes). 
+ */ + private notifyLocalListeners(event: ToolsChangedEvent): void { + for (const listener of this.listeners) { + try { + listener(event) + } catch (error) { + logger.error('Error in tools-changed listener:', error) + } + } + } + + /** + * Handle a tools/list_changed notification from an external MCP server. + * Publishes to pub/sub so all processes are notified. + */ + private handleToolsChanged(serverId: string): void { + const state = this.states.get(serverId) + if (!state) return + + state.lastActivity = Date.now() + + const event: ToolsChangedEvent = { + serverId, + serverName: state.serverName, + workspaceId: state.workspaceId, + timestamp: Date.now(), + } + + logger.info(`[${state.serverName}] Tools changed — publishing to pub/sub`) + + mcpPubSub?.publishToolsChanged(event) + } + + private handleDisconnect(config: McpServerConfig, userId: string, workspaceId: string): void { + const serverId = config.id + const state = this.states.get(serverId) + + if (!state || this.disposed) return + + state.connected = false + this.connections.delete(serverId) + + logger.warn(`[${config.name}] Persistent connection lost, scheduling reconnect`) + + this.scheduleReconnect(config, userId, workspaceId) + } + + private scheduleReconnect(config: McpServerConfig, userId: string, workspaceId: string): void { + const serverId = config.id + const state = this.states.get(serverId) + + if (!state || this.disposed) return + + if (state.reconnectAttempts >= MAX_RECONNECT_ATTEMPTS) { + logger.error( + `[${config.name}] Max reconnect attempts (${MAX_RECONNECT_ATTEMPTS}) reached — giving up` + ) + this.states.delete(serverId) + return + } + + const delay = Math.min(BASE_RECONNECT_DELAY_MS * 2 ** state.reconnectAttempts, 60_000) + state.reconnectAttempts++ + + logger.info( + `[${config.name}] Reconnecting in ${delay}ms (attempt ${state.reconnectAttempts}/${MAX_RECONNECT_ATTEMPTS})` + ) + + this.clearReconnectTimer(serverId) + + const timer = setTimeout(async () => { + 
this.reconnectTimers.delete(serverId) + + if (this.disposed) return + + const currentState = this.states.get(serverId) + if (currentState?.connected) { + logger.info( + `[${config.name}] Connection already re-established externally, skipping reconnect` + ) + return + } + + const attempts = state.reconnectAttempts + this.connections.delete(serverId) + this.states.delete(serverId) + + try { + const result = await this.connect(config, userId, workspaceId) + if (result.supportsListChanged) { + logger.info(`[${config.name}] Reconnected successfully`) + } else { + this.restoreReconnectState(config, userId, workspaceId, attempts) + this.scheduleReconnect(config, userId, workspaceId) + } + } catch (error) { + logger.error(`[${config.name}] Reconnect failed:`, error) + this.restoreReconnectState(config, userId, workspaceId, attempts) + this.scheduleReconnect(config, userId, workspaceId) + } + }, delay) + + this.reconnectTimers.set(serverId, timer) + } + + private clearReconnectTimer(serverId: string): void { + const timer = this.reconnectTimers.get(serverId) + if (timer) { + clearTimeout(timer) + this.reconnectTimers.delete(serverId) + } + } + + /** + * Restore minimal state so `scheduleReconnect` can check attempts and continue the retry loop. 
+ */ + private restoreReconnectState( + config: McpServerConfig, + userId: string, + workspaceId: string, + reconnectAttempts: number + ): void { + if (!this.states.has(config.id)) { + this.states.set(config.id, { + serverId: config.id, + serverName: config.name, + workspaceId, + userId, + connected: false, + supportsListChanged: false, + reconnectAttempts, + lastActivity: Date.now(), + }) + } + } + + private ensureIdleCheck(): void { + if (this.idleCheckTimer) return + + this.idleCheckTimer = setInterval(() => { + const now = Date.now() + for (const [serverId, state] of this.states) { + if (now - state.lastActivity > IDLE_TIMEOUT_MS) { + logger.info( + `[${state.serverName}] Idle timeout reached, disconnecting managed connection` + ) + this.disconnect(serverId) + } + } + + if (this.states.size === 0 && this.idleCheckTimer) { + clearInterval(this.idleCheckTimer) + this.idleCheckTimer = null + } + }, IDLE_CHECK_INTERVAL_MS) + } +} + +export const mcpConnectionManager = isTest + ? (null as unknown as McpConnectionManager) + : new McpConnectionManager() diff --git a/apps/sim/lib/mcp/oauth-discovery.ts b/apps/sim/lib/mcp/oauth-discovery.ts new file mode 100644 index 000000000..445d4e51d --- /dev/null +++ b/apps/sim/lib/mcp/oauth-discovery.ts @@ -0,0 +1,59 @@ +import { type NextRequest, NextResponse } from 'next/server' + +function getOrigin(request: NextRequest): string { + return request.nextUrl.origin +} + +export function createMcpAuthorizationServerMetadataResponse(request: NextRequest): NextResponse { + const origin = getOrigin(request) + const resource = `${origin}/api/mcp/copilot` + + return NextResponse.json( + { + issuer: resource, + token_endpoint: `${origin}/api/auth/oauth/token`, + token_endpoint_auth_methods_supported: ['none'], + grant_types_supported: ['authorization_code', 'refresh_token'], + response_types_supported: ['code'], + code_challenge_methods_supported: ['S256'], + scopes_supported: ['mcp:tools'], + resource, + // Non-standard extension for 
API-key-only clients. + x_sim_auth: { + type: 'api_key', + header: 'x-api-key', + }, + }, + { + headers: { + 'Cache-Control': 'no-store', + }, + } + ) +} + +export function createMcpProtectedResourceMetadataResponse(request: NextRequest): NextResponse { + const origin = getOrigin(request) + const resource = `${origin}/api/mcp/copilot` + const authorizationServerIssuer = `${origin}/api/mcp/copilot` + + return NextResponse.json( + { + resource, + // RFC 9728 expects issuer identifiers here, not metadata URLs. + authorization_servers: [authorizationServerIssuer], + bearer_methods_supported: ['header'], + scopes_supported: ['mcp:tools'], + // Non-standard extension for API-key-only clients. + x_sim_auth: { + type: 'api_key', + header: 'x-api-key', + }, + }, + { + headers: { + 'Cache-Control': 'no-store', + }, + } + ) +} diff --git a/apps/sim/lib/mcp/pubsub.test.ts b/apps/sim/lib/mcp/pubsub.test.ts new file mode 100644 index 000000000..7f7373e3e --- /dev/null +++ b/apps/sim/lib/mcp/pubsub.test.ts @@ -0,0 +1,93 @@ +/** + * @vitest-environment node + */ +import { createMockRedis, loggerMock, type MockRedis } from '@sim/testing' +import { describe, expect, it, vi } from 'vitest' + +/** Extend the @sim/testing Redis mock with the methods RedisMcpPubSub uses. */ +function createPubSubRedis(): MockRedis & { removeAllListeners: ReturnType } { + const mock = createMockRedis() + // ioredis subscribe invokes a callback as the last argument + mock.subscribe.mockImplementation((...args: unknown[]) => { + const cb = args[args.length - 1] + if (typeof cb === 'function') (cb as (err: null) => void)(null) + }) + // on() returns `this` for chaining in ioredis + mock.on.mockReturnThis() + return { ...mock, removeAllListeners: vi.fn().mockReturnThis() } +} + +/** Shared setup: resets modules and applies base mocks. Returns the two Redis instances. 
*/ +async function setupPubSub() { + const instances: ReturnType[] = [] + + vi.resetModules() + vi.doMock('@sim/logger', () => loggerMock) + vi.doMock('@/lib/core/config/env', () => ({ env: { REDIS_URL: 'redis://localhost:6379' } })) + vi.doMock('ioredis', () => ({ + default: vi.fn().mockImplementation(() => { + const instance = createPubSubRedis() + instances.push(instance) + return instance + }), + })) + + const { mcpPubSub } = await import('./pubsub') + const [pub, sub] = instances + + return { mcpPubSub, pub, sub, instances } +} + +describe('RedisMcpPubSub', () => { + it('creates two Redis clients (pub and sub)', async () => { + const { mcpPubSub, instances } = await setupPubSub() + + expect(instances).toHaveLength(2) + mcpPubSub.dispose() + }) + + it('registers error, connect, and message listeners', async () => { + const { mcpPubSub, pub, sub } = await setupPubSub() + + const pubEvents = pub.on.mock.calls.map((c: unknown[]) => c[0]) + const subEvents = sub.on.mock.calls.map((c: unknown[]) => c[0]) + + expect(pubEvents).toContain('error') + expect(pubEvents).toContain('connect') + expect(subEvents).toContain('error') + expect(subEvents).toContain('connect') + expect(subEvents).toContain('message') + + mcpPubSub.dispose() + }) + + describe('dispose', () => { + it('calls removeAllListeners on both pub and sub before quit', async () => { + const { mcpPubSub, pub, sub } = await setupPubSub() + + mcpPubSub.dispose() + + expect(pub.removeAllListeners).toHaveBeenCalledTimes(1) + expect(sub.removeAllListeners).toHaveBeenCalledTimes(1) + expect(sub.unsubscribe).toHaveBeenCalledTimes(1) + expect(pub.quit).toHaveBeenCalledTimes(1) + expect(sub.quit).toHaveBeenCalledTimes(1) + }) + + it('drops publish calls after dispose', async () => { + const { mcpPubSub, pub } = await setupPubSub() + + mcpPubSub.dispose() + pub.publish.mockClear() + + mcpPubSub.publishToolsChanged({ + serverId: 'srv-1', + serverName: 'Test', + workspaceId: 'ws-1', + timestamp: Date.now(), + }) + + 
expect(pub.publish).not.toHaveBeenCalled() + }) + }) +}) diff --git a/apps/sim/lib/mcp/pubsub.ts b/apps/sim/lib/mcp/pubsub.ts new file mode 100644 index 000000000..3451b54a3 --- /dev/null +++ b/apps/sim/lib/mcp/pubsub.ts @@ -0,0 +1,207 @@ +/** + * MCP Pub/Sub Adapter + * + * Broadcasts MCP notification events across processes using Redis Pub/Sub. + * Gracefully falls back to process-local EventEmitter when Redis is unavailable. + * + * Two channels: + * - `mcp:tools_changed` — external MCP server sent a listChanged notification + * (published by connection manager, consumed by events SSE endpoint) + * - `mcp:workflow_tools_changed` — workflow CRUD modified a workflow MCP server's tools + * (published by serve route, consumed by serve route on other processes to push to local SSE clients) + */ + +import { EventEmitter } from 'events' +import { createLogger } from '@sim/logger' +import Redis from 'ioredis' +import { env } from '@/lib/core/config/env' +import type { ToolsChangedEvent, WorkflowToolsChangedEvent } from '@/lib/mcp/types' + +const logger = createLogger('McpPubSub') + +const CHANNEL_TOOLS_CHANGED = 'mcp:tools_changed' +const CHANNEL_WORKFLOW_TOOLS_CHANGED = 'mcp:workflow_tools_changed' + +type ToolsChangedHandler = (event: ToolsChangedEvent) => void +type WorkflowToolsChangedHandler = (event: WorkflowToolsChangedEvent) => void + +interface McpPubSubAdapter { + publishToolsChanged(event: ToolsChangedEvent): void + publishWorkflowToolsChanged(event: WorkflowToolsChangedEvent): void + onToolsChanged(handler: ToolsChangedHandler): () => void + onWorkflowToolsChanged(handler: WorkflowToolsChangedHandler): () => void + dispose(): void +} + +/** + * Redis-backed pub/sub adapter. + * Uses dedicated pub and sub clients (ioredis requires separate connections for subscribers). 
+ */ +class RedisMcpPubSub implements McpPubSubAdapter { + private pub: Redis + private sub: Redis + private toolsChangedHandlers = new Set() + private workflowToolsChangedHandlers = new Set() + private disposed = false + + constructor(redisUrl: string) { + const commonOpts = { + keepAlive: 1000, + connectTimeout: 10000, + maxRetriesPerRequest: null as unknown as number, + enableOfflineQueue: true, + retryStrategy: (times: number) => { + if (times > 10) return 30000 + return Math.min(times * 500, 5000) + }, + } + + this.pub = new Redis(redisUrl, { ...commonOpts, connectionName: 'mcp-pubsub-pub' }) + this.sub = new Redis(redisUrl, { ...commonOpts, connectionName: 'mcp-pubsub-sub' }) + + this.pub.on('error', (err) => logger.error('MCP pub/sub publish client error:', err.message)) + this.sub.on('error', (err) => logger.error('MCP pub/sub subscribe client error:', err.message)) + this.pub.on('connect', () => logger.info('MCP pub/sub publish client connected')) + this.sub.on('connect', () => logger.info('MCP pub/sub subscribe client connected')) + + this.sub.subscribe(CHANNEL_TOOLS_CHANGED, CHANNEL_WORKFLOW_TOOLS_CHANGED, (err) => { + if (err) { + logger.error('Failed to subscribe to MCP pub/sub channels:', err) + } else { + logger.info('Subscribed to MCP pub/sub channels') + } + }) + + this.sub.on('message', (channel: string, message: string) => { + try { + const parsed = JSON.parse(message) + if (channel === CHANNEL_TOOLS_CHANGED) { + for (const handler of this.toolsChangedHandlers) { + try { + handler(parsed as ToolsChangedEvent) + } catch (err) { + logger.error('Error in tools_changed handler:', err) + } + } + } else if (channel === CHANNEL_WORKFLOW_TOOLS_CHANGED) { + for (const handler of this.workflowToolsChangedHandlers) { + try { + handler(parsed as WorkflowToolsChangedEvent) + } catch (err) { + logger.error('Error in workflow_tools_changed handler:', err) + } + } + } + } catch (err) { + logger.error('Failed to parse pub/sub message:', err) + } + }) + } + + 
publishToolsChanged(event: ToolsChangedEvent): void { + if (this.disposed) return + this.pub.publish(CHANNEL_TOOLS_CHANGED, JSON.stringify(event)).catch((err) => { + logger.error('Failed to publish tools_changed:', err) + }) + } + + publishWorkflowToolsChanged(event: WorkflowToolsChangedEvent): void { + if (this.disposed) return + this.pub.publish(CHANNEL_WORKFLOW_TOOLS_CHANGED, JSON.stringify(event)).catch((err) => { + logger.error('Failed to publish workflow_tools_changed:', err) + }) + } + + onToolsChanged(handler: ToolsChangedHandler): () => void { + this.toolsChangedHandlers.add(handler) + return () => { + this.toolsChangedHandlers.delete(handler) + } + } + + onWorkflowToolsChanged(handler: WorkflowToolsChangedHandler): () => void { + this.workflowToolsChangedHandlers.add(handler) + return () => { + this.workflowToolsChangedHandlers.delete(handler) + } + } + + dispose(): void { + this.disposed = true + this.toolsChangedHandlers.clear() + this.workflowToolsChangedHandlers.clear() + + const noop = () => {} + this.pub.removeAllListeners() + this.sub.removeAllListeners() + this.pub.on('error', noop) + this.sub.on('error', noop) + + this.sub.unsubscribe().catch(noop) + this.pub.quit().catch(noop) + this.sub.quit().catch(noop) + logger.info('Redis MCP pub/sub disposed') + } +} + +/** + * Process-local fallback using EventEmitter. + * Used when Redis is not configured — notifications only reach listeners in the same process. 
+ */ +class LocalMcpPubSub implements McpPubSubAdapter { + private emitter = new EventEmitter() + + constructor() { + this.emitter.setMaxListeners(100) + logger.info('MCP pub/sub: Using process-local EventEmitter (Redis not configured)') + } + + publishToolsChanged(event: ToolsChangedEvent): void { + this.emitter.emit(CHANNEL_TOOLS_CHANGED, event) + } + + publishWorkflowToolsChanged(event: WorkflowToolsChangedEvent): void { + this.emitter.emit(CHANNEL_WORKFLOW_TOOLS_CHANGED, event) + } + + onToolsChanged(handler: ToolsChangedHandler): () => void { + this.emitter.on(CHANNEL_TOOLS_CHANGED, handler) + return () => { + this.emitter.off(CHANNEL_TOOLS_CHANGED, handler) + } + } + + onWorkflowToolsChanged(handler: WorkflowToolsChangedHandler): () => void { + this.emitter.on(CHANNEL_WORKFLOW_TOOLS_CHANGED, handler) + return () => { + this.emitter.off(CHANNEL_WORKFLOW_TOOLS_CHANGED, handler) + } + } + + dispose(): void { + this.emitter.removeAllListeners() + logger.info('Local MCP pub/sub disposed') + } +} + +/** + * Create the appropriate pub/sub adapter based on Redis availability. + */ +function createMcpPubSub(): McpPubSubAdapter { + const redisUrl = env.REDIS_URL + + if (redisUrl) { + try { + logger.info('MCP pub/sub: Using Redis') + return new RedisMcpPubSub(redisUrl) + } catch (err) { + logger.error('Failed to create Redis pub/sub, falling back to local:', err) + return new LocalMcpPubSub() + } + } + + return new LocalMcpPubSub() +} + +export const mcpPubSub: McpPubSubAdapter = + typeof window !== 'undefined' ? 
(null as unknown as McpPubSubAdapter) : createMcpPubSub() diff --git a/apps/sim/lib/mcp/service.ts b/apps/sim/lib/mcp/service.ts index 64001b50c..e38cfb3f0 100644 --- a/apps/sim/lib/mcp/service.ts +++ b/apps/sim/lib/mcp/service.ts @@ -9,6 +9,7 @@ import { and, eq, isNull } from 'drizzle-orm' import { isTest } from '@/lib/core/config/feature-flags' import { generateRequestId } from '@/lib/core/utils/request' import { McpClient } from '@/lib/mcp/client' +import { mcpConnectionManager } from '@/lib/mcp/connection-manager' import { resolveMcpConfigEnvVars } from '@/lib/mcp/resolve-config' import { createMcpCacheAdapter, @@ -31,16 +32,24 @@ const logger = createLogger('McpService') class McpService { private cacheAdapter: McpCacheStorageAdapter private readonly cacheTimeout = MCP_CONSTANTS.CACHE_TIMEOUT + private unsubscribeConnectionManager?: () => void constructor() { this.cacheAdapter = createMcpCacheAdapter() logger.info(`MCP Service initialized with ${getMcpCacheType()} cache`) + + if (mcpConnectionManager) { + this.unsubscribeConnectionManager = mcpConnectionManager.subscribe((event) => { + this.clearCache(event.workspaceId) + }) + } } /** * Dispose of the service and cleanup resources */ dispose(): void { + this.unsubscribeConnectionManager?.() this.cacheAdapter.dispose() logger.info('MCP Service disposed') } @@ -328,7 +337,7 @@ class McpService { logger.debug( `[${requestId}] Discovered ${tools.length} tools from server ${config.name}` ) - return { serverId: config.id, tools } + return { serverId: config.id, tools, resolvedConfig } } finally { await client.disconnect() } @@ -364,6 +373,21 @@ class McpService { logger.error(`[${requestId}] Error updating server statuses:`, err) }) + // Fire-and-forget persistent connections for servers that support listChanged + if (mcpConnectionManager) { + for (const [index, result] of results.entries()) { + if (result.status === 'fulfilled') { + const { resolvedConfig } = result.value + 
mcpConnectionManager.connect(resolvedConfig, userId, workspaceId).catch((err) => { + logger.warn( + `[${requestId}] Persistent connection failed for ${servers[index].name}:`, + err + ) + }) + } + } + } + if (failedCount === 0) { try { await this.cacheAdapter.set(cacheKey, allTools, this.cacheTimeout) diff --git a/apps/sim/lib/mcp/types.ts b/apps/sim/lib/mcp/types.ts index f9e7948f0..b7e0d838e 100644 --- a/apps/sim/lib/mcp/types.ts +++ b/apps/sim/lib/mcp/types.ts @@ -147,6 +147,52 @@ export interface McpServerSummary { error?: string } +/** + * Callback invoked when an MCP server sends a `notifications/tools/list_changed` notification. + */ +export type McpToolsChangedCallback = (serverId: string) => void + +/** + * Options for creating an McpClient with notification support. + */ +export interface McpClientOptions { + config: McpServerConfig + securityPolicy?: McpSecurityPolicy + onToolsChanged?: McpToolsChangedCallback +} + +/** + * Event emitted by the connection manager when a server's tools change. + */ +export interface ToolsChangedEvent { + serverId: string + serverName: string + workspaceId: string + timestamp: number +} + +/** + * State of a managed persistent connection. + */ +export interface ManagedConnectionState { + serverId: string + serverName: string + workspaceId: string + userId: string + connected: boolean + supportsListChanged: boolean + reconnectAttempts: number + lastActivity: number +} + +/** + * Event emitted when workflow CRUD modifies a workflow MCP server's tools. 
+ */ +export interface WorkflowToolsChangedEvent { + serverId: string + workspaceId: string +} + export interface McpApiResponse { success: boolean data?: T diff --git a/apps/sim/lib/webhooks/utils.server.ts b/apps/sim/lib/webhooks/utils.server.ts index 39371150c..2d7498627 100644 --- a/apps/sim/lib/webhooks/utils.server.ts +++ b/apps/sim/lib/webhooks/utils.server.ts @@ -527,17 +527,61 @@ export async function validateTwilioSignature( } } -const SLACK_FILE_HOSTS = new Set(['files.slack.com', 'files-pri.slack.com']) const SLACK_MAX_FILE_SIZE = 50 * 1024 * 1024 // 50 MB -const SLACK_MAX_FILES = 10 +const SLACK_MAX_FILES = 15 + +/** + * Resolves the full file object from the Slack API when the event payload + * only contains a partial file (e.g. missing url_private due to file_access restrictions). + * @see https://docs.slack.dev/reference/methods/files.info + */ +async function resolveSlackFileInfo( + fileId: string, + botToken: string +): Promise<{ url_private?: string; name?: string; mimetype?: string; size?: number } | null> { + try { + const response = await fetch( + `https://slack.com/api/files.info?file=${encodeURIComponent(fileId)}`, + { + headers: { Authorization: `Bearer ${botToken}` }, + } + ) + + const data = (await response.json()) as { + ok: boolean + error?: string + file?: Record + } + + if (!data.ok || !data.file) { + logger.warn('Slack files.info failed', { fileId, error: data.error }) + return null + } + + return { + url_private: data.file.url_private, + name: data.file.name, + mimetype: data.file.mimetype, + size: data.file.size, + } + } catch (error) { + logger.error('Error calling Slack files.info', { + fileId, + error: error instanceof Error ? error.message : String(error), + }) + return null + } +} /** * Downloads file attachments from Slack using the bot token. 
* Returns files in the format expected by WebhookAttachmentProcessor: * { name, data (base64 string), mimeType, size } * + * When the event payload contains partial file objects (missing url_private), + * falls back to the Slack files.info API to resolve the full file metadata. + * * Security: - * - Validates each url_private against allowlisted Slack file hosts * - Uses validateUrlWithDNS + secureFetchWithPinnedIP to prevent SSRF * - Enforces per-file size limit and max file count */ @@ -549,30 +593,31 @@ async function downloadSlackFiles( const downloaded: Array<{ name: string; data: string; mimeType: string; size: number }> = [] for (const file of filesToProcess) { - const urlPrivate = file.url_private as string | undefined + let urlPrivate = file.url_private as string | undefined + let fileName = file.name as string | undefined + let fileMimeType = file.mimetype as string | undefined + let fileSize = file.size as number | undefined + + // If url_private is missing, resolve via files.info API + if (!urlPrivate && file.id) { + const resolved = await resolveSlackFileInfo(file.id, botToken) + if (resolved?.url_private) { + urlPrivate = resolved.url_private + fileName = fileName || resolved.name + fileMimeType = fileMimeType || resolved.mimetype + fileSize = fileSize ?? 
resolved.size + } + } + if (!urlPrivate) { - continue - } - - // Validate the URL points to a known Slack file host - let parsedUrl: URL - try { - parsedUrl = new URL(urlPrivate) - } catch { - logger.warn('Slack file has invalid url_private, skipping', { fileId: file.id }) - continue - } - - if (!SLACK_FILE_HOSTS.has(parsedUrl.hostname)) { - logger.warn('Slack file url_private points to unexpected host, skipping', { + logger.warn('Slack file has no url_private and could not be resolved, skipping', { fileId: file.id, - hostname: sanitizeUrlForLog(urlPrivate), }) continue } // Skip files that exceed the size limit - const reportedSize = Number(file.size) || 0 + const reportedSize = Number(fileSize) || 0 if (reportedSize > SLACK_MAX_FILE_SIZE) { logger.warn('Slack file exceeds size limit, skipping', { fileId: file.id, @@ -618,9 +663,9 @@ async function downloadSlackFiles( } downloaded.push({ - name: file.name || 'download', + name: fileName || 'download', data: buffer.toString('base64'), - mimeType: file.mimetype || 'application/octet-stream', + mimeType: fileMimeType || 'application/octet-stream', size: buffer.length, }) } catch (error) { diff --git a/apps/sim/lib/workflows/executor/execute-workflow.ts b/apps/sim/lib/workflows/executor/execute-workflow.ts index 8edce5526..82813ce76 100644 --- a/apps/sim/lib/workflows/executor/execute-workflow.ts +++ b/apps/sim/lib/workflows/executor/execute-workflow.ts @@ -4,7 +4,7 @@ import { LoggingSession } from '@/lib/logs/execution/logging-session' import { executeWorkflowCore } from '@/lib/workflows/executor/execution-core' import { PauseResumeManager } from '@/lib/workflows/executor/human-in-the-loop-manager' import { ExecutionSnapshot } from '@/executor/execution/snapshot' -import type { ExecutionMetadata } from '@/executor/execution/types' +import type { ExecutionMetadata, SerializableExecutionState } from '@/executor/execution/types' import type { ExecutionResult, StreamingExecution } from '@/executor/types' const logger = 
createLogger('WorkflowExecution') @@ -20,6 +20,15 @@ export interface ExecuteWorkflowOptions { includeFileBase64?: boolean base64MaxBytes?: number abortSignal?: AbortSignal + /** Use the live/draft workflow state instead of the deployed state. Used by copilot. */ + useDraftState?: boolean + /** Stop execution after this block completes. Used for "run until block" feature. */ + stopAfterBlockId?: string + /** Run-from-block configuration using a prior execution snapshot. */ + runFromBlock?: { + startBlockId: string + sourceSnapshot: SerializableExecutionState + } } export interface WorkflowInfo { @@ -57,7 +66,7 @@ export async function executeWorkflow( userId: actorUserId, workflowUserId: workflow.userId, triggerType, - useDraftState: false, + useDraftState: streamConfig?.useDraftState ?? false, startTime: new Date().toISOString(), isClientSession: false, } @@ -84,6 +93,8 @@ export async function executeWorkflow( includeFileBase64: streamConfig?.includeFileBase64, base64MaxBytes: streamConfig?.base64MaxBytes, abortSignal: streamConfig?.abortSignal, + stopAfterBlockId: streamConfig?.stopAfterBlockId, + runFromBlock: streamConfig?.runFromBlock, }) if (result.status === 'paused') { diff --git a/apps/sim/lib/workflows/executor/execution-core.ts b/apps/sim/lib/workflows/executor/execution-core.ts index 60998d934..56926d627 100644 --- a/apps/sim/lib/workflows/executor/execution-core.ts +++ b/apps/sim/lib/workflows/executor/execution-core.ts @@ -400,6 +400,7 @@ export async function executeWorkflowCore( finalOutput: result.output || {}, traceSpans: traceSpans || [], workflowInput: processedInput, + executionState: result.executionState, }) await clearExecutionCancellation(executionId) diff --git a/apps/sim/lib/workflows/executor/execution-state.ts b/apps/sim/lib/workflows/executor/execution-state.ts new file mode 100644 index 000000000..490895a89 --- /dev/null +++ b/apps/sim/lib/workflows/executor/execution-state.ts @@ -0,0 +1,53 @@ +import { db } from '@sim/db' +import { 
workflowExecutionLogs } from '@sim/db/schema' +import { and, desc, eq, sql } from 'drizzle-orm' +import type { SerializableExecutionState } from '@/executor/execution/types' + +function isSerializableExecutionState(value: unknown): value is SerializableExecutionState { + if (!value || typeof value !== 'object') return false + const state = value as Record + return ( + typeof state.blockStates === 'object' && + Array.isArray(state.executedBlocks) && + Array.isArray(state.blockLogs) && + typeof state.decisions === 'object' && + Array.isArray(state.completedLoops) && + Array.isArray(state.activeExecutionPath) + ) +} + +function extractExecutionState(executionData: unknown): SerializableExecutionState | null { + if (!executionData || typeof executionData !== 'object') return null + const state = (executionData as Record).executionState + return isSerializableExecutionState(state) ? state : null +} + +export async function getExecutionState( + executionId: string +): Promise { + const [row] = await db + .select({ executionData: workflowExecutionLogs.executionData }) + .from(workflowExecutionLogs) + .where(eq(workflowExecutionLogs.executionId, executionId)) + .limit(1) + + return extractExecutionState(row?.executionData) +} + +export async function getLatestExecutionState( + workflowId: string +): Promise { + const [row] = await db + .select({ executionData: workflowExecutionLogs.executionData }) + .from(workflowExecutionLogs) + .where( + and( + eq(workflowExecutionLogs.workflowId, workflowId), + sql`${workflowExecutionLogs.executionData} -> 'executionState' IS NOT NULL` + ) + ) + .orderBy(desc(workflowExecutionLogs.startedAt)) + .limit(1) + + return extractExecutionState(row?.executionData) +} diff --git a/apps/sim/lib/workflows/sanitization/references.test.ts b/apps/sim/lib/workflows/sanitization/references.test.ts index 4aece4c77..83b861384 100644 --- a/apps/sim/lib/workflows/sanitization/references.test.ts +++ b/apps/sim/lib/workflows/sanitization/references.test.ts 
@@ -43,4 +43,13 @@ describe('isLikelyReferenceSegment', () => { it('should return false when leading content is not comparator characters', () => { expect(isLikelyReferenceSegment('')).toBe(false) }) + + it('should return true for references starting with a digit', () => { + expect(isLikelyReferenceSegment('<1password1>')).toBe(true) + expect(isLikelyReferenceSegment('<1password1.secret>')).toBe(true) + }) + + it('should return false for purely numeric references', () => { + expect(isLikelyReferenceSegment('<123>')).toBe(false) + }) }) diff --git a/apps/sim/lib/workflows/sanitization/references.ts b/apps/sim/lib/workflows/sanitization/references.ts index 2290f150f..8f30762df 100644 --- a/apps/sim/lib/workflows/sanitization/references.ts +++ b/apps/sim/lib/workflows/sanitization/references.ts @@ -70,7 +70,7 @@ export function isLikelyReferenceSegment(segment: string): boolean { if (INVALID_REFERENCE_CHARS.test(beforeDot) || INVALID_REFERENCE_CHARS.test(afterDot)) { return false } - } else if (INVALID_REFERENCE_CHARS.test(inner) || inner.match(/^\d/) || inner.match(/\s\d/)) { + } else if (INVALID_REFERENCE_CHARS.test(inner) || inner.match(/^\d+$/) || inner.match(/\s\d/)) { return false } diff --git a/apps/sim/lib/workflows/utils.ts b/apps/sim/lib/workflows/utils.ts index d17744af6..7f952510f 100644 --- a/apps/sim/lib/workflows/utils.ts +++ b/apps/sim/lib/workflows/utils.ts @@ -1,7 +1,7 @@ import { db } from '@sim/db' import { permissions, userStats, workflow as workflowTable } from '@sim/db/schema' import { createLogger } from '@sim/logger' -import { and, eq } from 'drizzle-orm' +import { and, asc, eq, inArray, or } from 'drizzle-orm' import { NextResponse } from 'next/server' import { getSession } from '@/lib/auth' import { getWorkspaceWithOwner, type PermissionType } from '@/lib/workspaces/permissions/utils' @@ -15,6 +15,53 @@ export async function getWorkflowById(id: string) { return rows[0] } +export async function resolveWorkflowIdForUser( + userId: string, + 
workflowId?: string, + workflowName?: string +): Promise<{ workflowId: string; workflowName?: string } | null> { + if (workflowId) { + return { workflowId } + } + + const workspaceIds = await db + .select({ entityId: permissions.entityId }) + .from(permissions) + .where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace'))) + + const workspaceIdList = workspaceIds.map((row) => row.entityId) + + const workflowConditions = [eq(workflowTable.userId, userId)] + if (workspaceIdList.length > 0) { + workflowConditions.push(inArray(workflowTable.workspaceId, workspaceIdList)) + } + + const workflows = await db + .select() + .from(workflowTable) + .where(or(...workflowConditions)) + .orderBy(asc(workflowTable.sortOrder), asc(workflowTable.createdAt), asc(workflowTable.id)) + + if (workflows.length === 0) { + return null + } + + if (workflowName) { + const match = workflows.find( + (w) => + String(w.name || '') + .trim() + .toLowerCase() === workflowName.toLowerCase() + ) + if (match) { + return { workflowId: match.id, workflowName: match.name || undefined } + } + return null + } + + return { workflowId: workflows[0].id, workflowName: workflows[0].name || undefined } +} + type WorkflowRecord = ReturnType extends Promise ? 
NonNullable : never diff --git a/apps/sim/next.config.ts b/apps/sim/next.config.ts index bbeb57c94..0a9ed16cd 100644 --- a/apps/sim/next.config.ts +++ b/apps/sim/next.config.ts @@ -77,6 +77,7 @@ const nextConfig: NextConfig = { resolveExtensions: ['.tsx', '.ts', '.jsx', '.js', '.mjs', '.json'], }, serverExternalPackages: [ + '@1password/sdk', 'unpdf', 'ffmpeg-static', 'fluent-ffmpeg', diff --git a/apps/sim/package.json b/apps/sim/package.json index 6dcc55980..73a0b0b06 100644 --- a/apps/sim/package.json +++ b/apps/sim/package.json @@ -23,6 +23,7 @@ "generate-docs": "bun run ../../scripts/generate-docs.ts" }, "dependencies": { + "@1password/sdk": "0.3.1", "@a2a-js/sdk": "0.3.7", "@anthropic-ai/sdk": "0.71.2", "@aws-sdk/client-bedrock-runtime": "3.940.0", diff --git a/apps/sim/proxy.ts b/apps/sim/proxy.ts index 773700a75..c90df2eec 100644 --- a/apps/sim/proxy.ts +++ b/apps/sim/proxy.ts @@ -100,11 +100,17 @@ function handleWorkspaceInvitationAPI( */ function handleSecurityFiltering(request: NextRequest): NextResponse | null { const userAgent = request.headers.get('user-agent') || '' - const isWebhookEndpoint = request.nextUrl.pathname.startsWith('/api/webhooks/trigger/') + const { pathname } = request.nextUrl + const isWebhookEndpoint = pathname.startsWith('/api/webhooks/trigger/') + const isMcpEndpoint = pathname.startsWith('/api/mcp/') + const isMcpOauthDiscoveryEndpoint = + pathname.startsWith('/.well-known/oauth-authorization-server') || + pathname.startsWith('/.well-known/oauth-protected-resource') const isSuspicious = SUSPICIOUS_UA_PATTERNS.some((pattern) => pattern.test(userAgent)) - // Block suspicious requests, but exempt webhook endpoints from User-Agent validation - if (isSuspicious && !isWebhookEndpoint) { + // Block suspicious requests, but exempt machine-to-machine endpoints that may + // legitimately omit User-Agent headers (webhooks and MCP protocol discovery/calls). 
+ if (isSuspicious && !isWebhookEndpoint && !isMcpEndpoint && !isMcpOauthDiscoveryEndpoint) { logger.warn('Blocked suspicious request', { userAgent, ip: request.headers.get('x-forwarded-for') || 'unknown', diff --git a/apps/sim/stores/panel/copilot/store.ts b/apps/sim/stores/panel/copilot/store.ts index e368d412e..1dd8540ee 100644 --- a/apps/sim/stores/panel/copilot/store.ts +++ b/apps/sim/stores/panel/copilot/store.ts @@ -4,2342 +4,920 @@ import { createLogger } from '@sim/logger' import { create } from 'zustand' import { devtools } from 'zustand/middleware' import { type CopilotChat, sendStreamingMessage } from '@/lib/copilot/api' -import type { CopilotTransportMode } from '@/lib/copilot/models' -import type { - BaseClientToolMetadata, - ClientToolDisplay, -} from '@/lib/copilot/tools/client/base-tool' -import { ClientToolCallState } from '@/lib/copilot/tools/client/base-tool' -import { GetBlockConfigClientTool } from '@/lib/copilot/tools/client/blocks/get-block-config' -import { GetBlockOptionsClientTool } from '@/lib/copilot/tools/client/blocks/get-block-options' -import { GetBlocksAndToolsClientTool } from '@/lib/copilot/tools/client/blocks/get-blocks-and-tools' -import { GetBlocksMetadataClientTool } from '@/lib/copilot/tools/client/blocks/get-blocks-metadata' -import { GetTriggerBlocksClientTool } from '@/lib/copilot/tools/client/blocks/get-trigger-blocks' -import { GetExamplesRagClientTool } from '@/lib/copilot/tools/client/examples/get-examples-rag' -import { GetOperationsExamplesClientTool } from '@/lib/copilot/tools/client/examples/get-operations-examples' -import { GetTriggerExamplesClientTool } from '@/lib/copilot/tools/client/examples/get-trigger-examples' -import { SummarizeClientTool } from '@/lib/copilot/tools/client/examples/summarize' -import { KnowledgeBaseClientTool } from '@/lib/copilot/tools/client/knowledge/knowledge-base' +import { applySseEvent, sseHandlers } from '@/lib/copilot/client-sse' import { - getClientTool, - registerClientTool, - 
registerToolStateSync, -} from '@/lib/copilot/tools/client/manager' -import { NavigateUIClientTool } from '@/lib/copilot/tools/client/navigation/navigate-ui' -import { AuthClientTool } from '@/lib/copilot/tools/client/other/auth' -import { CheckoffTodoClientTool } from '@/lib/copilot/tools/client/other/checkoff-todo' -import { CrawlWebsiteClientTool } from '@/lib/copilot/tools/client/other/crawl-website' -import { CustomToolClientTool } from '@/lib/copilot/tools/client/other/custom-tool' -import { DebugClientTool } from '@/lib/copilot/tools/client/other/debug' -import { DeployClientTool } from '@/lib/copilot/tools/client/other/deploy' -import { EditClientTool } from '@/lib/copilot/tools/client/other/edit' -import { EvaluateClientTool } from '@/lib/copilot/tools/client/other/evaluate' -import { GetPageContentsClientTool } from '@/lib/copilot/tools/client/other/get-page-contents' -import { InfoClientTool } from '@/lib/copilot/tools/client/other/info' -import { KnowledgeClientTool } from '@/lib/copilot/tools/client/other/knowledge' -import { MakeApiRequestClientTool } from '@/lib/copilot/tools/client/other/make-api-request' -import { MarkTodoInProgressClientTool } from '@/lib/copilot/tools/client/other/mark-todo-in-progress' -import { OAuthRequestAccessClientTool } from '@/lib/copilot/tools/client/other/oauth-request-access' -import { PlanClientTool } from '@/lib/copilot/tools/client/other/plan' -import { RememberDebugClientTool } from '@/lib/copilot/tools/client/other/remember-debug' -import { ResearchClientTool } from '@/lib/copilot/tools/client/other/research' -import { ScrapePageClientTool } from '@/lib/copilot/tools/client/other/scrape-page' -import { SearchDocumentationClientTool } from '@/lib/copilot/tools/client/other/search-documentation' -import { SearchErrorsClientTool } from '@/lib/copilot/tools/client/other/search-errors' -import { SearchLibraryDocsClientTool } from '@/lib/copilot/tools/client/other/search-library-docs' -import { SearchOnlineClientTool } 
from '@/lib/copilot/tools/client/other/search-online' -import { SearchPatternsClientTool } from '@/lib/copilot/tools/client/other/search-patterns' -import { SleepClientTool } from '@/lib/copilot/tools/client/other/sleep' -import { TestClientTool } from '@/lib/copilot/tools/client/other/test' -import { TourClientTool } from '@/lib/copilot/tools/client/other/tour' -import { WorkflowClientTool } from '@/lib/copilot/tools/client/other/workflow' -import { createExecutionContext, getTool } from '@/lib/copilot/tools/client/registry' -import { GetCredentialsClientTool } from '@/lib/copilot/tools/client/user/get-credentials' -import { SetEnvironmentVariablesClientTool } from '@/lib/copilot/tools/client/user/set-environment-variables' -import { CheckDeploymentStatusClientTool } from '@/lib/copilot/tools/client/workflow/check-deployment-status' -import { CreateWorkspaceMcpServerClientTool } from '@/lib/copilot/tools/client/workflow/create-workspace-mcp-server' -import { DeployApiClientTool } from '@/lib/copilot/tools/client/workflow/deploy-api' -import { DeployChatClientTool } from '@/lib/copilot/tools/client/workflow/deploy-chat' -import { DeployMcpClientTool } from '@/lib/copilot/tools/client/workflow/deploy-mcp' -import { EditWorkflowClientTool } from '@/lib/copilot/tools/client/workflow/edit-workflow' -import { GetBlockOutputsClientTool } from '@/lib/copilot/tools/client/workflow/get-block-outputs' -import { GetBlockUpstreamReferencesClientTool } from '@/lib/copilot/tools/client/workflow/get-block-upstream-references' -import { GetUserWorkflowClientTool } from '@/lib/copilot/tools/client/workflow/get-user-workflow' -import { GetWorkflowConsoleClientTool } from '@/lib/copilot/tools/client/workflow/get-workflow-console' -import { GetWorkflowDataClientTool } from '@/lib/copilot/tools/client/workflow/get-workflow-data' -import { GetWorkflowFromNameClientTool } from '@/lib/copilot/tools/client/workflow/get-workflow-from-name' -import { ListUserWorkflowsClientTool } from 
'@/lib/copilot/tools/client/workflow/list-user-workflows' -import { ListWorkspaceMcpServersClientTool } from '@/lib/copilot/tools/client/workflow/list-workspace-mcp-servers' -import { ManageCustomToolClientTool } from '@/lib/copilot/tools/client/workflow/manage-custom-tool' -import { ManageMcpToolClientTool } from '@/lib/copilot/tools/client/workflow/manage-mcp-tool' -import { RedeployClientTool } from '@/lib/copilot/tools/client/workflow/redeploy' -import { RunWorkflowClientTool } from '@/lib/copilot/tools/client/workflow/run-workflow' -import { SetGlobalWorkflowVariablesClientTool } from '@/lib/copilot/tools/client/workflow/set-global-workflow-variables' + appendContinueOption, + appendContinueOptionBlock, + createErrorMessage, + createStreamingMessage, + createUserMessage, + finalizeThinkingBlock, + stripContinueOption, + stripContinueOptionFromBlocks, +} from '@/lib/copilot/client-sse/content-blocks' +import { flushStreamingUpdates, stopStreamingUpdates } from '@/lib/copilot/client-sse/handlers' +import type { ClientContentBlock, ClientStreamingContext } from '@/lib/copilot/client-sse/types' +import { + COPILOT_AUTO_ALLOWED_TOOLS_API_PATH, + COPILOT_CHAT_API_PATH, + COPILOT_CHAT_STREAM_API_PATH, + COPILOT_CHECKPOINTS_API_PATH, + COPILOT_CHECKPOINTS_REVERT_API_PATH, + COPILOT_CONFIRM_API_PATH, + COPILOT_CREDENTIALS_API_PATH, + COPILOT_DELETE_CHAT_API_PATH, + MAX_RESUME_ATTEMPTS, + OPTIMISTIC_TITLE_MAX_LENGTH, + QUEUE_PROCESS_DELAY_MS, + STREAM_STORAGE_KEY, + STREAM_TIMEOUT_MS, + SUBSCRIPTION_INVALIDATE_DELAY_MS, +} from '@/lib/copilot/constants' +import { + buildCheckpointWorkflowState, + buildToolCallsById, + normalizeMessagesForUI, + persistMessages, + saveMessageCheckpoint, +} from '@/lib/copilot/messages' +import type { CopilotTransportMode } from '@/lib/copilot/models' +import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser' +import { + abortAllInProgressTools, + cleanupActiveState, + isRejectedState, + resolveToolDisplay, + stripTodoTags, +} 
from '@/lib/copilot/store-utils' +import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' import { getQueryClient } from '@/app/_shell/providers/query-provider' import { subscriptionKeys } from '@/hooks/queries/subscription' import type { ChatContext, + CheckpointEntry, CopilotMessage, CopilotStore, + CopilotStreamInfo, CopilotToolCall, MessageFileAttachment, } from '@/stores/panel/copilot/types' import { useWorkflowDiffStore } from '@/stores/workflow-diff/store' import { useSubBlockStore } from '@/stores/workflows/subblock/store' -import { mergeSubblockState } from '@/stores/workflows/utils' import { useWorkflowStore } from '@/stores/workflows/workflow/store' import type { WorkflowState } from '@/stores/workflows/workflow/types' const logger = createLogger('CopilotStore') -// On module load, clear any lingering diff preview (fresh page refresh) -try { - const diffStore = useWorkflowDiffStore.getState() - if (diffStore?.hasActiveDiff) { - diffStore.clearDiff() - } -} catch {} - -// Known class-based client tools: map tool name -> instantiator -const CLIENT_TOOL_INSTANTIATORS: Record any> = { - plan: (id) => new PlanClientTool(id), - edit: (id) => new EditClientTool(id), - debug: (id) => new DebugClientTool(id), - test: (id) => new TestClientTool(id), - deploy: (id) => new DeployClientTool(id), - evaluate: (id) => new EvaluateClientTool(id), - auth: (id) => new AuthClientTool(id), - research: (id) => new ResearchClientTool(id), - knowledge: (id) => new KnowledgeClientTool(id), - custom_tool: (id) => new CustomToolClientTool(id), - tour: (id) => new TourClientTool(id), - info: (id) => new InfoClientTool(id), - workflow: (id) => new WorkflowClientTool(id), - run_workflow: (id) => new RunWorkflowClientTool(id), - get_workflow_console: (id) => new GetWorkflowConsoleClientTool(id), - get_blocks_and_tools: (id) => new GetBlocksAndToolsClientTool(id), - get_blocks_metadata: (id) => new GetBlocksMetadataClientTool(id), - get_block_options: 
(id) => new GetBlockOptionsClientTool(id), - get_block_config: (id) => new GetBlockConfigClientTool(id), - get_trigger_blocks: (id) => new GetTriggerBlocksClientTool(id), - search_online: (id) => new SearchOnlineClientTool(id), - search_documentation: (id) => new SearchDocumentationClientTool(id), - search_library_docs: (id) => new SearchLibraryDocsClientTool(id), - search_patterns: (id) => new SearchPatternsClientTool(id), - search_errors: (id) => new SearchErrorsClientTool(id), - scrape_page: (id) => new ScrapePageClientTool(id), - get_page_contents: (id) => new GetPageContentsClientTool(id), - crawl_website: (id) => new CrawlWebsiteClientTool(id), - remember_debug: (id) => new RememberDebugClientTool(id), - set_environment_variables: (id) => new SetEnvironmentVariablesClientTool(id), - get_credentials: (id) => new GetCredentialsClientTool(id), - knowledge_base: (id) => new KnowledgeBaseClientTool(id), - make_api_request: (id) => new MakeApiRequestClientTool(id), - checkoff_todo: (id) => new CheckoffTodoClientTool(id), - mark_todo_in_progress: (id) => new MarkTodoInProgressClientTool(id), - oauth_request_access: (id) => new OAuthRequestAccessClientTool(id), - edit_workflow: (id) => new EditWorkflowClientTool(id), - get_user_workflow: (id) => new GetUserWorkflowClientTool(id), - list_user_workflows: (id) => new ListUserWorkflowsClientTool(id), - get_workflow_from_name: (id) => new GetWorkflowFromNameClientTool(id), - get_workflow_data: (id) => new GetWorkflowDataClientTool(id), - set_global_workflow_variables: (id) => new SetGlobalWorkflowVariablesClientTool(id), - get_trigger_examples: (id) => new GetTriggerExamplesClientTool(id), - get_examples_rag: (id) => new GetExamplesRagClientTool(id), - get_operations_examples: (id) => new GetOperationsExamplesClientTool(id), - summarize_conversation: (id) => new SummarizeClientTool(id), - deploy_api: (id) => new DeployApiClientTool(id), - deploy_chat: (id) => new DeployChatClientTool(id), - deploy_mcp: (id) => new 
DeployMcpClientTool(id), - redeploy: (id) => new RedeployClientTool(id), - list_workspace_mcp_servers: (id) => new ListWorkspaceMcpServersClientTool(id), - create_workspace_mcp_server: (id) => new CreateWorkspaceMcpServerClientTool(id), - check_deployment_status: (id) => new CheckDeploymentStatusClientTool(id), - navigate_ui: (id) => new NavigateUIClientTool(id), - manage_custom_tool: (id) => new ManageCustomToolClientTool(id), - manage_mcp_tool: (id) => new ManageMcpToolClientTool(id), - sleep: (id) => new SleepClientTool(id), - get_block_outputs: (id) => new GetBlockOutputsClientTool(id), - get_block_upstream_references: (id) => new GetBlockUpstreamReferencesClientTool(id), +/** + * Flag set on beforeunload to suppress continue option during page refresh/close. + * Initialized once when the store module loads. + */ +let _isPageUnloading = false +if (typeof window !== 'undefined') { + window.addEventListener('beforeunload', () => { + _isPageUnloading = true + }) +} +function isPageUnloading(): boolean { + return _isPageUnloading } -// Read-only static metadata for class-based tools (no instances) -export const CLASS_TOOL_METADATA: Record = { - plan: (PlanClientTool as any)?.metadata, - edit: (EditClientTool as any)?.metadata, - debug: (DebugClientTool as any)?.metadata, - test: (TestClientTool as any)?.metadata, - deploy: (DeployClientTool as any)?.metadata, - evaluate: (EvaluateClientTool as any)?.metadata, - auth: (AuthClientTool as any)?.metadata, - research: (ResearchClientTool as any)?.metadata, - knowledge: (KnowledgeClientTool as any)?.metadata, - custom_tool: (CustomToolClientTool as any)?.metadata, - tour: (TourClientTool as any)?.metadata, - info: (InfoClientTool as any)?.metadata, - workflow: (WorkflowClientTool as any)?.metadata, - run_workflow: (RunWorkflowClientTool as any)?.metadata, - get_workflow_console: (GetWorkflowConsoleClientTool as any)?.metadata, - get_blocks_and_tools: (GetBlocksAndToolsClientTool as any)?.metadata, - get_blocks_metadata: 
(GetBlocksMetadataClientTool as any)?.metadata, - get_block_options: (GetBlockOptionsClientTool as any)?.metadata, - get_block_config: (GetBlockConfigClientTool as any)?.metadata, - get_trigger_blocks: (GetTriggerBlocksClientTool as any)?.metadata, - search_online: (SearchOnlineClientTool as any)?.metadata, - search_documentation: (SearchDocumentationClientTool as any)?.metadata, - search_library_docs: (SearchLibraryDocsClientTool as any)?.metadata, - search_patterns: (SearchPatternsClientTool as any)?.metadata, - search_errors: (SearchErrorsClientTool as any)?.metadata, - scrape_page: (ScrapePageClientTool as any)?.metadata, - get_page_contents: (GetPageContentsClientTool as any)?.metadata, - crawl_website: (CrawlWebsiteClientTool as any)?.metadata, - remember_debug: (RememberDebugClientTool as any)?.metadata, - set_environment_variables: (SetEnvironmentVariablesClientTool as any)?.metadata, - get_credentials: (GetCredentialsClientTool as any)?.metadata, - knowledge_base: (KnowledgeBaseClientTool as any)?.metadata, - make_api_request: (MakeApiRequestClientTool as any)?.metadata, - checkoff_todo: (CheckoffTodoClientTool as any)?.metadata, - mark_todo_in_progress: (MarkTodoInProgressClientTool as any)?.metadata, - edit_workflow: (EditWorkflowClientTool as any)?.metadata, - get_user_workflow: (GetUserWorkflowClientTool as any)?.metadata, - list_user_workflows: (ListUserWorkflowsClientTool as any)?.metadata, - get_workflow_from_name: (GetWorkflowFromNameClientTool as any)?.metadata, - get_workflow_data: (GetWorkflowDataClientTool as any)?.metadata, - set_global_workflow_variables: (SetGlobalWorkflowVariablesClientTool as any)?.metadata, - get_trigger_examples: (GetTriggerExamplesClientTool as any)?.metadata, - get_examples_rag: (GetExamplesRagClientTool as any)?.metadata, - oauth_request_access: (OAuthRequestAccessClientTool as any)?.metadata, - get_operations_examples: (GetOperationsExamplesClientTool as any)?.metadata, - summarize_conversation: (SummarizeClientTool 
as any)?.metadata, - deploy_api: (DeployApiClientTool as any)?.metadata, - deploy_chat: (DeployChatClientTool as any)?.metadata, - deploy_mcp: (DeployMcpClientTool as any)?.metadata, - redeploy: (RedeployClientTool as any)?.metadata, - list_workspace_mcp_servers: (ListWorkspaceMcpServersClientTool as any)?.metadata, - create_workspace_mcp_server: (CreateWorkspaceMcpServerClientTool as any)?.metadata, - check_deployment_status: (CheckDeploymentStatusClientTool as any)?.metadata, - navigate_ui: (NavigateUIClientTool as any)?.metadata, - manage_custom_tool: (ManageCustomToolClientTool as any)?.metadata, - manage_mcp_tool: (ManageMcpToolClientTool as any)?.metadata, - sleep: (SleepClientTool as any)?.metadata, - get_block_outputs: (GetBlockOutputsClientTool as any)?.metadata, - get_block_upstream_references: (GetBlockUpstreamReferencesClientTool as any)?.metadata, -} - -function ensureClientToolInstance(toolName: string | undefined, toolCallId: string | undefined) { +function readActiveStreamFromStorage(): CopilotStreamInfo | null { + if (typeof window === 'undefined') return null try { - if (!toolName || !toolCallId) return - if (getClientTool(toolCallId)) return - const make = CLIENT_TOOL_INSTANTIATORS[toolName] - if (make) { - const inst = make(toolCallId) - registerClientTool(toolCallId, inst) - } - } catch {} + const raw = window.sessionStorage.getItem(STREAM_STORAGE_KEY) + logger.debug('[Copilot] Reading stream from storage', { + hasRaw: !!raw, + rawPreview: raw ? raw.substring(0, 100) : null, + }) + if (!raw) return null + const parsed = JSON.parse(raw) as CopilotStreamInfo + return parsed?.streamId ? 
parsed : null + } catch (e) { + logger.warn('[Copilot] Failed to read stream from storage', { error: String(e) }) + return null + } +} + +function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void { + if (typeof window === 'undefined') return + try { + if (!info) { + logger.debug('[Copilot] Clearing stream from storage', { + isPageUnloading: isPageUnloading(), + stack: new Error().stack?.split('\n').slice(1, 4).join(' <- '), + }) + window.sessionStorage.removeItem(STREAM_STORAGE_KEY) + return + } + const payload = JSON.stringify(info) + window.sessionStorage.setItem(STREAM_STORAGE_KEY, payload) + const verified = window.sessionStorage.getItem(STREAM_STORAGE_KEY) === payload + logger.debug('[Copilot] Writing stream to storage', { + streamId: info.streamId, + lastEventId: info.lastEventId, + userMessageContent: info.userMessageContent?.slice(0, 30), + verified, + }) + } catch (e) { + logger.error('[Copilot] Failed to write stream to storage', { error: String(e) }) + } +} + +function updateActiveStreamEventId( + get: () => CopilotStore, + set: (next: Partial) => void, + streamId: string, + eventId: number +): void { + const current = get().activeStream + if (!current || current.streamId !== streamId) return + if (eventId <= (current.lastEventId || 0)) return + const next = { ...current, lastEventId: eventId } + set({ activeStream: next }) + writeActiveStreamToStorage(next) +} + +const AUTO_ALLOWED_TOOLS_STORAGE_KEY = 'copilot_auto_allowed_tools' + +function readAutoAllowedToolsFromStorage(): string[] | null { + if (typeof window === 'undefined') return null + try { + const raw = window.localStorage.getItem(AUTO_ALLOWED_TOOLS_STORAGE_KEY) + if (!raw) return null + const parsed = JSON.parse(raw) + if (!Array.isArray(parsed)) return null + return parsed.filter((item): item is string => typeof item === 'string') + } catch (error) { + logger.warn('[AutoAllowedTools] Failed to read local cache', { + error: error instanceof Error ? 
error.message : String(error), + }) + return null + } +} + +function writeAutoAllowedToolsToStorage(tools: string[]): void { + if (typeof window === 'undefined') return + try { + window.localStorage.setItem(AUTO_ALLOWED_TOOLS_STORAGE_KEY, JSON.stringify(tools)) + } catch (error) { + logger.warn('[AutoAllowedTools] Failed to write local cache', { + error: error instanceof Error ? error.message : String(error), + }) + } +} + +function isToolAutoAllowedByList(toolId: string, autoAllowedTools: string[]): boolean { + if (!toolId) return false + const normalizedTarget = toolId.trim() + return autoAllowedTools.some((allowed) => allowed?.trim() === normalizedTarget) +} + +/** + * Clear any lingering diff preview from a previous session. + * Called lazily when the store is first activated (setWorkflowId). + */ +let _initialDiffCleared = false +function clearInitialDiffIfNeeded(): void { + if (_initialDiffCleared) return + _initialDiffCleared = true + try { + const diffStore = useWorkflowDiffStore.getState() + if (diffStore?.hasActiveDiff) { + diffStore.clearDiff() + } + } catch (error) { + logger.warn('[Copilot] Failed to clear initial diff state', { + error: error instanceof Error ? 
error.message : String(error), + }) + } } -// Constants const TEXT_BLOCK_TYPE = 'text' -const THINKING_BLOCK_TYPE = 'thinking' -const DATA_PREFIX = 'data: ' -const DATA_PREFIX_LENGTH = 6 const CONTINUE_OPTIONS_TAG = '{"1":"Continue"}' -// Resolve display text/icon for a tool based on its state -function resolveToolDisplay( - toolName: string | undefined, - state: ClientToolCallState, - toolCallId?: string, - params?: Record -): ClientToolDisplay | undefined { - try { - if (!toolName) return undefined - const def = getTool(toolName) as any - const toolMetadata = def?.metadata || CLASS_TOOL_METADATA[toolName] - const meta = toolMetadata?.displayNames || {} +function cloneContentBlocks(blocks: ClientContentBlock[]): ClientContentBlock[] { + if (!Array.isArray(blocks)) return [] + return blocks.map((block) => (block ? { ...block } : block)) +} - // Exact state first - const ds = meta?.[state] - if (ds?.text || ds?.icon) { - // Check if tool has a dynamic text formatter - const getDynamicText = toolMetadata?.getDynamicText - if (getDynamicText && params) { - try { - const dynamicText = getDynamicText(params, state) - if (dynamicText) { - return { text: dynamicText, icon: ds.icon } - } - } catch (e) { - // Fall back to static text if formatter fails - } +function extractTextFromBlocks(blocks: ClientContentBlock[]): string { + if (!Array.isArray(blocks)) return '' + return blocks + .filter((block) => block?.type === TEXT_BLOCK_TYPE && typeof block.content === 'string') + .map((block) => block.content) + .join('') +} + +function appendTextToBlocks(blocks: ClientContentBlock[], text: string): ClientContentBlock[] { + const nextBlocks = cloneContentBlocks(blocks) + if (!text) return nextBlocks + const lastIndex = nextBlocks.length - 1 + const lastBlock = nextBlocks[lastIndex] + if (lastBlock?.type === TEXT_BLOCK_TYPE) { + const current = typeof lastBlock.content === 'string' ? 
lastBlock.content : '' + nextBlocks[lastIndex] = { ...lastBlock, content: current + text } + return nextBlocks + } + nextBlocks.push({ type: TEXT_BLOCK_TYPE, content: text, timestamp: Date.now() }) + return nextBlocks +} + +function findLastTextBlock(blocks: ClientContentBlock[]): ClientContentBlock | null { + if (!Array.isArray(blocks) || blocks.length === 0) return null + const lastBlock = blocks[blocks.length - 1] + return lastBlock?.type === TEXT_BLOCK_TYPE ? lastBlock : null +} + +function replaceTextBlocks(blocks: ClientContentBlock[], text: string): ClientContentBlock[] { + const next: ClientContentBlock[] = [] + let inserted = false + for (const block of blocks ?? []) { + if (block?.type === TEXT_BLOCK_TYPE) { + if (!inserted && text) { + next.push({ type: TEXT_BLOCK_TYPE, content: text, timestamp: Date.now() }) + inserted = true } - return { text: ds.text, icon: ds.icon } + continue } - - // Fallback order (prefer pre-execution states for unknown states like pending) - const fallbackOrder: ClientToolCallState[] = [ - (ClientToolCallState as any).generating, - (ClientToolCallState as any).executing, - (ClientToolCallState as any).review, - (ClientToolCallState as any).success, - (ClientToolCallState as any).error, - (ClientToolCallState as any).rejected, - ] - for (const key of fallbackOrder) { - const cand = meta?.[key] - if (cand?.text || cand?.icon) return { text: cand.text, icon: cand.icon } - } - } catch {} - // Humanized fallback as last resort - include state verb for proper verb-noun styling - try { - if (toolName) { - const formattedName = toolName.replace(/_/g, ' ').replace(/\b\w/g, (c) => c.toUpperCase()) - // Add state verb prefix for verb-noun rendering in tool-call component - let stateVerb: string - switch (state) { - case ClientToolCallState.pending: - case ClientToolCallState.executing: - stateVerb = 'Executing' - break - case ClientToolCallState.success: - stateVerb = 'Executed' - break - case ClientToolCallState.error: - stateVerb = 
'Failed' - break - case ClientToolCallState.rejected: - case ClientToolCallState.aborted: - stateVerb = 'Skipped' - break - default: - stateVerb = 'Executing' - } - return { text: `${stateVerb} ${formattedName}`, icon: undefined as any } - } - } catch {} - return undefined + next.push(block ? { ...block } : block) + } + if (!inserted && text) { + next.push({ type: TEXT_BLOCK_TYPE, content: text, timestamp: Date.now() }) + } + return next } -// Helper: check if a tool state is rejected -function isRejectedState(state: any): boolean { - try { - return state === 'rejected' || state === (ClientToolCallState as any).rejected - } catch { - return state === 'rejected' +function createClientStreamingContext(messageId: string): ClientStreamingContext { + return { + messageId, + accumulatedContent: '', + contentBlocks: [], + currentTextBlock: null, + isInThinkingBlock: false, + currentThinkingBlock: null, + isInDesignWorkflowBlock: false, + designWorkflowContent: '', + pendingContent: '', + doneEventCount: 0, + subAgentContent: {}, + subAgentToolCalls: {}, + subAgentBlocks: {}, } } -// Helper: check if a tool state is review (terminal for build/edit preview) -function isReviewState(state: any): boolean { - try { - return state === 'review' || state === (ClientToolCallState as any).review - } catch { - return state === 'review' - } -} +type CopilotSet = ( + partial: Partial | ((state: CopilotStore) => Partial) +) => void -// Helper: check if a tool state is background (terminal) -function isBackgroundState(state: any): boolean { - try { - return state === 'background' || state === (ClientToolCallState as any).background - } catch { - return state === 'background' - } -} +type CopilotGet = () => CopilotStore -/** - * Checks if a tool call state is terminal (success, error, rejected, aborted, review, or background) - */ -function isTerminalState(state: any): boolean { - return ( - state === ClientToolCallState.success || - state === ClientToolCallState.error || - state === 
ClientToolCallState.rejected || - state === ClientToolCallState.aborted || - isReviewState(state) || - isBackgroundState(state) - ) -} - -// Helper: abort all in-progress client tools and update inline blocks -function abortAllInProgressTools(set: any, get: () => CopilotStore) { - try { - const { toolCallsById, messages } = get() - const updatedMap = { ...toolCallsById } - const abortedIds = new Set() - let hasUpdates = false - for (const [id, tc] of Object.entries(toolCallsById)) { - const st = tc.state as any - // Abort anything not already terminal success/error/rejected/aborted - const isTerminal = - st === ClientToolCallState.success || - st === ClientToolCallState.error || - st === ClientToolCallState.rejected || - st === ClientToolCallState.aborted - if (!isTerminal || isReviewState(st)) { - abortedIds.add(id) - updatedMap[id] = { - ...tc, - state: ClientToolCallState.aborted, - subAgentStreaming: false, - display: resolveToolDisplay(tc.name, ClientToolCallState.aborted, id, (tc as any).params), - } - hasUpdates = true - } else if (tc.subAgentStreaming) { - updatedMap[id] = { - ...tc, - subAgentStreaming: false, - } - hasUpdates = true - } - } - if (abortedIds.size > 0 || hasUpdates) { - set({ toolCallsById: updatedMap }) - // Update inline blocks in-place for the latest assistant message only (most relevant) - set((s: CopilotStore) => { - const msgs = [...s.messages] - for (let mi = msgs.length - 1; mi >= 0; mi--) { - const m = msgs[mi] as any - if (m.role !== 'assistant' || !Array.isArray(m.contentBlocks)) continue - let changed = false - const blocks = m.contentBlocks.map((b: any) => { - if (b?.type === 'tool_call' && b.toolCall?.id && abortedIds.has(b.toolCall.id)) { - changed = true - const prev = b.toolCall - return { - ...b, - toolCall: { - ...prev, - state: ClientToolCallState.aborted, - display: resolveToolDisplay( - prev?.name, - ClientToolCallState.aborted, - prev?.id, - prev?.params - ), - }, - } - } - return b - }) - if (changed) { - msgs[mi] = 
{ ...m, contentBlocks: blocks } - break - } - } - return { messages: msgs } - }) - } - } catch {} -} - -// Normalize loaded messages so assistant messages render correctly from DB -/** - * Loads messages from DB for UI rendering. - * Messages are stored exactly as they render, so we just need to: - * 1. Register client tool instances for any tool calls - * 2. Clear any streaming flags (messages loaded from DB are never actively streaming) - * 3. Return the messages - */ -function normalizeMessagesForUI(messages: CopilotMessage[]): CopilotMessage[] { - try { - // Log what we're loading - for (const message of messages) { - if (message.role === 'assistant') { - logger.info('[normalizeMessagesForUI] Loading assistant message', { - id: message.id, - hasContent: !!message.content?.trim(), - contentBlockCount: message.contentBlocks?.length || 0, - contentBlockTypes: (message.contentBlocks as any[])?.map((b) => b?.type) || [], - }) - } - } - - // Register client tool instances and clear streaming flags for all tool calls - for (const message of messages) { - if (message.contentBlocks) { - for (const block of message.contentBlocks as any[]) { - if (block?.type === 'tool_call' && block.toolCall) { - registerToolCallInstances(block.toolCall) - clearStreamingFlags(block.toolCall) - } - } - } - // Also clear from toolCalls array (legacy format) - if (message.toolCalls) { - for (const toolCall of message.toolCalls) { - clearStreamingFlags(toolCall) - } - } - } - return messages - } catch { - return messages - } -} - -/** - * Recursively clears streaming flags from a tool call and its nested subagent tool calls. - * This ensures messages loaded from DB don't appear to be streaming. 
- */ -function clearStreamingFlags(toolCall: any): void { - if (!toolCall) return - - // Always set subAgentStreaming to false - messages loaded from DB are never streaming - toolCall.subAgentStreaming = false - - // Clear nested subagent tool calls - if (Array.isArray(toolCall.subAgentBlocks)) { - for (const block of toolCall.subAgentBlocks) { - if (block?.type === 'subagent_tool_call' && block.toolCall) { - clearStreamingFlags(block.toolCall) - } - } - } - if (Array.isArray(toolCall.subAgentToolCalls)) { - for (const subTc of toolCall.subAgentToolCalls) { - clearStreamingFlags(subTc) - } - } -} - -/** - * Recursively registers client tool instances for a tool call and its nested subagent tool calls. - */ -function registerToolCallInstances(toolCall: any): void { - if (!toolCall?.id) return - ensureClientToolInstance(toolCall.name, toolCall.id) - - // Register nested subagent tool calls - if (Array.isArray(toolCall.subAgentBlocks)) { - for (const block of toolCall.subAgentBlocks) { - if (block?.type === 'subagent_tool_call' && block.toolCall) { - registerToolCallInstances(block.toolCall) - } - } - } - if (Array.isArray(toolCall.subAgentToolCalls)) { - for (const subTc of toolCall.subAgentToolCalls) { - registerToolCallInstances(subTc) - } - } -} - -// Simple object pool for content blocks -class ObjectPool { - private pool: T[] = [] - private createFn: () => T - private resetFn: (obj: T) => void - - constructor(createFn: () => T, resetFn: (obj: T) => void, initialSize = 5) { - this.createFn = createFn - this.resetFn = resetFn - for (let i = 0; i < initialSize; i++) this.pool.push(createFn()) - } - get(): T { - const obj = this.pool.pop() - if (obj) { - this.resetFn(obj) - return obj - } - return this.createFn() - } - release(obj: T): void { - if (this.pool.length < 20) this.pool.push(obj) - } -} - -const contentBlockPool = new ObjectPool( - () => ({ type: '', content: '', timestamp: 0, toolCall: null as any }), - (obj) => { - obj.type = '' - obj.content = '' - 
obj.timestamp = 0 - ;(obj as any).toolCall = null - ;(obj as any).startTime = undefined - ;(obj as any).duration = undefined - } -) - -// Efficient string builder -class StringBuilder { - private parts: string[] = [] - private length = 0 - append(str: string): void { - this.parts.push(str) - this.length += str.length - } - toString(): string { - const result = this.parts.join('') - this.clear() - return result - } - clear(): void { - this.parts.length = 0 - this.length = 0 - } - get size(): number { - return this.length - } -} - -// Helpers -function createUserMessage( - content: string, - fileAttachments?: MessageFileAttachment[], - contexts?: ChatContext[], +interface SendMessageOptionsInput { + stream?: boolean + fileAttachments?: MessageFileAttachment[] + contexts?: ChatContext[] messageId?: string -): CopilotMessage { - return { - id: messageId || crypto.randomUUID(), - role: 'user', - content, - timestamp: new Date().toISOString(), - ...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }), - ...(contexts && contexts.length > 0 && { contexts }), - ...(contexts && - contexts.length > 0 && { - contentBlocks: [ - { type: 'contexts', contexts: contexts as any, timestamp: Date.now() }, - ] as any, - }), - } + queueIfBusy?: boolean } -function createStreamingMessage(): CopilotMessage { - return { - id: crypto.randomUUID(), - role: 'assistant', - content: '', - timestamp: new Date().toISOString(), - } +interface PreparedSendContext { + workflowId: string + currentChat: CopilotChat | null + mode: CopilotStore['mode'] + message: string + stream: boolean + fileAttachments?: MessageFileAttachment[] + contexts?: ChatContext[] + userMessage: CopilotMessage + streamingMessage: CopilotMessage + nextAbortController: AbortController } -function createErrorMessage( - messageId: string, - content: string, - errorType?: 'usage_limit' | 'unauthorized' | 'forbidden' | 'rate_limit' | 'upgrade_required' -): CopilotMessage { - return { - id: messageId, - role: 
'assistant', - content, - timestamp: new Date().toISOString(), - contentBlocks: [ - { - type: 'text', - content, - timestamp: Date.now(), - }, - ], - errorType, - } -} +type InitiateStreamResult = + | { kind: 'success'; result: Awaited> } + | { kind: 'error'; error: unknown } -/** - * Builds a workflow snapshot suitable for checkpoint persistence. - */ -function buildCheckpointWorkflowState(workflowId: string): WorkflowState | null { - const rawState = useWorkflowStore.getState().getWorkflowState() - if (!rawState) return null +function prepareSendContext( + get: CopilotGet, + set: CopilotSet, + message: string, + options: SendMessageOptionsInput +): PreparedSendContext | null { + const { + workflowId, + currentChat, + mode, + revertState, + isSendingMessage, + abortController: activeAbortController, + } = get() + const { stream = true, fileAttachments, contexts, messageId, queueIfBusy = true } = options - const blocksWithSubblockValues = mergeSubblockState(rawState.blocks, workflowId) + if (!workflowId) return null - const filteredBlocks = Object.entries(blocksWithSubblockValues).reduce( - (acc, [blockId, block]) => { - if (block?.type && block?.name) { - acc[blockId] = { - ...block, - id: block.id || blockId, - enabled: block.enabled !== undefined ? block.enabled : true, - horizontalHandles: block.horizontalHandles !== undefined ? block.horizontalHandles : true, - height: block.height !== undefined ? block.height : 90, - subBlocks: block.subBlocks || {}, - outputs: block.outputs || {}, - data: block.data || {}, - position: block.position || { x: 0, y: 0 }, - } - } - return acc - }, - {} as WorkflowState['blocks'] - ) - - return { - blocks: filteredBlocks, - edges: rawState.edges || [], - loops: rawState.loops || {}, - parallels: rawState.parallels || {}, - lastSaved: rawState.lastSaved || Date.now(), - deploymentStatuses: rawState.deploymentStatuses || {}, - } -} - -/** - * Persists a previously captured snapshot as a workflow checkpoint. 
- */ -async function saveMessageCheckpoint( - messageId: string, - get: () => CopilotStore, - set: (partial: Partial | ((state: CopilotStore) => Partial)) => void -): Promise { - const { workflowId, currentChat, messageSnapshots, messageCheckpoints } = get() - if (!workflowId || !currentChat?.id) return false - - const snapshot = messageSnapshots[messageId] - if (!snapshot) return false - - const nextSnapshots = { ...messageSnapshots } - delete nextSnapshots[messageId] - set({ messageSnapshots: nextSnapshots }) - - try { - const response = await fetch('/api/copilot/checkpoints', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - workflowId, - chatId: currentChat.id, - messageId, - workflowState: JSON.stringify(snapshot), - }), + if (isSendingMessage && !activeAbortController) { + logger.warn('[Copilot] sendMessage: stale sending state detected, clearing', { + originalMessageId: messageId, }) - - if (!response.ok) { - throw new Error(`Failed to create checkpoint: ${response.statusText}`) - } - - const result = await response.json() - const newCheckpoint = result.checkpoint - if (newCheckpoint) { - const existingCheckpoints = messageCheckpoints[messageId] || [] - const updatedCheckpoints = { - ...messageCheckpoints, - [messageId]: [newCheckpoint, ...existingCheckpoints], - } - set({ messageCheckpoints: updatedCheckpoints }) - } - - return true - } catch (error) { - logger.error('Failed to create checkpoint from snapshot:', error) - return false - } -} - -function stripTodoTags(text: string): string { - if (!text) return text - return text - .replace(/[\s\S]*?<\/marktodo>/g, '') - .replace(/[\s\S]*?<\/checkofftodo>/g, '') - .replace(/[\s\S]*?<\/design_workflow>/g, '') - .replace(/[ \t]+\n/g, '\n') - .replace(/\n{2,}/g, '\n') -} - -/** - * Deep clones an object using JSON serialization. - * This ensures we strip any non-serializable data (functions, circular refs). 
- */ -function deepClone(obj: T): T { - try { - const json = JSON.stringify(obj) - if (!json || json === 'undefined') { - logger.warn('[deepClone] JSON.stringify returned empty for object', { - type: typeof obj, - isArray: Array.isArray(obj), - length: Array.isArray(obj) ? obj.length : undefined, + set({ isSendingMessage: false }) + } else if (isSendingMessage && activeAbortController?.signal.aborted) { + logger.warn('[Copilot] sendMessage: aborted controller detected, clearing', { + originalMessageId: messageId, + }) + set({ isSendingMessage: false, abortController: null }) + } else if (isSendingMessage) { + if (queueIfBusy) { + get().addToQueue(message, { fileAttachments, contexts, messageId }) + logger.info('[Copilot] Message queued (already sending)', { + queueLength: get().messageQueue.length + 1, + originalMessageId: messageId, }) - return obj + return null } - const parsed = JSON.parse(json) - // Verify the clone worked - if (Array.isArray(obj) && (!Array.isArray(parsed) || parsed.length !== obj.length)) { - logger.warn('[deepClone] Array clone mismatch', { - originalLength: obj.length, - clonedLength: Array.isArray(parsed) ? parsed.length : 'not array', - }) - } - return parsed - } catch (err) { - logger.error('[deepClone] Failed to clone object', { - error: String(err), - type: typeof obj, - isArray: Array.isArray(obj), + get().abortMessage({ suppressContinueOption: true }) + } + + const nextAbortController = new AbortController() + set({ isSendingMessage: true, error: null, abortController: nextAbortController }) + + const userMessage = createUserMessage(message, fileAttachments, contexts, messageId) + const streamingMessage = createStreamingMessage() + const snapshot = workflowId ? 
buildCheckpointWorkflowState(workflowId) : null + if (snapshot) { + set((state) => ({ + messageSnapshots: { ...state.messageSnapshots, [userMessage.id]: snapshot }, + })) + } + + get() + .loadSensitiveCredentialIds() + .catch((err) => { + logger.warn('[Copilot] Failed to load sensitive credential IDs', err) }) - return obj - } -} - -/** - * Recursively masks credential IDs in any value (string, object, or array). - * Used during serialization to ensure sensitive IDs are never persisted. - */ -function maskCredentialIdsInValue(value: any, credentialIds: Set): any { - if (!value || credentialIds.size === 0) return value - - if (typeof value === 'string') { - let masked = value - // Sort by length descending to mask longer IDs first - const sortedIds = Array.from(credentialIds).sort((a, b) => b.length - a.length) - for (const id of sortedIds) { - if (id && masked.includes(id)) { - masked = masked.split(id).join('••••••••') - } - } - return masked - } - - if (Array.isArray(value)) { - return value.map((item) => maskCredentialIdsInValue(item, credentialIds)) - } - - if (typeof value === 'object') { - const masked: any = {} - for (const key of Object.keys(value)) { - masked[key] = maskCredentialIdsInValue(value[key], credentialIds) - } - return masked - } - - return value -} - -/** - * Serializes messages for database storage. - * Deep clones all fields to ensure proper JSON serialization. - * Masks sensitive credential IDs before persisting. - * This ensures they render identically when loaded back. 
- */ -function serializeMessagesForDB(messages: CopilotMessage[]): any[] { - // Get credential IDs to mask - const credentialIds = useCopilotStore.getState().sensitiveCredentialIds - - const result = messages - .map((msg) => { - // Deep clone the entire message to ensure all nested data is serializable - // Ensure timestamp is always a string (Zod schema requires it) - let timestamp: string = msg.timestamp - if (typeof timestamp !== 'string') { - const ts = timestamp as any - timestamp = ts instanceof Date ? ts.toISOString() : new Date().toISOString() - } - - const serialized: any = { - id: msg.id, - role: msg.role, - content: msg.content || '', - timestamp, - } - - // Deep clone contentBlocks (the main rendering data) - if (Array.isArray(msg.contentBlocks) && msg.contentBlocks.length > 0) { - serialized.contentBlocks = deepClone(msg.contentBlocks) - } - - // Deep clone toolCalls - if (Array.isArray((msg as any).toolCalls) && (msg as any).toolCalls.length > 0) { - serialized.toolCalls = deepClone((msg as any).toolCalls) - } - - // Deep clone file attachments - if (Array.isArray(msg.fileAttachments) && msg.fileAttachments.length > 0) { - serialized.fileAttachments = deepClone(msg.fileAttachments) - } - - // Deep clone contexts - if (Array.isArray((msg as any).contexts) && (msg as any).contexts.length > 0) { - serialized.contexts = deepClone((msg as any).contexts) - } - - // Deep clone citations - if (Array.isArray(msg.citations) && msg.citations.length > 0) { - serialized.citations = deepClone(msg.citations) - } - - // Copy error type - if (msg.errorType) { - serialized.errorType = msg.errorType - } - - // Mask credential IDs in the serialized message before persisting - return maskCredentialIdsInValue(serialized, credentialIds) - }) - .filter((msg) => { - // Filter out empty assistant messages - if (msg.role === 'assistant') { - const hasContent = typeof msg.content === 'string' && msg.content.trim().length > 0 - const hasTools = Array.isArray(msg.toolCalls) && 
msg.toolCalls.length > 0 - const hasBlocks = Array.isArray(msg.contentBlocks) && msg.contentBlocks.length > 0 - return hasContent || hasTools || hasBlocks - } - return true + get() + .loadAutoAllowedTools() + .catch((err) => { + logger.warn('[Copilot] Failed to load auto-allowed tools', err) }) - // Log what we're serializing - for (const msg of messages) { - if (msg.role === 'assistant') { - logger.info('[serializeMessagesForDB] Input assistant message', { - id: msg.id, - hasContent: !!msg.content?.trim(), - contentBlockCount: msg.contentBlocks?.length || 0, - contentBlockTypes: (msg.contentBlocks as any[])?.map((b) => b?.type) || [], - }) + let newMessages: CopilotMessage[] + if (revertState) { + const currentMessages = get().messages + newMessages = [...currentMessages, userMessage, streamingMessage] + set({ revertState: null, inputValue: '' }) + } else { + const currentMessages = get().messages + const existingIndex = messageId ? currentMessages.findIndex((m) => m.id === messageId) : -1 + if (existingIndex !== -1) { + newMessages = [...currentMessages.slice(0, existingIndex), userMessage, streamingMessage] + } else { + newMessages = [...currentMessages, userMessage, streamingMessage] } } - logger.info('[serializeMessagesForDB] Serialized messages', { - inputCount: messages.length, - outputCount: result.length, - sample: - result.length > 0 - ? 
{ - role: result[result.length - 1].role, - hasContent: !!result[result.length - 1].content, - contentBlockCount: result[result.length - 1].contentBlocks?.length || 0, - toolCallCount: result[result.length - 1].toolCalls?.length || 0, - } - : null, + const isFirstMessage = get().messages.length === 0 && !currentChat?.title + set({ + messages: newMessages, + currentUserMessageId: userMessage.id, }) - return result -} + const activeStream: CopilotStreamInfo = { + streamId: userMessage.id, + workflowId, + chatId: currentChat?.id, + userMessageId: userMessage.id, + assistantMessageId: streamingMessage.id, + lastEventId: 0, + resumeAttempts: 0, + userMessageContent: message, + fileAttachments, + contexts, + startedAt: Date.now(), + } + logger.info('[Copilot] Creating new active stream', { + streamId: activeStream.streamId, + workflowId: activeStream.workflowId, + chatId: activeStream.chatId, + userMessageContent: message.slice(0, 50), + }) + set({ activeStream }) + writeActiveStreamToStorage(activeStream) -/** - * @deprecated Use serializeMessagesForDB instead. - */ -function validateMessagesForLLM(messages: CopilotMessage[]): any[] { - return serializeMessagesForDB(messages) -} - -/** - * Extracts all tool calls from a toolCall object, including nested subAgentBlocks. - * Adds them to the provided map. - */ -function extractToolCallsRecursively( - toolCall: CopilotToolCall, - map: Record -): void { - if (!toolCall?.id) return - map[toolCall.id] = toolCall - - // Extract nested tool calls from subAgentBlocks - if (Array.isArray(toolCall.subAgentBlocks)) { - for (const block of toolCall.subAgentBlocks) { - if (block?.type === 'subagent_tool_call' && block.toolCall?.id) { - extractToolCallsRecursively(block.toolCall, map) - } - } + if (isFirstMessage) { + const optimisticTitle = + message.length > OPTIMISTIC_TITLE_MAX_LENGTH + ? `${message.substring(0, OPTIMISTIC_TITLE_MAX_LENGTH - 3)}...` + : message + set((state) => ({ + currentChat: state.currentChat + ? 
{ ...state.currentChat, title: optimisticTitle } + : state.currentChat, + chats: state.currentChat + ? state.chats.map((c) => + c.id === state.currentChat!.id ? { ...c, title: optimisticTitle } : c + ) + : state.chats, + })) } - // Extract from subAgentToolCalls as well - if (Array.isArray(toolCall.subAgentToolCalls)) { - for (const subTc of toolCall.subAgentToolCalls) { - extractToolCallsRecursively(subTc, map) - } + return { + workflowId, + currentChat, + mode, + message, + stream, + fileAttachments, + contexts, + userMessage, + streamingMessage, + nextAbortController, } } -/** - * Builds a complete toolCallsById map from normalized messages. - * Extracts all tool calls including nested subagent tool calls. - */ -function buildToolCallsById(messages: CopilotMessage[]): Record { - const toolCallsById: Record = {} - for (const msg of messages) { - if (msg.contentBlocks) { - for (const block of msg.contentBlocks as any[]) { - if (block?.type === 'tool_call' && block.toolCall?.id) { - extractToolCallsRecursively(block.toolCall, toolCallsById) - } - } - } - } - return toolCallsById -} - -// Streaming context and SSE parsing -interface StreamingContext { - messageId: string - accumulatedContent: StringBuilder - contentBlocks: any[] - currentTextBlock: any | null - isInThinkingBlock: boolean - currentThinkingBlock: any | null - isInDesignWorkflowBlock: boolean - designWorkflowContent: string - pendingContent: string - newChatId?: string - doneEventCount: number - streamComplete?: boolean - wasAborted?: boolean - suppressContinueOption?: boolean - /** Track active subagent sessions by parent tool call ID */ - subAgentParentToolCallId?: string - /** Track subagent content per parent tool call */ - subAgentContent: Record - /** Track subagent tool calls per parent tool call */ - subAgentToolCalls: Record - /** Track subagent streaming blocks per parent tool call */ - subAgentBlocks: Record -} - -type SSEHandler = ( - data: any, - context: StreamingContext, - get: () => 
CopilotStore, - set: any -) => Promise | void - -function appendTextBlock(context: StreamingContext, text: string) { - if (!text) return - context.accumulatedContent.append(text) - if (context.currentTextBlock && context.contentBlocks.length > 0) { - const lastBlock = context.contentBlocks[context.contentBlocks.length - 1] - if (lastBlock.type === TEXT_BLOCK_TYPE && lastBlock === context.currentTextBlock) { - lastBlock.content += text - return - } - } - context.currentTextBlock = contentBlockPool.get() - context.currentTextBlock.type = TEXT_BLOCK_TYPE - context.currentTextBlock.content = text - context.currentTextBlock.timestamp = Date.now() - context.contentBlocks.push(context.currentTextBlock) -} - -function appendContinueOption(content: string): string { - if (//i.test(content)) return content - const suffix = content.trim().length > 0 ? '\n\n' : '' - return `${content}${suffix}${CONTINUE_OPTIONS_TAG}` -} - -function appendContinueOptionBlock(blocks: any[]): any[] { - if (!Array.isArray(blocks)) return blocks - const hasOptions = blocks.some( - (block) => - block?.type === TEXT_BLOCK_TYPE && - typeof block.content === 'string' && - //i.test(block.content) - ) - if (hasOptions) return blocks - return [ - ...blocks, - { - type: TEXT_BLOCK_TYPE, - content: CONTINUE_OPTIONS_TAG, - timestamp: Date.now(), - }, - ] -} - -function beginThinkingBlock(context: StreamingContext) { - if (!context.currentThinkingBlock) { - context.currentThinkingBlock = contentBlockPool.get() - context.currentThinkingBlock.type = THINKING_BLOCK_TYPE - context.currentThinkingBlock.content = '' - context.currentThinkingBlock.timestamp = Date.now() - ;(context.currentThinkingBlock as any).startTime = Date.now() - context.contentBlocks.push(context.currentThinkingBlock) - } - context.isInThinkingBlock = true - context.currentTextBlock = null -} - -/** - * Removes thinking tags (raw or escaped) from streamed content. 
- */ -function stripThinkingTags(text: string): string { - return text.replace(/<\/?thinking[^>]*>/gi, '').replace(/<\/?thinking[^&]*>/gi, '') -} - -function appendThinkingContent(context: StreamingContext, text: string) { - if (!text) return - const cleanedText = stripThinkingTags(text) - if (!cleanedText) return - if (context.currentThinkingBlock) { - context.currentThinkingBlock.content += cleanedText - } else { - context.currentThinkingBlock = contentBlockPool.get() - context.currentThinkingBlock.type = THINKING_BLOCK_TYPE - context.currentThinkingBlock.content = cleanedText - context.currentThinkingBlock.timestamp = Date.now() - context.currentThinkingBlock.startTime = Date.now() - context.contentBlocks.push(context.currentThinkingBlock) - } - context.isInThinkingBlock = true - context.currentTextBlock = null -} - -function finalizeThinkingBlock(context: StreamingContext) { - if (context.currentThinkingBlock) { - context.currentThinkingBlock.duration = - Date.now() - (context.currentThinkingBlock.startTime || Date.now()) - } - context.isInThinkingBlock = false - context.currentThinkingBlock = null - context.currentTextBlock = null -} - -function upsertToolCallBlock(context: StreamingContext, toolCall: CopilotToolCall) { - let found = false - for (let i = 0; i < context.contentBlocks.length; i++) { - const b = context.contentBlocks[i] as any - if (b.type === 'tool_call' && b.toolCall?.id === toolCall.id) { - context.contentBlocks[i] = { ...b, toolCall } - found = true - break - } - } - if (!found) { - context.contentBlocks.push({ type: 'tool_call', toolCall, timestamp: Date.now() }) - } -} - -function appendSubAgentText(context: StreamingContext, parentToolCallId: string, text: string) { - if (!context.subAgentContent[parentToolCallId]) { - context.subAgentContent[parentToolCallId] = '' - } - if (!context.subAgentBlocks[parentToolCallId]) { - context.subAgentBlocks[parentToolCallId] = [] - } - context.subAgentContent[parentToolCallId] += text - const blocks = 
context.subAgentBlocks[parentToolCallId] - const lastBlock = blocks[blocks.length - 1] - if (lastBlock && lastBlock.type === 'subagent_text') { - lastBlock.content = (lastBlock.content || '') + text - } else { - blocks.push({ - type: 'subagent_text', - content: text, - timestamp: Date.now(), +async function initiateStream( + prepared: PreparedSendContext, + get: CopilotGet +): Promise { + try { + const { contexts, mode } = prepared + logger.debug('sendMessage: preparing request', { + hasContexts: Array.isArray(contexts), + contextsCount: Array.isArray(contexts) ? contexts.length : 0, + contextsPreview: Array.isArray(contexts) + ? contexts.map((c) => ({ + kind: c?.kind, + chatId: c?.kind === 'past_chat' ? c.chatId : undefined, + workflowId: + c?.kind === 'workflow' || + c?.kind === 'current_workflow' || + c?.kind === 'workflow_block' + ? c.workflowId + : undefined, + label: c?.label, + })) + : undefined, }) - } -} -const sseHandlers: Record = { - chat_id: async (data, context, get) => { - context.newChatId = data.chatId - const { currentChat } = get() - if (!currentChat && context.newChatId) { - await get().handleNewChatCreation(context.newChatId) - } - }, - title_updated: (_data, _context, get, set) => { - const title = _data.title - if (!title) return - const { currentChat, chats } = get() - if (currentChat) { - set({ - currentChat: { ...currentChat, title }, - chats: chats.map((c) => (c.id === currentChat.id ? 
{ ...c, title } : c)), + const { streamingPlanContent } = get() + let messageToSend = prepared.message + if (streamingPlanContent?.trim()) { + messageToSend = `Design Document:\n\n${streamingPlanContent}\n\n==============\n\nUser Query:\n\n${prepared.message}` + logger.debug('[DesignDocument] Prepending plan content to message', { + planLength: streamingPlanContent.length, + originalMessageLength: prepared.message.length, + finalMessageLength: messageToSend.length, }) } - }, - tool_result: (data, context, get, set) => { - try { - const toolCallId: string | undefined = data?.toolCallId || data?.data?.id - const success: boolean | undefined = data?.success - const failedDependency: boolean = data?.failedDependency === true - const skipped: boolean = data?.result?.skipped === true - if (!toolCallId) return - const { toolCallsById } = get() - const current = toolCallsById[toolCallId] - if (current) { - if ( - isRejectedState(current.state) || - isReviewState(current.state) || - isBackgroundState(current.state) - ) { - // Preserve terminal review/rejected state; do not override - return - } - const targetState = success - ? ClientToolCallState.success - : failedDependency || skipped - ? 
ClientToolCallState.rejected - : ClientToolCallState.error - const updatedMap = { ...toolCallsById } - updatedMap[toolCallId] = { - ...current, - state: targetState, - display: resolveToolDisplay( - current.name, - targetState, - current.id, - (current as any).params - ), - } - set({ toolCallsById: updatedMap }) - // If checkoff_todo succeeded, mark todo as completed in planTodos - if (targetState === ClientToolCallState.success && current.name === 'checkoff_todo') { - try { - const result = data?.result || data?.data?.result || {} - const input = (current as any).params || (current as any).input || {} - const todoId = input.id || input.todoId || result.id || result.todoId - if (todoId) { - get().updatePlanTodoStatus(todoId, 'completed') - } - } catch {} - } + const apiMode: CopilotTransportMode = + mode === 'ask' ? 'ask' : mode === 'plan' ? 'plan' : 'agent' + const uiToApiCommandMap: Record = { actions: 'superagent' } + const commands = contexts + ?.filter((c) => c.kind === 'slash_command' && 'command' in c) + .map((c) => { + const uiCommand = c.command.toLowerCase() + return uiToApiCommandMap[uiCommand] || uiCommand + }) as string[] | undefined + const filteredContexts = contexts?.filter((c) => c.kind !== 'slash_command') - // If mark_todo_in_progress succeeded, set todo executing in planTodos - if ( - targetState === ClientToolCallState.success && - current.name === 'mark_todo_in_progress' - ) { - try { - const result = data?.result || data?.data?.result || {} - const input = (current as any).params || (current as any).input || {} - const todoId = input.id || input.todoId || result.id || result.todoId - if (todoId) { - get().updatePlanTodoStatus(todoId, 'executing') - } - } catch {} - } - } - - // Update inline content block state - for (let i = 0; i < context.contentBlocks.length; i++) { - const b = context.contentBlocks[i] as any - if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) { - if ( - isRejectedState(b.toolCall?.state) || - 
isReviewState(b.toolCall?.state) || - isBackgroundState(b.toolCall?.state) - ) - break - const targetState = success - ? ClientToolCallState.success - : failedDependency || skipped - ? ClientToolCallState.rejected - : ClientToolCallState.error - context.contentBlocks[i] = { - ...b, - toolCall: { - ...b.toolCall, - state: targetState, - display: resolveToolDisplay( - b.toolCall?.name, - targetState, - toolCallId, - b.toolCall?.params - ), - }, - } - break - } - } - updateStreamingMessage(set, context) - } catch {} - }, - tool_error: (data, context, get, set) => { - try { - const toolCallId: string | undefined = data?.toolCallId || data?.data?.id - const failedDependency: boolean = data?.failedDependency === true - if (!toolCallId) return - const { toolCallsById } = get() - const current = toolCallsById[toolCallId] - if (current) { - if ( - isRejectedState(current.state) || - isReviewState(current.state) || - isBackgroundState(current.state) - ) { - return - } - const targetState = failedDependency - ? ClientToolCallState.rejected - : ClientToolCallState.error - const updatedMap = { ...toolCallsById } - updatedMap[toolCallId] = { - ...current, - state: targetState, - display: resolveToolDisplay( - current.name, - targetState, - current.id, - (current as any).params - ), - } - set({ toolCallsById: updatedMap }) - } - for (let i = 0; i < context.contentBlocks.length; i++) { - const b = context.contentBlocks[i] as any - if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) { - if ( - isRejectedState(b.toolCall?.state) || - isReviewState(b.toolCall?.state) || - isBackgroundState(b.toolCall?.state) - ) - break - const targetState = failedDependency - ? 
ClientToolCallState.rejected - : ClientToolCallState.error - context.contentBlocks[i] = { - ...b, - toolCall: { - ...b.toolCall, - state: targetState, - display: resolveToolDisplay( - b.toolCall?.name, - targetState, - toolCallId, - b.toolCall?.params - ), - }, - } - break - } - } - updateStreamingMessage(set, context) - } catch {} - }, - tool_generating: (data, context, get, set) => { - const { toolCallId, toolName } = data - if (!toolCallId || !toolName) return - const { toolCallsById } = get() - - // Ensure class-based client tool instances are registered (for interrupts/display) - ensureClientToolInstance(toolName, toolCallId) - - if (!toolCallsById[toolCallId]) { - // Show as pending until we receive full tool_call (with arguments) to decide execution - const initialState = ClientToolCallState.pending - const tc: CopilotToolCall = { - id: toolCallId, - name: toolName, - state: initialState, - display: resolveToolDisplay(toolName, initialState, toolCallId), - } - const updated = { ...toolCallsById, [toolCallId]: tc } - set({ toolCallsById: updated }) - logger.info('[toolCallsById] map updated', updated) - - // Add/refresh inline content block - upsertToolCallBlock(context, tc) - updateStreamingMessage(set, context) - } - }, - tool_call: (data, context, get, set) => { - const toolData = data?.data || {} - const id: string | undefined = toolData.id || data?.toolCallId - const name: string | undefined = toolData.name || data?.toolName - if (!id) return - const args = toolData.arguments - const isPartial = toolData.partial === true - const { toolCallsById } = get() - - // Ensure class-based client tool instances are registered (for interrupts/display) - ensureClientToolInstance(name, id) - - const existing = toolCallsById[id] - const next: CopilotToolCall = existing - ? { - ...existing, - state: ClientToolCallState.pending, - ...(args ? 
{ params: args } : {}), - display: resolveToolDisplay(name, ClientToolCallState.pending, id, args), - } - : { - id, - name: name || 'unknown_tool', - state: ClientToolCallState.pending, - ...(args ? { params: args } : {}), - display: resolveToolDisplay(name, ClientToolCallState.pending, id, args), - } - const updated = { ...toolCallsById, [id]: next } - set({ toolCallsById: updated }) - logger.info('[toolCallsById] → pending', { id, name, params: args }) - - // Ensure an inline content block exists/updated for this tool call - upsertToolCallBlock(context, next) - updateStreamingMessage(set, context) - - // Do not execute on partial tool_call frames - if (isPartial) { - return - } - - // Prefer interface-based registry to determine interrupt and execute - try { - const def = name ? getTool(name) : undefined - if (def) { - const hasInterrupt = - typeof def.hasInterrupt === 'function' - ? !!def.hasInterrupt(args || {}) - : !!def.hasInterrupt - // Check if tool is auto-allowed - if so, execute even if it has an interrupt - const { autoAllowedTools } = get() - const isAutoAllowed = name ? 
autoAllowedTools.includes(name) : false - if ((!hasInterrupt || isAutoAllowed) && typeof def.execute === 'function') { - if (isAutoAllowed && hasInterrupt) { - logger.info('[toolCallsById] Auto-executing tool with interrupt (auto-allowed)', { - id, - name, - }) - } - const ctx = createExecutionContext({ toolCallId: id, toolName: name || 'unknown_tool' }) - // Defer executing transition by a tick to let pending render - setTimeout(() => { - // Guard against duplicate execution - check if already executing or terminal - const currentState = get().toolCallsById[id]?.state - if (currentState === ClientToolCallState.executing || isTerminalState(currentState)) { - return - } - - const executingMap = { ...get().toolCallsById } - executingMap[id] = { - ...executingMap[id], - state: ClientToolCallState.executing, - display: resolveToolDisplay(name, ClientToolCallState.executing, id, args), - } - set({ toolCallsById: executingMap }) - logger.info('[toolCallsById] pending → executing (registry)', { id, name }) - - // Update inline content block to executing - for (let i = 0; i < context.contentBlocks.length; i++) { - const b = context.contentBlocks[i] as any - if (b.type === 'tool_call' && b.toolCall?.id === id) { - context.contentBlocks[i] = { - ...b, - toolCall: { ...b.toolCall, state: ClientToolCallState.executing }, - } - break - } - } - updateStreamingMessage(set, context) - - Promise.resolve() - .then(async () => { - const result = await def.execute(ctx, args || {}) - const success = - result && typeof result.status === 'number' - ? result.status >= 200 && result.status < 300 - : true - const completeMap = { ...get().toolCallsById } - // Do not override terminal review/rejected - if ( - isRejectedState(completeMap[id]?.state) || - isReviewState(completeMap[id]?.state) || - isBackgroundState(completeMap[id]?.state) - ) { - return - } - completeMap[id] = { - ...completeMap[id], - state: success ? 
ClientToolCallState.success : ClientToolCallState.error, - display: resolveToolDisplay( - name, - success ? ClientToolCallState.success : ClientToolCallState.error, - id, - args - ), - } - set({ toolCallsById: completeMap }) - logger.info( - `[toolCallsById] executing → ${success ? 'success' : 'error'} (registry)`, - { id, name } - ) - - // Notify backend tool mark-complete endpoint - try { - await fetch('/api/copilot/tools/mark-complete', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - id, - name: name || 'unknown_tool', - status: - typeof result?.status === 'number' ? result.status : success ? 200 : 500, - message: result?.message, - data: result?.data, - }), - }) - } catch {} - }) - .catch((e) => { - const errorMap = { ...get().toolCallsById } - // Do not override terminal review/rejected - if ( - isRejectedState(errorMap[id]?.state) || - isReviewState(errorMap[id]?.state) || - isBackgroundState(errorMap[id]?.state) - ) { - return - } - errorMap[id] = { - ...errorMap[id], - state: ClientToolCallState.error, - display: resolveToolDisplay(name, ClientToolCallState.error, id, args), - } - set({ toolCallsById: errorMap }) - logger.error('Registry auto-execute tool failed', { id, name, error: e }) - }) - }, 0) - return - } - } - } catch (e) { - logger.warn('tool_call registry auto-exec check failed', { id, name, error: e }) - } - - // Class-based auto-exec for non-interrupt tools or auto-allowed tools - try { - const inst = getClientTool(id) as any - const hasInterrupt = !!inst?.getInterruptDisplays?.() - // Check if tool is auto-allowed - if so, execute even if it has an interrupt - const { autoAllowedTools: classAutoAllowed } = get() - const isClassAutoAllowed = name ? 
classAutoAllowed.includes(name) : false - if ( - (!hasInterrupt || isClassAutoAllowed) && - (typeof inst?.execute === 'function' || typeof inst?.handleAccept === 'function') - ) { - if (isClassAutoAllowed && hasInterrupt) { - logger.info('[toolCallsById] Auto-executing class tool with interrupt (auto-allowed)', { - id, - name, - }) - } - setTimeout(() => { - // Guard against duplicate execution - check if already executing or terminal - const currentState = get().toolCallsById[id]?.state - if (currentState === ClientToolCallState.executing || isTerminalState(currentState)) { - return - } - - const executingMap = { ...get().toolCallsById } - executingMap[id] = { - ...executingMap[id], - state: ClientToolCallState.executing, - display: resolveToolDisplay(name, ClientToolCallState.executing, id, args), - } - set({ toolCallsById: executingMap }) - logger.info('[toolCallsById] pending → executing (class)', { id, name }) - - Promise.resolve() - .then(async () => { - // Use handleAccept for tools with interrupts, execute for others - if (hasInterrupt && typeof inst?.handleAccept === 'function') { - await inst.handleAccept(args || {}) - } else { - await inst.execute(args || {}) - } - // Success/error will be synced via registerToolStateSync - }) - .catch(() => { - const errorMap = { ...get().toolCallsById } - // Do not override terminal review/rejected - if ( - isRejectedState(errorMap[id]?.state) || - isReviewState(errorMap[id]?.state) || - isBackgroundState(errorMap[id]?.state) - ) { - return - } - errorMap[id] = { - ...errorMap[id], - state: ClientToolCallState.error, - display: resolveToolDisplay(name, ClientToolCallState.error, id, args), - } - set({ toolCallsById: errorMap }) - }) - }, 0) - return - } - } catch {} - - // Integration tools: Check auto-allowed or stay in pending state until user confirms - // This handles tools like google_calendar_*, exa_*, gmail_read, etc. 
that aren't in the client registry - // Only relevant if mode is 'build' (agent) - const { mode, workflowId, autoAllowedTools, executeIntegrationTool } = get() - if (mode === 'build' && workflowId) { - // Check if tool was NOT found in client registry - const def = name ? getTool(name) : undefined - const inst = getClientTool(id) as any - if (!def && !inst && name) { - // Check if this integration tool is auto-allowed - if so, execute it immediately - if (autoAllowedTools.includes(name)) { - logger.info('[build mode] Auto-executing integration tool (auto-allowed)', { id, name }) - // Defer to allow pending state to render briefly - setTimeout(() => { - executeIntegrationTool(id).catch((err) => { - logger.error('[build mode] Auto-execute integration tool failed', { - id, - name, - error: err, - }) - }) - }, 0) - } else { - // Integration tools stay in pending state until user confirms - logger.info('[build mode] Integration tool awaiting user confirmation', { - id, - name, - }) - } - } - } - }, - reasoning: (data, context, _get, set) => { - const phase = (data && (data.phase || data?.data?.phase)) as string | undefined - if (phase === 'start') { - beginThinkingBlock(context) - updateStreamingMessage(set, context) - return - } - if (phase === 'end') { - finalizeThinkingBlock(context) - updateStreamingMessage(set, context) - return - } - const chunk: string = typeof data?.data === 'string' ? 
data.data : data?.content || '' - if (!chunk) return - appendThinkingContent(context, chunk) - updateStreamingMessage(set, context) - }, - content: (data, context, get, set) => { - if (!data.data) return - context.pendingContent += data.data - - let contentToProcess = context.pendingContent - let hasProcessedContent = false - - const thinkingStartRegex = // - const thinkingEndRegex = /<\/thinking>/ - const designWorkflowStartRegex = // - const designWorkflowEndRegex = /<\/design_workflow>/ - - const splitTrailingPartialTag = ( - text: string, - tags: string[] - ): { text: string; remaining: string } => { - const partialIndex = text.lastIndexOf('<') - if (partialIndex < 0) { - return { text, remaining: '' } - } - const possibleTag = text.substring(partialIndex) - const matchesTagStart = tags.some((tag) => tag.startsWith(possibleTag)) - if (!matchesTagStart) { - return { text, remaining: '' } - } - return { - text: text.substring(0, partialIndex), - remaining: possibleTag, - } - } - - while (contentToProcess.length > 0) { - // Handle design_workflow tags (takes priority over other content processing) - if (context.isInDesignWorkflowBlock) { - const endMatch = designWorkflowEndRegex.exec(contentToProcess) - if (endMatch) { - const designContent = contentToProcess.substring(0, endMatch.index) - context.designWorkflowContent += designContent - context.isInDesignWorkflowBlock = false - - // Update store with complete design workflow content (available in all modes) - logger.info('[design_workflow] Tag complete, setting plan content', { - contentLength: context.designWorkflowContent.length, - }) - set({ streamingPlanContent: context.designWorkflowContent }) - - contentToProcess = contentToProcess.substring(endMatch.index + endMatch[0].length) - hasProcessedContent = true - } else { - // Still in design_workflow block, accumulate content - const { text, remaining } = splitTrailingPartialTag(contentToProcess, [ - '', - ]) - context.designWorkflowContent += text - - // 
Update store with partial content for streaming effect (available in all modes) - set({ streamingPlanContent: context.designWorkflowContent }) - - contentToProcess = remaining - hasProcessedContent = true - if (remaining) { - break - } - } - continue - } - - if (!context.isInThinkingBlock && !context.isInDesignWorkflowBlock) { - // Check for design_workflow start tag first - const designStartMatch = designWorkflowStartRegex.exec(contentToProcess) - if (designStartMatch) { - const textBeforeDesign = contentToProcess.substring(0, designStartMatch.index) - if (textBeforeDesign) { - appendTextBlock(context, textBeforeDesign) - hasProcessedContent = true - } - context.isInDesignWorkflowBlock = true - context.designWorkflowContent = '' - contentToProcess = contentToProcess.substring( - designStartMatch.index + designStartMatch[0].length - ) - hasProcessedContent = true - continue - } - - const nextMarkIndex = contentToProcess.indexOf('') - const nextCheckIndex = contentToProcess.indexOf('') - const hasMark = nextMarkIndex >= 0 - const hasCheck = nextCheckIndex >= 0 - - const nextTagIndex = - hasMark && hasCheck - ? Math.min(nextMarkIndex, nextCheckIndex) - : hasMark - ? nextMarkIndex - : hasCheck - ? nextCheckIndex - : -1 - - if (nextTagIndex >= 0) { - const isMarkTodo = hasMark && nextMarkIndex === nextTagIndex - const tagStart = isMarkTodo ? '' : '' - const tagEnd = isMarkTodo ? '' : '' - const closingIndex = contentToProcess.indexOf(tagEnd, nextTagIndex + tagStart.length) - - if (closingIndex === -1) { - // Partial tag; wait for additional content - break - } - - const todoId = contentToProcess - .substring(nextTagIndex + tagStart.length, closingIndex) - .trim() - logger.info( - isMarkTodo ? '[TODO] Detected marktodo tag' : '[TODO] Detected checkofftodo tag', - { todoId } - ) - - if (todoId) { - try { - get().updatePlanTodoStatus(todoId, isMarkTodo ? 'executing' : 'completed') - logger.info( - isMarkTodo - ? 
'[TODO] Successfully marked todo in progress' - : '[TODO] Successfully checked off todo', - { todoId } - ) - } catch (e) { - logger.error( - isMarkTodo - ? '[TODO] Failed to mark todo in progress' - : '[TODO] Failed to checkoff todo', - { todoId, error: e } - ) - } - } else { - logger.warn('[TODO] Empty todoId extracted from todo tag', { tagType: tagStart }) - } - - // Remove the tag AND newlines around it, but preserve ONE newline if both sides had them - let beforeTag = contentToProcess.substring(0, nextTagIndex) - let afterTag = contentToProcess.substring(closingIndex + tagEnd.length) - - const hadNewlineBefore = /(\r?\n)+$/.test(beforeTag) - const hadNewlineAfter = /^(\r?\n)+/.test(afterTag) - - // Strip trailing newlines before the tag - beforeTag = beforeTag.replace(/(\r?\n)+$/, '') - // Strip leading newlines after the tag - afterTag = afterTag.replace(/^(\r?\n)+/, '') - - // If there were newlines on both sides, add back ONE to preserve paragraph breaks - contentToProcess = - beforeTag + (hadNewlineBefore && hadNewlineAfter ? 
'\n' : '') + afterTag - context.currentTextBlock = null - hasProcessedContent = true - continue - } - } - - if (context.isInThinkingBlock) { - const endMatch = thinkingEndRegex.exec(contentToProcess) - if (endMatch) { - const thinkingContent = contentToProcess.substring(0, endMatch.index) - appendThinkingContent(context, thinkingContent) - finalizeThinkingBlock(context) - contentToProcess = contentToProcess.substring(endMatch.index + endMatch[0].length) - hasProcessedContent = true - } else { - const { text, remaining } = splitTrailingPartialTag(contentToProcess, ['']) - if (text) { - appendThinkingContent(context, text) - hasProcessedContent = true - } - contentToProcess = remaining - if (remaining) { - break - } - } - } else { - const startMatch = thinkingStartRegex.exec(contentToProcess) - if (startMatch) { - const textBeforeThinking = contentToProcess.substring(0, startMatch.index) - if (textBeforeThinking) { - appendTextBlock(context, textBeforeThinking) - hasProcessedContent = true - } - context.isInThinkingBlock = true - context.currentTextBlock = null - contentToProcess = contentToProcess.substring(startMatch.index + startMatch[0].length) - hasProcessedContent = true - } else { - // Check if content might contain partial todo tags and hold them back - let partialTagIndex = contentToProcess.lastIndexOf('<') - - // Also check for partial marktodo or checkofftodo tags - const partialMarkTodo = contentToProcess.lastIndexOf(' partialTagIndex) { - partialTagIndex = partialMarkTodo - } - if (partialCheckoffTodo > partialTagIndex) { - partialTagIndex = partialCheckoffTodo - } - - let textToAdd = contentToProcess - let remaining = '' - if (partialTagIndex >= 0 && partialTagIndex > contentToProcess.length - 50) { - textToAdd = contentToProcess.substring(0, partialTagIndex) - remaining = contentToProcess.substring(partialTagIndex) - } - if (textToAdd) { - appendTextBlock(context, textToAdd) - hasProcessedContent = true - } - contentToProcess = remaining - break - } - 
} - } - - context.pendingContent = contentToProcess - if (hasProcessedContent) { - updateStreamingMessage(set, context) - } - }, - done: (_data, context) => { - logger.info('[SSE] DONE EVENT RECEIVED', { - doneEventCount: context.doneEventCount, - data: _data, + const result = await sendStreamingMessage({ + message: messageToSend, + userMessageId: prepared.userMessage.id, + chatId: prepared.currentChat?.id, + workflowId: prepared.workflowId || undefined, + mode: apiMode, + model: get().selectedModel, + prefetch: get().agentPrefetch, + createNewChat: !prepared.currentChat, + stream: prepared.stream, + fileAttachments: prepared.fileAttachments, + contexts: filteredContexts, + commands: commands?.length ? commands : undefined, + abortSignal: prepared.nextAbortController.signal, }) - context.doneEventCount++ - if (context.doneEventCount >= 1) { - logger.info('[SSE] Setting streamComplete = true, stream will terminate') - context.streamComplete = true - } - }, - error: (data, context, _get, set) => { - logger.error('Stream error:', data.error) - set((state: CopilotStore) => ({ - messages: state.messages.map((msg) => - msg.id === context.messageId - ? 
{ - ...msg, - content: context.accumulatedContent || 'An error occurred.', - error: data.error, - } - : msg - ), - })) - context.streamComplete = true - }, - stream_end: (_data, context, _get, set) => { - if (context.pendingContent) { - if (context.isInThinkingBlock && context.currentThinkingBlock) { - appendThinkingContent(context, context.pendingContent) - } else if (context.pendingContent.trim()) { - appendTextBlock(context, context.pendingContent) - } - context.pendingContent = '' - } - finalizeThinkingBlock(context) - updateStreamingMessage(set, context) - }, - default: () => {}, + + return { kind: 'success', result } + } catch (error) { + return { kind: 'error', error } + } } -/** - * Helper to update a tool call with subagent data in both toolCallsById and contentBlocks - */ -function updateToolCallWithSubAgentData( - context: StreamingContext, - get: () => CopilotStore, - set: any, - parentToolCallId: string -) { - const { toolCallsById } = get() - const parentToolCall = toolCallsById[parentToolCallId] - if (!parentToolCall) { - logger.warn('[SubAgent] updateToolCallWithSubAgentData: parent tool call not found', { - parentToolCallId, - availableToolCallIds: Object.keys(toolCallsById), - }) +async function processStreamEvents( + initiated: InitiateStreamResult, + prepared: PreparedSendContext, + get: CopilotGet +): Promise { + if (initiated.kind !== 'success') return false + if (!initiated.result.success || !initiated.result.stream) return false + await get().handleStreamingResponse( + initiated.result.stream, + prepared.streamingMessage.id, + false, + prepared.userMessage.id, + prepared.nextAbortController.signal + ) + return true +} + +async function finalizeStream( + initiated: InitiateStreamResult, + processed: boolean, + prepared: PreparedSendContext, + set: CopilotSet +): Promise { + if (processed) { + set({ chatsLastLoadedAt: null, chatsLoadedForWorkflow: null }) return } - // Prepare subagent blocks array for ordered display - const blocks = 
context.subAgentBlocks[parentToolCallId] || [] + if (initiated.kind === 'success') { + const { result } = initiated + if (result.error === 'Request was aborted') { + return + } - const updatedToolCall: CopilotToolCall = { - ...parentToolCall, - subAgentContent: context.subAgentContent[parentToolCallId] || '', - subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] || [], - subAgentBlocks: blocks, - subAgentStreaming: true, + let errorContent = result.error || 'Failed to send message' + let errorType: + | 'usage_limit' + | 'unauthorized' + | 'forbidden' + | 'rate_limit' + | 'upgrade_required' + | undefined + if (result.status === 401) { + errorContent = + '_Unauthorized request. You need a valid API key to use the copilot. You can get one by going to [sim.ai](https://sim.ai) settings and generating one there._' + errorType = 'unauthorized' + } else if (result.status === 402) { + errorContent = + '_Usage limit exceeded. To continue using this service, upgrade your plan or increase your usage limit to:_' + errorType = 'usage_limit' + } else if (result.status === 403) { + errorContent = + '_Provider config not allowed for non-enterprise users. Please remove the provider config and try again_' + errorType = 'forbidden' + } else if (result.status === 426) { + errorContent = + '_Please upgrade to the latest version of the Sim platform to continue using the copilot._' + errorType = 'upgrade_required' + } else if (result.status === 429) { + errorContent = '_Provider rate limit exceeded. Please try again later._' + errorType = 'rate_limit' + } + + const errorMessage = createErrorMessage(prepared.streamingMessage.id, errorContent, errorType) + set((state) => ({ + messages: state.messages.map((m) => + m.id === prepared.streamingMessage.id ? 
errorMessage : m + ), + error: errorContent, + isSendingMessage: false, + abortController: null, + })) + set({ activeStream: null }) + writeActiveStreamToStorage(null) + return } - logger.info('[SubAgent] Updating tool call with subagent data', { - parentToolCallId, - parentToolName: parentToolCall.name, - subAgentContentLength: updatedToolCall.subAgentContent?.length, - subAgentBlocksCount: updatedToolCall.subAgentBlocks?.length, - subAgentToolCallsCount: updatedToolCall.subAgentToolCalls?.length, + const error = initiated.error + if (error instanceof Error && error.name === 'AbortError') return + const errorMessage = createErrorMessage( + prepared.streamingMessage.id, + 'Sorry, I encountered an error while processing your message. Please try again.' + ) + set((state) => ({ + messages: state.messages.map((m) => (m.id === prepared.streamingMessage.id ? errorMessage : m)), + error: error instanceof Error ? error.message : 'Failed to send message', + isSendingMessage: false, + abortController: null, + })) + set({ activeStream: null }) + writeActiveStreamToStorage(null) +} + +interface ResumeValidationResult { + nextStream: CopilotStreamInfo + messages: CopilotMessage[] + isFreshResume: boolean +} + +async function validateResumeState( + get: CopilotGet, + set: CopilotSet +): Promise { + const inMemoryStream = get().activeStream + const storedStream = readActiveStreamFromStorage() + const stored = inMemoryStream || storedStream + logger.debug('[Copilot] Resume check', { + hasInMemory: !!inMemoryStream, + hasStored: !!storedStream, + usingStream: inMemoryStream ? 'memory' : storedStream ? 
'storage' : 'none', + streamId: stored?.streamId, + lastEventId: stored?.lastEventId, + storedWorkflowId: stored?.workflowId, + storedChatId: stored?.chatId, + userMessageContent: stored?.userMessageContent?.slice(0, 50), + currentWorkflowId: get().workflowId, + isSendingMessage: get().isSendingMessage, + resumeAttempts: stored?.resumeAttempts, }) - // Update in toolCallsById - const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall } - set({ toolCallsById: updatedMap }) + if (!stored || !stored.streamId) return null + if (get().isSendingMessage) return null + if (get().workflowId && stored.workflowId !== get().workflowId) return null - // Update in contentBlocks - let foundInContentBlocks = false - for (let i = 0; i < context.contentBlocks.length; i++) { - const b = context.contentBlocks[i] as any - if (b.type === 'tool_call' && b.toolCall?.id === parentToolCallId) { - context.contentBlocks[i] = { ...b, toolCall: updatedToolCall } - foundInContentBlocks = true - break + if (stored.resumeAttempts >= MAX_RESUME_ATTEMPTS) { + logger.warn('[Copilot] Too many resume attempts, giving up') + return null + } + + const nextStream: CopilotStreamInfo = { + ...stored, + resumeAttempts: (stored.resumeAttempts || 0) + 1, + } + set({ activeStream: nextStream }) + writeActiveStreamToStorage(nextStream) + + let messages = get().messages + const isFreshResume = messages.length === 0 + if (isFreshResume && nextStream.chatId) { + try { + logger.debug('[Copilot] Loading chat for resume', { chatId: nextStream.chatId }) + const response = await fetch(`${COPILOT_CHAT_API_PATH}?chatId=${nextStream.chatId}`) + if (response.ok) { + const data = await response.json() + if (data.success && data.chat) { + const normalizedMessages = normalizeMessagesForUI(data.chat.messages ?? 
[]) + const toolCallsById = buildToolCallsById(normalizedMessages) + set({ + currentChat: data.chat, + messages: normalizedMessages, + toolCallsById, + streamingPlanContent: data.chat.planArtifact || '', + }) + messages = normalizedMessages + logger.debug('[Copilot] Loaded chat for resume', { + chatId: nextStream.chatId, + messageCount: normalizedMessages.length, + }) + } + } + } catch (e) { + logger.warn('[Copilot] Failed to load chat for resume', { error: String(e) }) } } - if (!foundInContentBlocks) { - logger.warn('[SubAgent] Parent tool call not found in contentBlocks', { - parentToolCallId, - contentBlocksCount: context.contentBlocks.length, - toolCallBlockIds: context.contentBlocks - .filter((b: any) => b.type === 'tool_call') - .map((b: any) => b.toolCall?.id), - }) - } - - updateStreamingMessage(set, context) + return { nextStream, messages, isFreshResume } } -/** - * SSE handlers for subagent events (events with subagent field set) - * These handle content and tool calls from subagents like debug - */ -const subAgentSSEHandlers: Record = { - // Handle subagent response start (ignore - just a marker) - start: () => { - // Subagent start event - no action needed, parent is already tracked from subagent_start - }, +interface ReplayBufferedEventsResult { + nextStream: CopilotStreamInfo + bufferedContent: string + replayBlocks: ClientContentBlock[] | null + resumeFromEventId: number +} - // Handle subagent text content (reasoning/thinking) - content: (data, context, get, set) => { - const parentToolCallId = context.subAgentParentToolCallId - logger.info('[SubAgent] content event', { - parentToolCallId, - hasData: !!data.data, - dataPreview: typeof data.data === 'string' ? 
data.data.substring(0, 50) : null, - }) - if (!parentToolCallId || !data.data) { - logger.warn('[SubAgent] content missing parentToolCallId or data', { - parentToolCallId, - hasData: !!data.data, - }) - return - } - - appendSubAgentText(context, parentToolCallId, data.data) - - updateToolCallWithSubAgentData(context, get, set, parentToolCallId) - }, - - // Handle subagent reasoning (same as content for subagent display purposes) - reasoning: (data, context, get, set) => { - const parentToolCallId = context.subAgentParentToolCallId - const phase = data?.phase || data?.data?.phase - if (!parentToolCallId) return - - // For reasoning, we just append the content (treating start/end as markers) - if (phase === 'start' || phase === 'end') return - - const chunk = typeof data?.data === 'string' ? data.data : data?.content || '' - if (!chunk) return - - appendSubAgentText(context, parentToolCallId, chunk) - - updateToolCallWithSubAgentData(context, get, set, parentToolCallId) - }, - - // Handle subagent tool_generating (tool is being generated) - tool_generating: () => { - // Tool generating event - no action needed, we'll handle the actual tool_call - }, - - // Handle subagent tool calls - also execute client tools - tool_call: async (data, context, get, set) => { - const parentToolCallId = context.subAgentParentToolCallId - if (!parentToolCallId) return - - const toolData = data?.data || {} - const id: string | undefined = toolData.id || data?.toolCallId - const name: string | undefined = toolData.name || data?.toolName - if (!id || !name) return - const isPartial = toolData.partial === true - - // Arguments can come in different locations depending on SSE format - // Check multiple possible locations - let args = toolData.arguments || toolData.input || data?.arguments || data?.input - - // If arguments is a string, try to parse it as JSON - if (typeof args === 'string') { - try { - args = JSON.parse(args) - } catch { - logger.warn('[SubAgent] Failed to parse arguments 
string', { args }) - } - } - - logger.info('[SubAgent] tool_call received', { - id, - name, - hasArgs: !!args, - argsKeys: args ? Object.keys(args) : [], - toolDataKeys: Object.keys(toolData), - dataKeys: Object.keys(data || {}), - }) - - // Initialize if needed - if (!context.subAgentToolCalls[parentToolCallId]) { - context.subAgentToolCalls[parentToolCallId] = [] - } - if (!context.subAgentBlocks[parentToolCallId]) { - context.subAgentBlocks[parentToolCallId] = [] - } - - // Ensure client tool instance is registered (for execution) - ensureClientToolInstance(name, id) - - // Create or update the subagent tool call - const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex( - (tc) => tc.id === id - ) - const subAgentToolCall: CopilotToolCall = { - id, - name, - state: ClientToolCallState.pending, - ...(args ? { params: args } : {}), - display: resolveToolDisplay(name, ClientToolCallState.pending, id, args), - } - - if (existingIndex >= 0) { - context.subAgentToolCalls[parentToolCallId][existingIndex] = subAgentToolCall - } else { - context.subAgentToolCalls[parentToolCallId].push(subAgentToolCall) - - // Also add to ordered blocks - context.subAgentBlocks[parentToolCallId].push({ - type: 'subagent_tool_call', - toolCall: subAgentToolCall, - timestamp: Date.now(), - }) - } - - // Also add to main toolCallsById for proper tool execution - const { toolCallsById } = get() - const updated = { ...toolCallsById, [id]: subAgentToolCall } - set({ toolCallsById: updated }) - - updateToolCallWithSubAgentData(context, get, set, parentToolCallId) - - if (isPartial) { - return - } - - // Execute client tools in parallel (non-blocking) - same pattern as main tool_call handler - // Check if tool is auto-allowed - const { autoAllowedTools: subAgentAutoAllowed } = get() - const isSubAgentAutoAllowed = name ? 
subAgentAutoAllowed.includes(name) : false +async function replayBufferedEvents( + stream: CopilotStreamInfo, + get: CopilotGet, + set: CopilotSet +): Promise { + let nextStream = stream + let bufferedContent = '' + let replayBlocks: ClientContentBlock[] | null = null + let resumeFromEventId = nextStream.lastEventId + if (nextStream.lastEventId > 0) { try { - const def = getTool(name) - if (def) { - const hasInterrupt = - typeof def.hasInterrupt === 'function' - ? !!def.hasInterrupt(args || {}) - : !!def.hasInterrupt - // Auto-execute if no interrupt OR if auto-allowed - if (!hasInterrupt || isSubAgentAutoAllowed) { - if (isSubAgentAutoAllowed && hasInterrupt) { - logger.info('[SubAgent] Auto-executing tool with interrupt (auto-allowed)', { - id, - name, - }) + logger.debug('[Copilot] Fetching all buffered events', { + streamId: nextStream.streamId, + savedLastEventId: nextStream.lastEventId, + }) + const batchUrl = `${COPILOT_CHAT_STREAM_API_PATH}?streamId=${encodeURIComponent( + nextStream.streamId + )}&from=0&to=${encodeURIComponent(String(nextStream.lastEventId))}&batch=true` + const batchResponse = await fetch(batchUrl, { credentials: 'include' }) + if (batchResponse.ok) { + const batchData = await batchResponse.json() + if (batchData.success && Array.isArray(batchData.events)) { + const replayContext = createClientStreamingContext(nextStream.assistantMessageId) + replayContext.suppressStreamingUpdates = true + for (const entry of batchData.events) { + const event = entry.event + if (event) { + await applySseEvent(event, replayContext, get, set) + } + if (typeof entry.eventId === 'number' && entry.eventId > resumeFromEventId) { + resumeFromEventId = entry.eventId + } } - // Auto-execute tools - non-blocking - const ctx = createExecutionContext({ toolCallId: id, toolName: name }) - Promise.resolve() - .then(() => def.execute(ctx, args || {})) - .catch((execErr: any) => { - logger.error('[SubAgent] Tool execution failed', { - id, - name, - error: 
execErr?.message, - }) - }) + bufferedContent = replayContext.accumulatedContent + replayBlocks = replayContext.contentBlocks + logger.debug('[Copilot] Loaded buffered content instantly', { + eventCount: batchData.events.length, + contentLength: bufferedContent.length, + resumeFromEventId, + }) + } else { + logger.warn('[Copilot] Batch response missing events', { + success: batchData.success, + hasEvents: Array.isArray(batchData.events), + }) } } else { - // Fallback to class-based tools - non-blocking - const instance = getClientTool(id) - if (instance) { - const hasInterruptDisplays = !!instance.getInterruptDisplays?.() - // Auto-execute if no interrupt OR if auto-allowed - if (!hasInterruptDisplays || isSubAgentAutoAllowed) { - if (isSubAgentAutoAllowed && hasInterruptDisplays) { - logger.info('[SubAgent] Auto-executing class tool with interrupt (auto-allowed)', { - id, - name, - }) - } - Promise.resolve() - .then(() => { - // Use handleAccept for tools with interrupts, execute for others - if (hasInterruptDisplays && typeof instance.handleAccept === 'function') { - return instance.handleAccept(args || {}) - } - return instance.execute(args || {}) - }) - .catch((execErr: any) => { - logger.error('[SubAgent] Class tool execution failed', { - id, - name, - error: execErr?.message, - }) - }) - } - } else { - // Check if this is an integration tool (server-side) that should be auto-executed - const isIntegrationTool = !CLASS_TOOL_METADATA[name] - if (isIntegrationTool && isSubAgentAutoAllowed) { - logger.info('[SubAgent] Auto-executing integration tool (auto-allowed)', { - id, - name, - }) - // Execute integration tool via the store method - const { executeIntegrationTool } = get() - executeIntegrationTool(id).catch((err) => { - logger.error('[SubAgent] Integration tool auto-execution failed', { - id, - name, - error: err?.message || err, - }) - }) - } - } + logger.warn('[Copilot] Failed to fetch buffered events', { + status: batchResponse.status, + }) } - } catch 
(e: any) { - logger.error('[SubAgent] Tool registry/execution error', { id, name, error: e?.message }) + } catch (e) { + logger.warn('[Copilot] Failed to fetch buffered events', { error: String(e) }) } - }, + } - // Handle subagent tool results - tool_result: (data, context, get, set) => { - const parentToolCallId = context.subAgentParentToolCallId - if (!parentToolCallId) return + if (resumeFromEventId > nextStream.lastEventId) { + nextStream = { ...nextStream, lastEventId: resumeFromEventId } + set({ activeStream: nextStream }) + writeActiveStreamToStorage(nextStream) + } - const toolCallId: string | undefined = data?.toolCallId || data?.data?.id - const success: boolean | undefined = data?.success !== false // Default to true if not specified - if (!toolCallId) return + return { nextStream, bufferedContent, replayBlocks, resumeFromEventId } +} - // Initialize if needed - if (!context.subAgentToolCalls[parentToolCallId]) return - if (!context.subAgentBlocks[parentToolCallId]) return +interface ResumeFinalizeResult { + nextStream: CopilotStreamInfo + bufferedContent: string + resumeFromEventId: number +} - // Update the subagent tool call state - const targetState = success ? 
ClientToolCallState.success : ClientToolCallState.error - const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex( - (tc) => tc.id === toolCallId +function finalizeResume( + messages: CopilotMessage[], + replay: ReplayBufferedEventsResult, + get: CopilotGet, + set: CopilotSet +): ResumeFinalizeResult { + let nextMessages = messages + let cleanedExisting = false + + nextMessages = nextMessages.map((m) => { + if (m.id !== replay.nextStream.assistantMessageId) return m + const hasContinueTag = + (typeof m.content === 'string' && m.content.includes(CONTINUE_OPTIONS_TAG)) || + (Array.isArray(m.contentBlocks) && + m.contentBlocks.some((b) => b.type === 'text' && b.content?.includes(CONTINUE_OPTIONS_TAG))) + if (!hasContinueTag) return m + cleanedExisting = true + return { + ...m, + content: stripContinueOption(m.content || ''), + contentBlocks: stripContinueOptionFromBlocks(m.contentBlocks ?? []), + } + }) + + if (!messages.some((m) => m.id === replay.nextStream.userMessageId)) { + const userMessage = createUserMessage( + replay.nextStream.userMessageContent || '', + replay.nextStream.fileAttachments, + replay.nextStream.contexts, + replay.nextStream.userMessageId ) + nextMessages = [...nextMessages, userMessage] + } - if (existingIndex >= 0) { - const existing = context.subAgentToolCalls[parentToolCallId][existingIndex] - const updatedSubAgentToolCall = { - ...existing, - state: targetState, - display: resolveToolDisplay(existing.name, targetState, toolCallId, existing.params), - } - context.subAgentToolCalls[parentToolCallId][existingIndex] = updatedSubAgentToolCall - - // Also update in ordered blocks - for (const block of context.subAgentBlocks[parentToolCallId]) { - if (block.type === 'subagent_tool_call' && block.toolCall?.id === toolCallId) { - block.toolCall = updatedSubAgentToolCall - break + if (!nextMessages.some((m) => m.id === replay.nextStream.assistantMessageId)) { + const assistantMessage: CopilotMessage = { + 
...createStreamingMessage(), + id: replay.nextStream.assistantMessageId, + content: replay.bufferedContent, + contentBlocks: + replay.replayBlocks && replay.replayBlocks.length > 0 + ? replay.replayBlocks + : replay.bufferedContent + ? [{ type: TEXT_BLOCK_TYPE, content: replay.bufferedContent, timestamp: Date.now() }] + : [], + } + nextMessages = [...nextMessages, assistantMessage] + } else if (replay.bufferedContent || (replay.replayBlocks && replay.replayBlocks.length > 0)) { + nextMessages = nextMessages.map((m) => { + if (m.id !== replay.nextStream.assistantMessageId) return m + let nextBlocks = + replay.replayBlocks && replay.replayBlocks.length > 0 ? replay.replayBlocks : null + if (!nextBlocks) { + const existingBlocks = Array.isArray(m.contentBlocks) ? m.contentBlocks : [] + const existingText = extractTextFromBlocks(existingBlocks) + if (existingText && replay.bufferedContent.startsWith(existingText)) { + const delta = replay.bufferedContent.slice(existingText.length) + nextBlocks = delta + ? appendTextToBlocks(existingBlocks, delta) + : cloneContentBlocks(existingBlocks) + } else if (!existingText && existingBlocks.length === 0) { + nextBlocks = replay.bufferedContent + ? [{ type: TEXT_BLOCK_TYPE, content: replay.bufferedContent, timestamp: Date.now() }] + : [] + } else { + nextBlocks = replaceTextBlocks(existingBlocks, replay.bufferedContent) } } - - // Update the individual tool call in toolCallsById so ToolCall component gets latest state - const { toolCallsById } = get() - if (toolCallsById[toolCallId]) { - const updatedMap = { - ...toolCallsById, - [toolCallId]: updatedSubAgentToolCall, - } - set({ toolCallsById: updatedMap }) - logger.info('[SubAgent] Updated subagent tool call state in toolCallsById', { - toolCallId, - name: existing.name, - state: targetState, - }) + return { + ...m, + content: replay.bufferedContent, + contentBlocks: nextBlocks ?? 
[], } + }) + } + + if (cleanedExisting || nextMessages !== messages || replay.bufferedContent) { + set({ messages: nextMessages, currentUserMessageId: replay.nextStream.userMessageId }) + } else { + set({ currentUserMessageId: replay.nextStream.userMessageId }) + } + + return { + nextStream: replay.nextStream, + bufferedContent: replay.bufferedContent, + resumeFromEventId: replay.resumeFromEventId, + } +} + +async function resumeFromLiveStream( + resume: ResumeFinalizeResult, + isFreshResume: boolean, + get: CopilotGet, + set: CopilotSet +): Promise { + const abortController = new AbortController() + set({ isSendingMessage: true, abortController }) + + try { + logger.debug('[Copilot] Attempting to resume stream', { + streamId: resume.nextStream.streamId, + savedLastEventId: resume.nextStream.lastEventId, + resumeFromEventId: resume.resumeFromEventId, + isFreshResume, + bufferedContentLength: resume.bufferedContent.length, + assistantMessageId: resume.nextStream.assistantMessageId, + chatId: resume.nextStream.chatId, + }) + const result = await sendStreamingMessage({ + message: resume.nextStream.userMessageContent || '', + userMessageId: resume.nextStream.userMessageId, + workflowId: resume.nextStream.workflowId, + chatId: resume.nextStream.chatId || get().currentChat?.id || undefined, + mode: get().mode === 'ask' ? 'ask' : get().mode === 'plan' ? 
'plan' : 'agent', + model: get().selectedModel, + prefetch: get().agentPrefetch, + stream: true, + resumeFromEventId: resume.resumeFromEventId, + abortSignal: abortController.signal, + }) + + logger.info('[Copilot] Resume stream result', { + success: result.success, + hasStream: !!result.stream, + error: result.error, + }) + + if (result.success && result.stream) { + await get().handleStreamingResponse( + result.stream, + resume.nextStream.assistantMessageId, + true, + resume.nextStream.userMessageId, + abortController.signal + ) + return true } - updateToolCallWithSubAgentData(context, get, set, parentToolCallId) - }, - - // Handle subagent stream done - just update the streaming state - done: (data, context, get, set) => { - const parentToolCallId = context.subAgentParentToolCallId - if (!parentToolCallId) return - - // Update the tool call with final content but keep streaming true until subagent_end - updateToolCallWithSubAgentData(context, get, set, parentToolCallId) - }, -} - -// Debounced UI update queue for smoother streaming -const streamingUpdateQueue = new Map() -let streamingUpdateRAF: number | null = null -let lastBatchTime = 0 -const MIN_BATCH_INTERVAL = 16 -const MAX_BATCH_INTERVAL = 50 -const MAX_QUEUE_SIZE = 5 - -function stopStreamingUpdates() { - if (streamingUpdateRAF !== null) { - cancelAnimationFrame(streamingUpdateRAF) - streamingUpdateRAF = null - } - streamingUpdateQueue.clear() -} - -function createOptimizedContentBlocks(contentBlocks: any[]): any[] { - const result: any[] = new Array(contentBlocks.length) - for (let i = 0; i < contentBlocks.length; i++) { - const block = contentBlocks[i] - result[i] = { ...block } - } - return result -} - -function updateStreamingMessage(set: any, context: StreamingContext) { - const now = performance.now() - streamingUpdateQueue.set(context.messageId, context) - const timeSinceLastBatch = now - lastBatchTime - const shouldFlushImmediately = - streamingUpdateQueue.size >= MAX_QUEUE_SIZE || 
timeSinceLastBatch > MAX_BATCH_INTERVAL - - if (streamingUpdateRAF === null) { - const scheduleUpdate = () => { - streamingUpdateRAF = requestAnimationFrame(() => { - const updates = new Map(streamingUpdateQueue) - streamingUpdateQueue.clear() - streamingUpdateRAF = null - lastBatchTime = performance.now() - set((state: CopilotStore) => { - if (updates.size === 0) return state - const messages = state.messages - const lastMessage = messages[messages.length - 1] - const lastMessageUpdate = lastMessage ? updates.get(lastMessage.id) : null - if (updates.size === 1 && lastMessageUpdate) { - const newMessages = [...messages] - newMessages[messages.length - 1] = { - ...lastMessage, - content: '', - contentBlocks: - lastMessageUpdate.contentBlocks.length > 0 - ? createOptimizedContentBlocks(lastMessageUpdate.contentBlocks) - : [], - } - return { messages: newMessages } - } - return { - messages: messages.map((msg) => { - const update = updates.get(msg.id) - if (update) { - return { - ...msg, - content: '', - contentBlocks: - update.contentBlocks.length > 0 - ? createOptimizedContentBlocks(update.contentBlocks) - : [], - } - } - return msg - }), - } - }) - }) + set({ isSendingMessage: false, abortController: null }) + } catch (error) { + if ( + error instanceof Error && + (error.name === 'AbortError' || error.message.includes('aborted')) + ) { + logger.info('[Copilot] Resume stream aborted by user') + set({ isSendingMessage: false, abortController: null }) + return false } - if (shouldFlushImmediately) scheduleUpdate() - else setTimeout(scheduleUpdate, Math.max(0, MIN_BATCH_INTERVAL - timeSinceLastBatch)) + logger.error('[Copilot] Failed to resume stream', { + error: error instanceof Error ? 
error.message : String(error), + }) + set({ isSendingMessage: false, abortController: null }) } + return false } -async function* parseSSEStream( - reader: ReadableStreamDefaultReader, - decoder: TextDecoder -) { - let buffer = '' - while (true) { - const { done, value } = await reader.read() - if (done) break - const chunk = decoder.decode(value, { stream: true }) - buffer += chunk - const lastNewlineIndex = buffer.lastIndexOf('\n') - if (lastNewlineIndex !== -1) { - const linesToProcess = buffer.substring(0, lastNewlineIndex) - buffer = buffer.substring(lastNewlineIndex + 1) - const lines = linesToProcess.split('\n') - for (let i = 0; i < lines.length; i++) { - const line = lines[i] - if (line.length === 0) continue - if (line.charCodeAt(0) === 100 && line.startsWith(DATA_PREFIX)) { - try { - const jsonStr = line.substring(DATA_PREFIX_LENGTH) - yield JSON.parse(jsonStr) - } catch (error) { - logger.warn('Failed to parse SSE data:', error) - } - } - } - } - } -} +const cachedAutoAllowedTools = readAutoAllowedToolsFromStorage() // Initial state (subset required for UI/streaming) const initialState = { mode: 'build' as const, - selectedModel: 'claude-4.5-opus' as CopilotStore['selectedModel'], + selectedModel: 'claude-4.6-opus' as CopilotStore['selectedModel'], agentPrefetch: false, enabledModels: null as string[] | null, // Null means not loaded yet, empty array means all disabled isCollapsed: false, currentChat: null as CopilotChat | null, chats: [] as CopilotChat[], messages: [] as CopilotMessage[], - checkpoints: [] as any[], - messageCheckpoints: {} as Record, + messageCheckpoints: {} as Record, messageSnapshots: {} as Record, isLoading: false, isLoadingChats: false, @@ -2362,7 +940,9 @@ const initialState = { streamingPlanContent: '', toolCallsById: {} as Record, suppressAutoSelect: false, - autoAllowedTools: [] as string[], + autoAllowedTools: cachedAutoAllowedTools ?? 
([] as string[]), + autoAllowedToolsLoaded: cachedAutoAllowedTools !== null, + activeStream: null as CopilotStreamInfo | null, messageQueue: [] as import('./types').QueuedMessage[], suppressAbortContinueOption: false, sensitiveCredentialIds: new Set(), @@ -2380,16 +960,17 @@ export const useCopilotStore = create()( // Workflow selection setWorkflowId: async (workflowId: string | null) => { + clearInitialDiffIfNeeded() const currentWorkflowId = get().workflowId if (currentWorkflowId === workflowId) return const { isSendingMessage } = get() if (isSendingMessage) get().abortMessage() // Abort all in-progress tools and clear any diff preview - abortAllInProgressTools(set, get) - try { - useWorkflowDiffStore.getState().clearDiff({ restoreBaseline: false }) - } catch {} + cleanupActiveState( + set as unknown as (partial: Record) => void, + get as unknown as () => Record + ) set({ ...initialState, @@ -2397,6 +978,9 @@ export const useCopilotStore = create()( mode: get().mode, selectedModel: get().selectedModel, agentPrefetch: get().agentPrefetch, + enabledModels: get().enabledModels, + autoAllowedTools: get().autoAllowedTools, + autoAllowedToolsLoaded: get().autoAllowedToolsLoaded, }) }, @@ -2420,18 +1004,18 @@ export const useCopilotStore = create()( if (currentChat && currentChat.id !== chat.id && isSendingMessage) get().abortMessage() // Abort in-progress tools and clear diff when changing chats - abortAllInProgressTools(set, get) - try { - useWorkflowDiffStore.getState().clearDiff({ restoreBaseline: false }) - } catch {} + cleanupActiveState( + set as unknown as (partial: Record) => void, + get as unknown as () => Record + ) // Restore plan content and config (mode/model) from selected chat const planArtifact = chat.planArtifact || '' - const chatConfig = chat.config || {} + const chatConfig = chat.config ?? 
{} const chatMode = chatConfig.mode || get().mode const chatModel = chatConfig.model || get().selectedModel - logger.info('[Chat] Restoring chat config', { + logger.debug('[Chat] Restoring chat config', { chatId: chat.id, mode: chatMode, model: chatModel, @@ -2445,7 +1029,7 @@ export const useCopilotStore = create()( const previousModel = get().selectedModel // Optimistically set selected chat and normalize messages for UI - const normalizedMessages = normalizeMessagesForUI(chat.messages || []) + const normalizedMessages = normalizeMessagesForUI(chat.messages ?? []) const toolCallsById = buildToolCallsById(normalizedMessages) set({ @@ -2463,49 +1047,56 @@ export const useCopilotStore = create()( // Background-save the previous chat's latest messages, plan artifact, and config before switching (optimistic) try { if (previousChat && previousChat.id !== chat.id) { - const dbMessages = validateMessagesForLLM(previousMessages) const previousPlanArtifact = get().streamingPlanContent - fetch('/api/copilot/chat/update-messages', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - chatId: previousChat.id, - messages: dbMessages, - planArtifact: previousPlanArtifact || null, - config: { - mode: previousMode, - model: previousModel, - }, - }), - }).catch(() => {}) + void persistMessages({ + chatId: previousChat.id, + messages: previousMessages, + sensitiveCredentialIds: get().sensitiveCredentialIds, + planArtifact: previousPlanArtifact || null, + mode: previousMode, + model: previousModel, + }) } - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed to schedule previous-chat background save', { + error: error instanceof Error ? 
error.message : String(error), + }) + } // Refresh selected chat from server to ensure we have latest messages/tool calls try { - const response = await fetch(`/api/copilot/chat?workflowId=${workflowId}`) + const response = await fetch(`${COPILOT_CHAT_API_PATH}?workflowId=${workflowId}`) if (!response.ok) throw new Error(`Failed to fetch latest chat data: ${response.status}`) const data = await response.json() if (data.success && Array.isArray(data.chats)) { const latestChat = data.chats.find((c: CopilotChat) => c.id === chat.id) if (latestChat) { - const normalizedMessages = normalizeMessagesForUI(latestChat.messages || []) + const normalizedMessages = normalizeMessagesForUI(latestChat.messages ?? []) const toolCallsById = buildToolCallsById(normalizedMessages) set({ currentChat: latestChat, messages: normalizedMessages, - chats: (get().chats || []).map((c: CopilotChat) => + chats: (get().chats ?? []).map((c: CopilotChat) => c.id === chat.id ? latestChat : c ), toolCallsById, }) try { await get().loadMessageCheckpoints(latestChat.id) - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed loading checkpoints for selected chat', { + chatId: latestChat.id, + error: error instanceof Error ? error.message : String(error), + }) + } } } - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed to refresh selected chat from server', { + error: error instanceof Error ? 
error.message : String(error), + }) + } }, createNewChat: async () => { @@ -2513,32 +1104,30 @@ export const useCopilotStore = create()( if (isSendingMessage) get().abortMessage() // Abort in-progress tools and clear diff on new chat - abortAllInProgressTools(set, get) - try { - useWorkflowDiffStore.getState().clearDiff({ restoreBaseline: false }) - } catch {} + cleanupActiveState( + set as unknown as (partial: Record) => void, + get as unknown as () => Record + ) // Background-save the current chat before clearing (optimistic) try { const { currentChat, streamingPlanContent, mode, selectedModel } = get() if (currentChat) { const currentMessages = get().messages - const dbMessages = validateMessagesForLLM(currentMessages) - fetch('/api/copilot/chat/update-messages', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - chatId: currentChat.id, - messages: dbMessages, - planArtifact: streamingPlanContent || null, - config: { - mode, - model: selectedModel, - }, - }), - }).catch(() => {}) + void persistMessages({ + chatId: currentChat.id, + messages: currentMessages, + sensitiveCredentialIds: get().sensitiveCredentialIds, + planArtifact: streamingPlanContent || null, + mode, + model: selectedModel, + }) } - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed to schedule current-chat background save', { + error: error instanceof Error ? 
error.message : String(error), + }) + } set({ currentChat: null, @@ -2554,7 +1143,7 @@ export const useCopilotStore = create()( deleteChat: async (chatId: string) => { try { // Call delete API - const response = await fetch('/api/copilot/chat/delete', { + const response = await fetch(COPILOT_DELETE_CHAT_API_PATH, { method: 'DELETE', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ chatId }), @@ -2579,8 +1168,6 @@ export const useCopilotStore = create()( } }, - areChatsFresh: (_workflowId: string) => false, - loadChats: async (_forceRefresh = false) => { const { workflowId } = get() @@ -2592,7 +1179,7 @@ export const useCopilotStore = create()( // For now always fetch fresh set({ isLoadingChats: true }) try { - const url = `/api/copilot/chat?workflowId=${workflowId}` + const url = `${COPILOT_CHAT_API_PATH}?workflowId=${workflowId}` const response = await fetch(url) if (!response.ok) { throw new Error(`Failed to fetch chats: ${response.status}`) @@ -2619,11 +1206,11 @@ export const useCopilotStore = create()( if (isSendingMessage) { set({ currentChat: { ...updatedCurrentChat, messages: get().messages } }) } else { - const normalizedMessages = normalizeMessagesForUI(updatedCurrentChat.messages || []) + const normalizedMessages = normalizeMessagesForUI(updatedCurrentChat.messages ?? []) // Restore plan artifact and config from refreshed chat const refreshedPlanArtifact = updatedCurrentChat.planArtifact || '' - const refreshedConfig = updatedCurrentChat.config || {} + const refreshedConfig = updatedCurrentChat.config ?? 
{} const refreshedMode = refreshedConfig.mode || get().mode const refreshedModel = refreshedConfig.model || get().selectedModel const toolCallsById = buildToolCallsById(normalizedMessages) @@ -2639,14 +1226,19 @@ export const useCopilotStore = create()( } try { await get().loadMessageCheckpoints(updatedCurrentChat.id) - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed loading checkpoints for current chat', { + chatId: updatedCurrentChat.id, + error: error instanceof Error ? error.message : String(error), + }) + } } else if (!isSendingMessage && !suppressAutoSelect) { const mostRecentChat: CopilotChat = data.chats[0] - const normalizedMessages = normalizeMessagesForUI(mostRecentChat.messages || []) + const normalizedMessages = normalizeMessagesForUI(mostRecentChat.messages ?? []) // Restore plan artifact and config from most recent chat const planArtifact = mostRecentChat.planArtifact || '' - const chatConfig = mostRecentChat.config || {} + const chatConfig = mostRecentChat.config ?? {} const chatMode = chatConfig.mode || get().mode const chatModel = chatConfig.model || get().selectedModel @@ -2669,7 +1261,12 @@ export const useCopilotStore = create()( }) try { await get().loadMessageCheckpoints(mostRecentChat.id) - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed loading checkpoints for most recent chat', { + chatId: mostRecentChat.id, + error: error instanceof Error ? 
error.message : String(error), + }) + } } } else { set({ currentChat: null, messages: [] }) @@ -2689,255 +1286,61 @@ export const useCopilotStore = create()( // Send a message (streaming only) sendMessage: async (message: string, options = {}) => { - const { - workflowId, - currentChat, - mode, - revertState, - isSendingMessage, - abortController: activeAbortController, - } = get() - const { - stream = true, - fileAttachments, - contexts, - messageId, - queueIfBusy = true, - } = options as { - stream?: boolean - fileAttachments?: MessageFileAttachment[] - contexts?: ChatContext[] - messageId?: string - queueIfBusy?: boolean - } - - if (!workflowId) return - - // If already sending a message, queue this one instead unless bypassing queue - if (isSendingMessage && !activeAbortController) { - logger.warn('[Copilot] sendMessage: stale sending state detected, clearing', { - originalMessageId: messageId, - }) - set({ isSendingMessage: false }) - } else if (isSendingMessage && activeAbortController?.signal.aborted) { - logger.warn('[Copilot] sendMessage: aborted controller detected, clearing', { - originalMessageId: messageId, - }) - set({ isSendingMessage: false, abortController: null }) - } else if (isSendingMessage) { - if (queueIfBusy) { - get().addToQueue(message, { fileAttachments, contexts, messageId }) - logger.info('[Copilot] Message queued (already sending)', { - queueLength: get().messageQueue.length + 1, - originalMessageId: messageId, - }) - return - } - get().abortMessage({ suppressContinueOption: true }) - } - - const nextAbortController = new AbortController() - set({ isSendingMessage: true, error: null, abortController: nextAbortController }) - - const userMessage = createUserMessage(message, fileAttachments, contexts, messageId) - const streamingMessage = createStreamingMessage() - const snapshot = workflowId ? 
buildCheckpointWorkflowState(workflowId) : null - if (snapshot) { - set((state) => ({ - messageSnapshots: { ...state.messageSnapshots, [userMessage.id]: snapshot }, - })) - } - - get() - .loadSensitiveCredentialIds() - .catch((err) => { - logger.warn('[Copilot] Failed to load sensitive credential IDs', err) - }) - get() - .loadAutoAllowedTools() - .catch((err) => { - logger.warn('[Copilot] Failed to load auto-allowed tools', err) - }) - - let newMessages: CopilotMessage[] - if (revertState) { - const currentMessages = get().messages - newMessages = [...currentMessages, userMessage, streamingMessage] - set({ revertState: null, inputValue: '' }) - } else { - const currentMessages = get().messages - // If messageId is provided, check if it already exists (e.g., from edit flow) - const existingIndex = messageId ? currentMessages.findIndex((m) => m.id === messageId) : -1 - if (existingIndex !== -1) { - // Replace existing message instead of adding new one - newMessages = [...currentMessages.slice(0, existingIndex), userMessage, streamingMessage] - } else { - // Add new messages normally - newMessages = [...currentMessages, userMessage, streamingMessage] - } - } - - const isFirstMessage = get().messages.length === 0 && !currentChat?.title - set((state) => ({ - messages: newMessages, - currentUserMessageId: userMessage.id, - })) - - if (isFirstMessage) { - const optimisticTitle = message.length > 50 ? `${message.substring(0, 47)}...` : message - set((state) => ({ - currentChat: state.currentChat - ? { ...state.currentChat, title: optimisticTitle } - : state.currentChat, - chats: state.currentChat - ? state.chats.map((c) => - c.id === state.currentChat!.id ? { ...c, title: optimisticTitle } : c - ) - : state.chats, - })) - } - - try { - // Debug: log contexts presence before sending + if (!get().autoAllowedToolsLoaded) { try { - logger.info('sendMessage: preparing request', { - hasContexts: Array.isArray(contexts), - contextsCount: Array.isArray(contexts) ? 
contexts.length : 0, - contextsPreview: Array.isArray(contexts) - ? contexts.map((c: any) => ({ - kind: c?.kind, - chatId: (c as any)?.chatId, - workflowId: (c as any)?.workflowId, - label: (c as any)?.label, - })) - : undefined, - }) - } catch {} - - // Prepend design document to message if available - const { streamingPlanContent } = get() - let messageToSend = message - if (streamingPlanContent?.trim()) { - messageToSend = `Design Document:\n\n${streamingPlanContent}\n\n==============\n\nUser Query:\n\n${message}` - logger.info('[DesignDocument] Prepending plan content to message', { - planLength: streamingPlanContent.length, - originalMessageLength: message.length, - finalMessageLength: messageToSend.length, + await get().loadAutoAllowedTools() + } catch (error) { + logger.warn('[Copilot] Failed to preload auto-allowed tools before send', { + error: error instanceof Error ? error.message : String(error), }) } - - // Call copilot API - const apiMode: CopilotTransportMode = - mode === 'ask' ? 'ask' : mode === 'plan' ? 'plan' : 'agent' - - // Extract slash commands from contexts (lowercase) and filter them out from contexts - // Map UI command IDs to API command IDs (e.g., "actions" -> "superagent") - const uiToApiCommandMap: Record = { actions: 'superagent' } - const commands = contexts - ?.filter((c) => c.kind === 'slash_command' && 'command' in c) - .map((c) => { - const uiCommand = (c as any).command.toLowerCase() - return uiToApiCommandMap[uiCommand] || uiCommand - }) as string[] | undefined - const filteredContexts = contexts?.filter((c) => c.kind !== 'slash_command') - - const result = await sendStreamingMessage({ - message: messageToSend, - userMessageId: userMessage.id, - chatId: currentChat?.id, - workflowId: workflowId || undefined, - mode: apiMode, - model: get().selectedModel, - prefetch: get().agentPrefetch, - createNewChat: !currentChat, - stream, - fileAttachments, - contexts: filteredContexts, - commands: commands?.length ? 
commands : undefined, - abortSignal: nextAbortController.signal, - }) - - if (result.success && result.stream) { - await get().handleStreamingResponse( - result.stream, - streamingMessage.id, - false, - userMessage.id - ) - set({ chatsLastLoadedAt: null, chatsLoadedForWorkflow: null }) - } else { - if (result.error === 'Request was aborted') { - return - } - - // Check for specific status codes and provide custom messages - let errorContent = result.error || 'Failed to send message' - let errorType: - | 'usage_limit' - | 'unauthorized' - | 'forbidden' - | 'rate_limit' - | 'upgrade_required' - | undefined - if (result.status === 401) { - errorContent = - '_Unauthorized request. You need a valid API key to use the copilot. You can get one by going to [sim.ai](https://sim.ai) settings and generating one there._' - errorType = 'unauthorized' - } else if (result.status === 402) { - errorContent = - '_Usage limit exceeded. To continue using this service, upgrade your plan or increase your usage limit to:_' - errorType = 'usage_limit' - } else if (result.status === 403) { - errorContent = - '_Provider config not allowed for non-enterprise users. Please remove the provider config and try again_' - errorType = 'forbidden' - } else if (result.status === 426) { - errorContent = - '_Please upgrade to the latest version of the Sim platform to continue using the copilot._' - errorType = 'upgrade_required' - } else if (result.status === 429) { - errorContent = '_Provider rate limit exceeded. Please try again later._' - errorType = 'rate_limit' - } - - const errorMessage = createErrorMessage(streamingMessage.id, errorContent, errorType) - set((state) => ({ - messages: state.messages.map((m) => (m.id === streamingMessage.id ? 
errorMessage : m)), - error: errorContent, - isSendingMessage: false, - abortController: null, - })) - } - } catch (error) { - if (error instanceof Error && error.name === 'AbortError') return - const errorMessage = createErrorMessage( - streamingMessage.id, - 'Sorry, I encountered an error while processing your message. Please try again.' - ) - set((state) => ({ - messages: state.messages.map((m) => (m.id === streamingMessage.id ? errorMessage : m)), - error: error instanceof Error ? error.message : 'Failed to send message', - isSendingMessage: false, - abortController: null, - })) } + + const prepared = prepareSendContext(get, set, message, options as SendMessageOptionsInput) + if (!prepared) return + + const initiated = await initiateStream(prepared, get) + let finalizedInitiated = initiated + let processed = false + + if (initiated.kind === 'success') { + try { + processed = await processStreamEvents(initiated, prepared, get) + } catch (error) { + finalizedInitiated = { kind: 'error', error } + processed = false + } + } + + await finalizeStream(finalizedInitiated, processed, prepared, set) + }, + + resumeActiveStream: async () => { + const validated = await validateResumeState(get, set) + if (!validated) return false + + const replayed = await replayBufferedEvents(validated.nextStream, get, set) + const finalized = finalizeResume(validated.messages, replayed, get, set) + return resumeFromLiveStream(finalized, validated.isFreshResume, get, set) }, // Abort streaming abortMessage: (options?: { suppressContinueOption?: boolean }) => { const { abortController, isSendingMessage, messages } = get() if (!isSendingMessage || !abortController) return - const suppressContinueOption = options?.suppressContinueOption === true + // Suppress continue option if explicitly requested OR if page is unloading (refresh/close) + const suppressContinueOption = options?.suppressContinueOption === true || isPageUnloading() set({ isAborting: true, suppressAbortContinueOption: 
suppressContinueOption }) try { abortController.abort() - stopStreamingUpdates() - const lastMessage = messages[messages.length - 1] + flushStreamingUpdates(set) + const { messages: updatedMessages } = get() + const lastMessage = updatedMessages[updatedMessages.length - 1] if (lastMessage && lastMessage.role === 'assistant') { const textContent = lastMessage.contentBlocks ?.filter((b) => b.type === 'text') - .map((b: any) => b.content) + .map((b) => b.content ?? '') .join('') || '' const nextContentBlocks = suppressContinueOption ? (lastMessage.contentBlocks ?? []) @@ -2969,6 +1372,13 @@ export const useCopilotStore = create()( }) } + // Only clear active stream for user-initiated aborts, NOT page unload + // During page unload, keep the stream info so we can resume after refresh + if (!isPageUnloading()) { + set({ activeStream: null }) + writeActiveStreamToStorage(null) + } + // Immediately put all in-progress tools into aborted state abortAllInProgressTools(set, get) @@ -2977,24 +1387,30 @@ export const useCopilotStore = create()( if (currentChat) { try { const currentMessages = get().messages - const dbMessages = validateMessagesForLLM(currentMessages) - fetch('/api/copilot/chat/update-messages', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - chatId: currentChat.id, - messages: dbMessages, - planArtifact: streamingPlanContent || null, - config: { - mode, - model: selectedModel, - }, - }), - }).catch(() => {}) - } catch {} + void persistMessages({ + chatId: currentChat.id, + messages: currentMessages, + sensitiveCredentialIds: get().sensitiveCredentialIds, + planArtifact: streamingPlanContent || null, + mode, + model: selectedModel, + }) + } catch (error) { + logger.warn('[Copilot] Failed to queue abort snapshot persistence', { + error: error instanceof Error ? 
error.message : String(error), + }) + } } - } catch { + } catch (error) { + logger.warn('[Copilot] Abort flow encountered an error', { + error: error instanceof Error ? error.message : String(error), + }) set({ isSendingMessage: false, isAborting: false }) + // Only clear active stream for user-initiated aborts, NOT page unload + if (!isPageUnloading()) { + set({ activeStream: null }) + writeActiveStreamToStorage(null) + } } }, @@ -3022,7 +1438,13 @@ export const useCopilotStore = create()( abortSignal: abortController.signal, }) if (result.success && result.stream) { - await get().handleStreamingResponse(result.stream, newAssistantMessage.id, false) + await get().handleStreamingResponse( + result.stream, + newAssistantMessage.id, + false, + undefined, + abortController.signal + ) } else { if (result.error === 'Request was aborted') return const errorMessage = createErrorMessage( @@ -3056,7 +1478,7 @@ export const useCopilotStore = create()( }, // Tool-call related APIs are stubbed for now - setToolCallState: (toolCall: any, newState: any) => { + setToolCallState: (toolCall: CopilotToolCall, newState: ClientToolCallState | string) => { try { const id: string | undefined = toolCall?.id if (!id) return @@ -3066,7 +1488,7 @@ export const useCopilotStore = create()( // Preserve rejected state from being overridden if ( isRejectedState(current.state) && - (newState === 'success' || newState === (ClientToolCallState as any).success) + (newState === 'success' || newState === ClientToolCallState.success) ) { return } @@ -3078,6 +1500,7 @@ export const useCopilotStore = create()( else if (newState === 'success' || newState === 'accepted') norm = ClientToolCallState.success else if (newState === 'aborted') norm = ClientToolCallState.aborted + else if (newState === 'background') norm = ClientToolCallState.background else if (typeof newState === 'number') norm = newState as unknown as ClientToolCallState map[id] = { ...current, @@ -3085,10 +1508,15 @@ export const 
useCopilotStore = create()( display: resolveToolDisplay(current.name, norm, id, current.params), } set({ toolCallsById: map }) - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed to update tool call state', { + error: error instanceof Error ? error.message : String(error), + toolCallId: toolCall?.id, + }) + } }, - updateToolCallParams: (toolCallId: string, params: Record) => { + updateToolCallParams: (toolCallId: string, params: Record) => { try { if (!toolCallId) return const map = { ...get().toolCallsById } @@ -3101,7 +1529,12 @@ export const useCopilotStore = create()( display: resolveToolDisplay(current.name, current.state, toolCallId, updatedParams), } set({ toolCallsById: map }) - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed to update tool call params', { + error: error instanceof Error ? error.message : String(error), + toolCallId, + }) + } }, updatePreviewToolCallState: ( toolCallState: 'accepted' | 'rejected' | 'error', @@ -3122,7 +1555,7 @@ export const useCopilotStore = create()( outer: for (let mi = messages.length - 1; mi >= 0; mi--) { const m = messages[mi] if (m.role !== 'assistant' || !m.contentBlocks) continue - const blocks = m.contentBlocks as any[] + const blocks = m.contentBlocks for (let bi = blocks.length - 1; bi >= 0; bi--) { const b = blocks[bi] if (b?.type === 'tool_call') { @@ -3144,7 +1577,7 @@ export const useCopilotStore = create()( const current = toolCallsById[id] if (!current) return // Do not override a rejected tool with success - if (isRejectedState(current.state) && targetState === (ClientToolCallState as any).success) { + if (isRejectedState(current.state) && targetState === ClientToolCallState.success) { return } @@ -3165,15 +1598,14 @@ export const useCopilotStore = create()( const m = messages[mi] if (m.role !== 'assistant' || !m.contentBlocks) continue let changed = false - const blocks = m.contentBlocks.map((b: any) => { + const blocks = m.contentBlocks.map((b) => { if (b.type === 'tool_call' 
&& b.toolCall?.id === id) { changed = true - const prev = b.toolCall || {} return { ...b, toolCall: { - ...prev, - id, + ...b.toolCall, + id: id!, name: current.name, state: targetState, display: updatedDisplay, @@ -3191,49 +1623,48 @@ export const useCopilotStore = create()( return { messages } }) - // Notify backend mark-complete to finalize tool server-side try { - fetch('/api/copilot/tools/mark-complete', { + fetch(COPILOT_CONFIRM_API_PATH, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ - id, - name: current.name, - status: - targetState === ClientToolCallState.success - ? 200 - : targetState === ClientToolCallState.rejected - ? 409 - : 500, - message: toolCallState, + toolCallId: id, + status: toolCallState, }), - }).catch(() => {}) - } catch {} + }).catch((error) => { + logger.warn('[Copilot] Failed to send tool confirmation', { + error: error instanceof Error ? error.message : String(error), + toolCallId: id, + status: toolCallState, + }) + }) + } catch (error) { + logger.warn('[Copilot] Failed to queue tool confirmation request', { + error: error instanceof Error ? 
error.message : String(error), + toolCallId: id, + status: toolCallState, + }) + } }, - sendDocsMessage: async (query: string) => { - await get().sendMessage(query) - }, - - saveChatMessages: async (_chatId: string) => {}, - - loadCheckpoints: async (_chatId: string) => set({ checkpoints: [] }), - loadMessageCheckpoints: async (chatId: string) => { const { workflowId } = get() if (!workflowId) return set({ isLoadingCheckpoints: true, checkpointError: null }) try { - const response = await fetch(`/api/copilot/checkpoints?chatId=${chatId}`) + const response = await fetch(`${COPILOT_CHECKPOINTS_API_PATH}?chatId=${chatId}`) if (!response.ok) throw new Error(`Failed to load checkpoints: ${response.statusText}`) const data = await response.json() if (data.success && Array.isArray(data.checkpoints)) { - const grouped = data.checkpoints.reduce((acc: Record, cp: any) => { - const key = cp.messageId || '__no_message__' - acc[key] = acc[key] || [] - acc[key].push(cp) - return acc - }, {}) + const grouped = (data.checkpoints as CheckpointEntry[]).reduce( + (acc: Record, cp: CheckpointEntry) => { + const key = cp.messageId || '__no_message__' + acc[key] = acc[key] ?? [] + acc[key].push(cp) + return acc + }, + {} + ) set({ messageCheckpoints: grouped, isLoadingCheckpoints: false }) } else { throw new Error('Invalid checkpoints response') @@ -3254,9 +1685,9 @@ export const useCopilotStore = create()( try { const { messageCheckpoints } = get() const checkpointMessageId = Object.entries(messageCheckpoints).find(([, cps]) => - (cps || []).some((cp: any) => cp?.id === checkpointId) + (cps ?? 
[]).some((cp) => cp?.id === checkpointId) )?.[0] - const response = await fetch('/api/copilot/checkpoints/revert', { + const response = await fetch(COPILOT_CHECKPOINTS_REVERT_API_PATH, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ checkpointId }), @@ -3271,27 +1702,30 @@ export const useCopilotStore = create()( // Clear any active diff preview try { useWorkflowDiffStore.getState().clearDiff() - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed to clear diff before checkpoint revert', { + error: error instanceof Error ? error.message : String(error), + }) + } // Apply to main workflow store useWorkflowStore.setState({ - blocks: reverted.blocks || {}, - edges: reverted.edges || [], - loops: reverted.loops || {}, - parallels: reverted.parallels || {}, + blocks: reverted.blocks ?? {}, + edges: reverted.edges ?? [], + loops: reverted.loops ?? {}, + parallels: reverted.parallels ?? {}, lastSaved: reverted.lastSaved || Date.now(), - deploymentStatuses: reverted.deploymentStatuses || {}, + deploymentStatuses: reverted.deploymentStatuses ?? {}, }) // Extract and apply subblock values - const values: Record> = {} - Object.entries(reverted.blocks || {}).forEach(([blockId, block]: [string, any]) => { + const values: Record> = {} + Object.entries(reverted.blocks ?? {}).forEach(([blockId, block]) => { + const typedBlock = block as { subBlocks?: Record } values[blockId] = {} - Object.entries((block as any).subBlocks || {}).forEach( - ([subId, sub]: [string, any]) => { - values[blockId][subId] = (sub as any)?.value - } - ) + Object.entries(typedBlock.subBlocks ?? 
{}).forEach(([subId, sub]) => { + values[blockId][subId] = sub?.value + }) }) const subState = useSubBlockStore.getState() useSubBlockStore.setState({ @@ -3317,7 +1751,7 @@ export const useCopilotStore = create()( }, getCheckpointsForMessage: (messageId: string) => { const { messageCheckpoints } = get() - return messageCheckpoints[messageId] || [] + return messageCheckpoints[messageId] ?? [] }, saveMessageCheckpoint: async (messageId: string) => { if (!messageId) return false @@ -3329,195 +1763,145 @@ export const useCopilotStore = create()( stream: ReadableStream, assistantMessageId: string, isContinuation = false, - triggerUserMessageId?: string + triggerUserMessageId?: string, + abortSignal?: AbortSignal ) => { const reader = stream.getReader() const decoder = new TextDecoder() const startTimeMs = Date.now() + const expectedStreamId = triggerUserMessageId - const context: StreamingContext = { - messageId: assistantMessageId, - accumulatedContent: new StringBuilder(), - contentBlocks: [], - currentTextBlock: null, - isInThinkingBlock: false, - currentThinkingBlock: null, - isInDesignWorkflowBlock: false, - designWorkflowContent: '', - pendingContent: '', - doneEventCount: 0, - subAgentContent: {}, - subAgentToolCalls: {}, - subAgentBlocks: {}, + const context = createClientStreamingContext(assistantMessageId) + if (isContinuation) { + context.suppressContinueOption = true } if (isContinuation) { const { messages } = get() const existingMessage = messages.find((m) => m.id === assistantMessageId) + logger.debug('[Copilot] Continuation init', { + hasMessage: !!existingMessage, + contentLength: existingMessage?.content?.length || 0, + contentPreview: existingMessage?.content?.slice(0, 100) || '', + contentBlocksCount: existingMessage?.contentBlocks?.length || 0, + }) if (existingMessage) { - if (existingMessage.content) context.accumulatedContent.append(existingMessage.content) - context.contentBlocks = existingMessage.contentBlocks - ? 
[...existingMessage.contentBlocks] + const existingBlocks = Array.isArray(existingMessage.contentBlocks) + ? existingMessage.contentBlocks : [] + if (existingBlocks.length > 0) { + const existingText = extractTextFromBlocks(existingBlocks) + if (existingText) { + context.accumulatedContent += existingText + } + const clonedBlocks = cloneContentBlocks(existingBlocks) + context.contentBlocks = clonedBlocks + context.currentTextBlock = findLastTextBlock(clonedBlocks) + } else if (existingMessage.content) { + const textBlock: ClientContentBlock = { + type: 'text', + content: existingMessage.content, + timestamp: Date.now(), + toolCall: null, + } + context.contentBlocks = [textBlock] + context.currentTextBlock = textBlock + context.accumulatedContent += existingMessage.content + } } } const timeoutId = setTimeout(() => { logger.warn('Stream timeout reached, completing response') reader.cancel() - }, 600000) + }, STREAM_TIMEOUT_MS) try { - for await (const data of parseSSEStream(reader, decoder)) { - const { abortController } = get() - if (abortController?.signal.aborted) { + for await (const data of parseSSEStream(reader, decoder, abortSignal)) { + if (abortSignal?.aborted) { context.wasAborted = true const { suppressAbortContinueOption } = get() - context.suppressContinueOption = suppressAbortContinueOption === true + context.suppressContinueOption = + suppressAbortContinueOption === true || isPageUnloading() if (suppressAbortContinueOption) { set({ suppressAbortContinueOption: false }) } context.pendingContent = '' finalizeThinkingBlock(context) - stopStreamingUpdates() + flushStreamingUpdates(set) reader.cancel() break } + const eventMeta = data as { eventId?: unknown; streamId?: unknown } + const eventId = typeof eventMeta.eventId === 'number' ? eventMeta.eventId : undefined + const streamId = typeof eventMeta.streamId === 'string' ? 
eventMeta.streamId : undefined + if (expectedStreamId && streamId && streamId !== expectedStreamId) { + logger.warn('[SSE] Ignoring event for mismatched stream', { + expectedStreamId, + streamId, + type: data.type, + }) + continue + } + if (eventId && streamId) { + updateActiveStreamEventId(get, set, streamId, eventId) + } + // Log SSE events for debugging - logger.info('[SSE] Received event', { + logger.debug('[SSE] Received event', { type: data.type, hasSubAgent: !!data.subagent, subagent: data.subagent, dataPreview: typeof data.data === 'string' - ? data.data.substring(0, 100) + ? (data.data as string).substring(0, 100) : JSON.stringify(data.data)?.substring(0, 100), }) - // Handle subagent_start to track parent tool call - if (data.type === 'subagent_start') { - const toolCallId = data.data?.tool_call_id - if (toolCallId) { - context.subAgentParentToolCallId = toolCallId - // Mark the parent tool call as streaming - const { toolCallsById } = get() - const parentToolCall = toolCallsById[toolCallId] - if (parentToolCall) { - const updatedToolCall: CopilotToolCall = { - ...parentToolCall, - subAgentStreaming: true, - } - const updatedMap = { ...toolCallsById, [toolCallId]: updatedToolCall } - set({ toolCallsById: updatedMap }) - } - logger.info('[SSE] Subagent session started', { - subagent: data.subagent, - parentToolCallId: toolCallId, - }) - } - continue - } - - // Handle subagent_end to finalize subagent content - if (data.type === 'subagent_end') { - const parentToolCallId = context.subAgentParentToolCallId - if (parentToolCallId) { - // Mark subagent streaming as complete - const { toolCallsById } = get() - const parentToolCall = toolCallsById[parentToolCallId] - if (parentToolCall) { - const updatedToolCall: CopilotToolCall = { - ...parentToolCall, - subAgentContent: context.subAgentContent[parentToolCallId] || '', - subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] || [], - subAgentBlocks: context.subAgentBlocks[parentToolCallId] || [], - 
subAgentStreaming: false, // Done streaming - } - const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall } - set({ toolCallsById: updatedMap }) - logger.info('[SSE] Subagent session ended', { - subagent: data.subagent, - parentToolCallId, - contentLength: context.subAgentContent[parentToolCallId]?.length || 0, - toolCallCount: context.subAgentToolCalls[parentToolCallId]?.length || 0, - }) - } - } - context.subAgentParentToolCallId = undefined - continue - } - - // Check if this is a subagent event (has subagent field) - if (data.subagent) { - const parentToolCallId = context.subAgentParentToolCallId - if (!parentToolCallId) { - logger.warn('[SSE] Subagent event without parent tool call ID', { - type: data.type, - subagent: data.subagent, - }) - continue - } - - logger.info('[SSE] Processing subagent event', { - type: data.type, - subagent: data.subagent, - parentToolCallId, - hasHandler: !!subAgentSSEHandlers[data.type], - }) - - const subAgentHandler = subAgentSSEHandlers[data.type] - if (subAgentHandler) { - await subAgentHandler(data, context, get, set) - } else { - logger.warn('[SSE] No handler for subagent event type', { type: data.type }) - } - // Skip regular handlers for subagent events - if (context.streamComplete) break - continue - } - - const handler = sseHandlers[data.type] || sseHandlers.default - await handler(data, context, get, set) - if (context.streamComplete) break + const shouldContinue = await applySseEvent(data, context, get, set) + if (!shouldContinue) break } if (!context.wasAborted && sseHandlers.stream_end) { - sseHandlers.stream_end({}, context, get, set) + sseHandlers.stream_end({ type: 'done' }, context, get, set) } - if (streamingUpdateRAF !== null) { - cancelAnimationFrame(streamingUpdateRAF) - streamingUpdateRAF = null - } - streamingUpdateQueue.clear() + stopStreamingUpdates() - let sanitizedContentBlocks: any[] = [] + let sanitizedContentBlocks: ClientContentBlock[] = [] if (context.contentBlocks && 
context.contentBlocks.length > 0) { - const optimizedBlocks = createOptimizedContentBlocks(context.contentBlocks) - sanitizedContentBlocks = optimizedBlocks.map((block: any) => + const optimizedBlocks = context.contentBlocks.map((block) => ({ ...block })) + sanitizedContentBlocks = optimizedBlocks.map((block) => block.type === TEXT_BLOCK_TYPE && typeof block.content === 'string' ? { ...block, content: stripTodoTags(block.content) } : block ) } + if (isContinuation) { + sanitizedContentBlocks = stripContinueOptionFromBlocks(sanitizedContentBlocks) + } if (context.wasAborted && !context.suppressContinueOption) { sanitizedContentBlocks = appendContinueOptionBlock(sanitizedContentBlocks) } - if (context.contentBlocks) { - context.contentBlocks.forEach((block) => { - if (block.type === TEXT_BLOCK_TYPE || block.type === THINKING_BLOCK_TYPE) { - contentBlockPool.release(block) - } - }) + if (!context.streamComplete && !context.wasAborted) { + const resumed = await get().resumeActiveStream() + if (resumed) { + return + } } - const finalContent = stripTodoTags(context.accumulatedContent.toString()) + const finalContent = stripTodoTags(context.accumulatedContent) + const finalContentStripped = isContinuation + ? stripContinueOption(finalContent) + : finalContent const finalContentWithOptions = context.wasAborted && !context.suppressContinueOption ? appendContinueOption(finalContent) - : finalContent + : finalContentStripped + // Step 1: Update messages in state but keep isSendingMessage: true. + // This prevents loadChats from overwriting with stale DB data during persist. set((state) => { const snapshotId = state.currentUserMessageId const nextSnapshots = @@ -3528,7 +1912,7 @@ export const useCopilotStore = create()( return updated })() : state.messageSnapshots - return { + const nextState: Partial = { messages: state.messages.map((msg) => msg.id === assistantMessageId ? 
{ @@ -3538,43 +1922,26 @@ export const useCopilotStore = create()( } : msg ), - isSendingMessage: false, isAborting: false, - abortController: null, currentUserMessageId: null, messageSnapshots: nextSnapshots, } + return nextState }) + // Only clear active stream if stream completed normally or user aborted (not page unload) + if ((context.streamComplete || context.wasAborted) && !isPageUnloading()) { + set({ activeStream: null }) + writeActiveStreamToStorage(null) + } + if (context.newChatId && !get().currentChat) { await get().handleNewChatCreation(context.newChatId) } - // Process next message in queue if any - const nextInQueue = get().messageQueue[0] - if (nextInQueue) { - // Use originalMessageId if available (from edit/resend), otherwise use queue entry id - const messageIdToUse = nextInQueue.originalMessageId || nextInQueue.id - logger.info('[Queue] Processing next queued message', { - id: nextInQueue.id, - originalMessageId: nextInQueue.originalMessageId, - messageIdToUse, - queueLength: get().messageQueue.length, - }) - // Remove from queue and send - get().removeFromQueue(nextInQueue.id) - // Use setTimeout to avoid blocking the current execution - setTimeout(() => { - get().sendMessage(nextInQueue.content, { - stream: true, - fileAttachments: nextInQueue.fileAttachments, - contexts: nextInQueue.contexts, - messageId: messageIdToUse, - }) - }, 100) - } - - // Persist full message state (including contentBlocks), plan artifact, and config to database + // Step 2: Persist messages to DB BEFORE marking stream as done. + // loadChats checks isSendingMessage — while true it preserves in-memory messages. + // Persisting first ensures the DB is up-to-date before we allow overwrites. 
const { currentChat, streamingPlanContent, mode, selectedModel } = get() if (currentChat) { try { @@ -3582,40 +1949,35 @@ export const useCopilotStore = create()( // Debug: Log what we're about to serialize const lastMsg = currentMessages[currentMessages.length - 1] if (lastMsg?.role === 'assistant') { - logger.info('[Stream Done] About to serialize - last message state', { + logger.debug('[Stream Done] About to serialize - last message state', { id: lastMsg.id, contentLength: lastMsg.content?.length || 0, hasContentBlocks: !!lastMsg.contentBlocks, contentBlockCount: lastMsg.contentBlocks?.length || 0, - contentBlockTypes: (lastMsg.contentBlocks as any[])?.map((b) => b?.type) || [], + contentBlockTypes: lastMsg.contentBlocks?.map((b) => b?.type) ?? [], }) } - const dbMessages = validateMessagesForLLM(currentMessages) const config = { mode, model: selectedModel, } - const saveResponse = await fetch('/api/copilot/chat/update-messages', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - chatId: currentChat.id, - messages: dbMessages, - planArtifact: streamingPlanContent || null, - config, - }), + const persisted = await persistMessages({ + chatId: currentChat.id, + messages: currentMessages, + sensitiveCredentialIds: get().sensitiveCredentialIds, + planArtifact: streamingPlanContent || null, + mode, + model: selectedModel, }) - if (!saveResponse.ok) { - const errorText = await saveResponse.text().catch(() => '') + if (!persisted) { logger.error('[Stream Done] Failed to save messages to DB', { - status: saveResponse.status, - error: errorText, + chatId: currentChat.id, }) } else { logger.info('[Stream Done] Successfully saved messages to DB', { - messageCount: dbMessages.length, + messageCount: currentMessages.length, }) } @@ -3632,16 +1994,39 @@ export const useCopilotStore = create()( } } - // Post copilot_stats record (input/output tokens can be null for now) - try { - // Removed: stats sending now occurs only on 
accept/reject with minimal payload - } catch {} + // Step 3: NOW mark stream as done. DB is up-to-date, so if loadChats + // overwrites messages it will use the persisted (correct) data. + set({ isSendingMessage: false, abortController: null }) + + // Process next message in queue if any + const nextInQueue = get().messageQueue[0] + if (nextInQueue) { + // Use originalMessageId if available (from edit/resend), otherwise use queue entry id + const messageIdToUse = nextInQueue.originalMessageId || nextInQueue.id + logger.debug('[Queue] Processing next queued message', { + id: nextInQueue.id, + originalMessageId: nextInQueue.originalMessageId, + messageIdToUse, + queueLength: get().messageQueue.length, + }) + // Remove from queue and send + get().removeFromQueue(nextInQueue.id) + // Use setTimeout to avoid blocking the current execution + setTimeout(() => { + get().sendMessage(nextInQueue.content, { + stream: true, + fileAttachments: nextInQueue.fileAttachments, + contexts: nextInQueue.contexts, + messageId: messageIdToUse, + }) + }, QUEUE_PROCESS_DELAY_MS) + } // Invalidate subscription queries to update usage setTimeout(() => { const queryClient = getQueryClient() queryClient.invalidateQueries({ queryKey: subscriptionKeys.all }) - }, 1000) + }, SUBSCRIPTION_INVALIDATE_DELAY_MS) } finally { clearTimeout(timeoutId) } @@ -3668,11 +2053,15 @@ export const useCopilotStore = create()( abortAllInProgressTools(set, get) try { useWorkflowDiffStore.getState().clearDiff() - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed to clear diff on new chat creation', { + error: error instanceof Error ? error.message : String(error), + }) + } set({ currentChat: newChat, - chats: [newChat, ...(get().chats || [])], + chats: [newChat, ...(get().chats ?? 
[])], chatsLastLoadedAt: null, chatsLoadedForWorkflow: null, planTodos: [], @@ -3685,20 +2074,18 @@ export const useCopilotStore = create()( clearError: () => set({ error: null }), clearSaveError: () => set({ saveError: null }), clearCheckpointError: () => set({ checkpointError: null }), - retrySave: async (_chatId: string) => {}, - cleanup: () => { const { isSendingMessage } = get() if (isSendingMessage) get().abortMessage() - if (streamingUpdateRAF !== null) { - cancelAnimationFrame(streamingUpdateRAF) - streamingUpdateRAF = null - } - streamingUpdateQueue.clear() + stopStreamingUpdates() // Clear any diff on cleanup try { useWorkflowDiffStore.getState().clearDiff() - } catch {} + } catch (error) { + logger.warn('[Copilot] Failed to clear diff on cleanup', { + error: error instanceof Error ? error.message : String(error), + }) + } }, reset: () => { @@ -3736,21 +2123,14 @@ export const useCopilotStore = create()( if (currentChat) { try { const currentMessages = get().messages - const dbMessages = validateMessagesForLLM(currentMessages) const { mode, selectedModel } = get() - - await fetch('/api/copilot/chat/update-messages', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - chatId: currentChat.id, - messages: dbMessages, - planArtifact: null, - config: { - mode, - model: selectedModel, - }, - }), + await persistMessages({ + chatId: currentChat.id, + messages: currentMessages, + sensitiveCredentialIds: get().sensitiveCredentialIds, + planArtifact: null, + mode, + model: selectedModel, }) // Update local chat object @@ -3778,21 +2158,14 @@ export const useCopilotStore = create()( if (currentChat) { try { const currentMessages = get().messages - const dbMessages = validateMessagesForLLM(currentMessages) const { mode, selectedModel } = get() - - await fetch('/api/copilot/chat/update-messages', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - chatId: currentChat.id, - 
messages: dbMessages, - planArtifact: content, - config: { - mode, - model: selectedModel, - }, - }), + await persistMessages({ + chatId: currentChat.id, + messages: currentMessages, + sensitiveCredentialIds: get().sensitiveCredentialIds, + planArtifact: content, + mode, + model: selectedModel, }) // Update local chat object @@ -3819,219 +2192,43 @@ export const useCopilotStore = create()( setAgentPrefetch: (prefetch) => set({ agentPrefetch: prefetch }), setEnabledModels: (models) => set({ enabledModels: models }), - executeIntegrationTool: async (toolCallId: string) => { - const { toolCallsById, workflowId } = get() - const toolCall = toolCallsById[toolCallId] - if (!toolCall || !workflowId) return - - const { id, name, params } = toolCall - - // Guard against double execution - skip if already executing or in terminal state - if (toolCall.state === ClientToolCallState.executing || isTerminalState(toolCall.state)) { - logger.info('[executeIntegrationTool] Skipping - already executing or terminal', { - id, - name, - state: toolCall.state, - }) - return - } - - // Set to executing state - const executingMap = { ...get().toolCallsById } - executingMap[id] = { - ...executingMap[id], - state: ClientToolCallState.executing, - display: resolveToolDisplay(name, ClientToolCallState.executing, id, params), - } - set({ toolCallsById: executingMap }) - logger.info('[toolCallsById] pending → executing (integration tool)', { id, name }) - - try { - const res = await fetch('/api/copilot/execute-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - toolCallId: id, - toolName: name, - arguments: params || {}, - workflowId, - }), - }) - - const result = await res.json() - const success = result.success && result.result?.success - const completeMap = { ...get().toolCallsById } - - // Do not override terminal review/rejected - if ( - isRejectedState(completeMap[id]?.state) || - isReviewState(completeMap[id]?.state) || - 
isBackgroundState(completeMap[id]?.state) - ) { - return - } - - completeMap[id] = { - ...completeMap[id], - state: success ? ClientToolCallState.success : ClientToolCallState.error, - display: resolveToolDisplay( - name, - success ? ClientToolCallState.success : ClientToolCallState.error, - id, - params - ), - } - set({ toolCallsById: completeMap }) - logger.info(`[toolCallsById] executing → ${success ? 'success' : 'error'} (integration)`, { - id, - name, - result, - }) - - // Notify backend tool mark-complete endpoint - try { - await fetch('/api/copilot/tools/mark-complete', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - id, - name: name || 'unknown_tool', - status: success ? 200 : 500, - message: success - ? result.result?.output?.content - : result.result?.error || result.error || 'Tool execution failed', - data: success - ? result.result?.output - : { - error: result.result?.error || result.error, - output: result.result?.output, - }, - }), - }) - } catch {} - } catch (e) { - const errorMap = { ...get().toolCallsById } - // Do not override terminal review/rejected - if ( - isRejectedState(errorMap[id]?.state) || - isReviewState(errorMap[id]?.state) || - isBackgroundState(errorMap[id]?.state) - ) { - return - } - errorMap[id] = { - ...errorMap[id], - state: ClientToolCallState.error, - display: resolveToolDisplay(name, ClientToolCallState.error, id, params), - } - set({ toolCallsById: errorMap }) - logger.error('Integration tool execution failed', { id, name, error: e }) - } - }, - - skipIntegrationTool: (toolCallId: string) => { - const { toolCallsById } = get() - const toolCall = toolCallsById[toolCallId] - if (!toolCall) return - - const { id, name, params } = toolCall - - // Set to rejected state - const rejectedMap = { ...get().toolCallsById } - rejectedMap[id] = { - ...rejectedMap[id], - state: ClientToolCallState.rejected, - display: resolveToolDisplay(name, ClientToolCallState.rejected, id, params), - } 
- set({ toolCallsById: rejectedMap }) - logger.info('[toolCallsById] pending → rejected (integration tool skipped)', { id, name }) - - // Notify backend tool mark-complete endpoint with skip status - fetch('/api/copilot/tools/mark-complete', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - id, - name: name || 'unknown_tool', - status: 200, - message: 'Tool execution skipped by user', - data: { skipped: true }, - }), - }).catch(() => {}) - }, - loadAutoAllowedTools: async () => { try { - logger.info('[AutoAllowedTools] Loading from API...') - const res = await fetch('/api/copilot/auto-allowed-tools') - logger.info('[AutoAllowedTools] Load response', { status: res.status, ok: res.ok }) + logger.debug('[AutoAllowedTools] Loading from API...') + const res = await fetch(COPILOT_AUTO_ALLOWED_TOOLS_API_PATH) + logger.debug('[AutoAllowedTools] Load response', { status: res.status, ok: res.ok }) if (res.ok) { const data = await res.json() - const tools = data.autoAllowedTools || [] - set({ autoAllowedTools: tools }) - logger.info('[AutoAllowedTools] Loaded successfully', { count: tools.length, tools }) + const tools = data.autoAllowedTools ?? 
[] + set({ autoAllowedTools: tools, autoAllowedToolsLoaded: true }) + writeAutoAllowedToolsToStorage(tools) + logger.debug('[AutoAllowedTools] Loaded successfully', { count: tools.length, tools }) } else { + set({ autoAllowedToolsLoaded: true }) logger.warn('[AutoAllowedTools] Load failed with status', { status: res.status }) } } catch (err) { + set({ autoAllowedToolsLoaded: true }) logger.error('[AutoAllowedTools] Failed to load', { error: err }) } }, addAutoAllowedTool: async (toolId: string) => { try { - logger.info('[AutoAllowedTools] Adding tool...', { toolId }) - const res = await fetch('/api/copilot/auto-allowed-tools', { + logger.debug('[AutoAllowedTools] Adding tool...', { toolId }) + const res = await fetch(COPILOT_AUTO_ALLOWED_TOOLS_API_PATH, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ toolId }), }) - logger.info('[AutoAllowedTools] API response', { toolId, status: res.status, ok: res.ok }) + logger.debug('[AutoAllowedTools] API response', { toolId, status: res.status, ok: res.ok }) if (res.ok) { const data = await res.json() - logger.info('[AutoAllowedTools] API returned', { toolId, tools: data.autoAllowedTools }) - set({ autoAllowedTools: data.autoAllowedTools || [] }) - logger.info('[AutoAllowedTools] Added tool to store', { toolId }) - - // Auto-execute all pending tools of the same type - const { toolCallsById, executeIntegrationTool } = get() - const pendingToolCalls = Object.values(toolCallsById).filter( - (tc) => tc.name === toolId && tc.state === ClientToolCallState.pending - ) - if (pendingToolCalls.length > 0) { - const isIntegrationTool = !CLASS_TOOL_METADATA[toolId] - logger.info('[AutoAllowedTools] Auto-executing pending tools', { - toolId, - count: pendingToolCalls.length, - isIntegrationTool, - }) - for (const tc of pendingToolCalls) { - if (isIntegrationTool) { - // Integration tools use executeIntegrationTool - executeIntegrationTool(tc.id).catch((err) => { - logger.error('[AutoAllowedTools] 
Auto-execute pending integration tool failed', { - toolCallId: tc.id, - toolId, - error: err, - }) - }) - } else { - // Client tools with interrupts use handleAccept - const inst = getClientTool(tc.id) as any - if (inst && typeof inst.handleAccept === 'function') { - Promise.resolve() - .then(() => inst.handleAccept(tc.params || {})) - .catch((err: any) => { - logger.error('[AutoAllowedTools] Auto-execute pending client tool failed', { - toolCallId: tc.id, - toolId, - error: err, - }) - }) - } - } - } - } + logger.debug('[AutoAllowedTools] API returned', { toolId, tools: data.autoAllowedTools }) + const tools = data.autoAllowedTools ?? [] + set({ autoAllowedTools: tools, autoAllowedToolsLoaded: true }) + writeAutoAllowedToolsToStorage(tools) + logger.debug('[AutoAllowedTools] Added tool to store', { toolId }) } } catch (err) { logger.error('[AutoAllowedTools] Failed to add tool', { toolId, error: err }) @@ -4041,15 +2238,17 @@ export const useCopilotStore = create()( removeAutoAllowedTool: async (toolId: string) => { try { const res = await fetch( - `/api/copilot/auto-allowed-tools?toolId=${encodeURIComponent(toolId)}`, + `${COPILOT_AUTO_ALLOWED_TOOLS_API_PATH}?toolId=${encodeURIComponent(toolId)}`, { method: 'DELETE', } ) if (res.ok) { const data = await res.json() - set({ autoAllowedTools: data.autoAllowedTools || [] }) - logger.info('[AutoAllowedTools] Removed tool', { toolId }) + const tools = data.autoAllowedTools ?? 
[] + set({ autoAllowedTools: tools, autoAllowedToolsLoaded: true }) + writeAutoAllowedToolsToStorage(tools) + logger.debug('[AutoAllowedTools] Removed tool', { toolId }) } } catch (err) { logger.error('[AutoAllowedTools] Failed to remove tool', { toolId, error: err }) @@ -4058,16 +2257,14 @@ export const useCopilotStore = create()( isToolAutoAllowed: (toolId: string) => { const { autoAllowedTools } = get() - return autoAllowedTools.includes(toolId) + return isToolAutoAllowedByList(toolId, autoAllowedTools) }, // Credential masking loadSensitiveCredentialIds: async () => { try { - const res = await fetch('/api/copilot/execute-copilot-server-tool', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ toolName: 'get_credentials', payload: {} }), + const res = await fetch(COPILOT_CREDENTIALS_API_PATH, { + credentials: 'include', }) if (!res.ok) { logger.warn('[loadSensitiveCredentialIds] Failed to fetch credentials', { @@ -4077,8 +2274,8 @@ export const useCopilotStore = create()( } const json = await res.json() // Credentials are at result.oauth.connected.credentials - const credentials = json?.result?.oauth?.connected?.credentials || [] - logger.info('[loadSensitiveCredentialIds] Response', { + const credentials = json?.result?.oauth?.connected?.credentials ?? 
[] + logger.debug('[loadSensitiveCredentialIds] Response', { hasResult: !!json?.result, credentialCount: credentials.length, }) @@ -4089,7 +2286,7 @@ export const useCopilotStore = create()( } } set({ sensitiveCredentialIds: ids }) - logger.info('[loadSensitiveCredentialIds] Loaded credential IDs', { + logger.debug('[loadSensitiveCredentialIds] Loaded credential IDs', { count: ids.size, }) } catch (err) { @@ -4132,7 +2329,7 @@ export const useCopilotStore = create()( removeFromQueue: (id) => { set({ messageQueue: get().messageQueue.filter((m) => m.id !== id) }) - logger.info('[Queue] Message removed from queue', { + logger.debug('[Queue] Message removed from queue', { id, queueLength: get().messageQueue.length, }) @@ -4146,7 +2343,7 @@ export const useCopilotStore = create()( queue.splice(index, 1) queue.splice(index - 1, 0, item) set({ messageQueue: queue }) - logger.info('[Queue] Message moved up in queue', { id, newIndex: index - 1 }) + logger.debug('[Queue] Message moved up in queue', { id, newIndex: index - 1 }) } }, @@ -4184,56 +2381,3 @@ export const useCopilotStore = create()( }, })) ) - -// Sync class-based tool instance state changes back into the store map -try { - registerToolStateSync((toolCallId: string, nextState: any) => { - const state = useCopilotStore.getState() - const current = state.toolCallsById[toolCallId] - if (!current) return - let mapped: ClientToolCallState = current.state - if (nextState === 'executing') mapped = ClientToolCallState.executing - else if (nextState === 'pending') mapped = ClientToolCallState.pending - else if (nextState === 'success' || nextState === 'accepted') - mapped = ClientToolCallState.success - else if (nextState === 'error' || nextState === 'errored') mapped = ClientToolCallState.error - else if (nextState === 'rejected') mapped = ClientToolCallState.rejected - else if (nextState === 'aborted') mapped = ClientToolCallState.aborted - else if (nextState === 'review') mapped = (ClientToolCallState as any).review - 
else if (nextState === 'background') mapped = (ClientToolCallState as any).background - else if (typeof nextState === 'number') mapped = nextState as unknown as ClientToolCallState - - // Store-authoritative gating: ignore invalid/downgrade transitions - const isTerminal = (s: ClientToolCallState) => - s === ClientToolCallState.success || - s === ClientToolCallState.error || - s === ClientToolCallState.rejected || - s === ClientToolCallState.aborted || - (s as any) === (ClientToolCallState as any).review || - (s as any) === (ClientToolCallState as any).background - - // If we've already reached a terminal state, ignore any further non-terminal updates - if (isTerminal(current.state) && !isTerminal(mapped)) { - return - } - // Prevent downgrades (executing → pending, pending → generating) - if ( - (current.state === ClientToolCallState.executing && mapped === ClientToolCallState.pending) || - (current.state === ClientToolCallState.pending && - mapped === (ClientToolCallState as any).generating) - ) { - return - } - // No-op if unchanged - if (mapped === current.state) return - const updated = { - ...state.toolCallsById, - [toolCallId]: { - ...current, - state: mapped, - display: resolveToolDisplay(current.name, mapped, toolCallId, current.params), - }, - } - useCopilotStore.setState({ toolCallsById: updated }) - }) -} catch {} diff --git a/apps/sim/stores/panel/copilot/types.ts b/apps/sim/stores/panel/copilot/types.ts index 49b76bd62..06b753232 100644 --- a/apps/sim/stores/panel/copilot/types.ts +++ b/apps/sim/stores/panel/copilot/types.ts @@ -2,6 +2,7 @@ import type { CopilotMode, CopilotModelId } from '@/lib/copilot/models' export type { CopilotMode, CopilotModelId } from '@/lib/copilot/models' +import type { ClientContentBlock } from '@/lib/copilot/client-sse/types' import type { ClientToolCallState, ClientToolDisplay } from '@/lib/copilot/tools/client/base-tool' import type { WorkflowState } from '@/stores/workflows/workflow/types' @@ -21,7 +22,8 @@ export 
interface CopilotToolCall { id: string name: string state: ClientToolCallState - params?: Record + params?: Record + input?: Record display?: ClientToolDisplay /** Content streamed from a subagent (e.g., debug agent) */ subAgentContent?: string @@ -33,6 +35,20 @@ export interface CopilotToolCall { subAgentStreaming?: boolean } +export interface CopilotStreamInfo { + streamId: string + workflowId: string + chatId?: string + userMessageId: string + assistantMessageId: string + lastEventId: number + resumeAttempts: number + userMessageContent: string + fileAttachments?: MessageFileAttachment[] + contexts?: ChatContext[] + startedAt: number +} + export interface MessageFileAttachment { id: string key: string @@ -48,18 +64,7 @@ export interface CopilotMessage { timestamp: string citations?: { id: number; title: string; url: string; similarity?: number }[] toolCalls?: CopilotToolCall[] - contentBlocks?: Array< - | { type: 'text'; content: string; timestamp: number } - | { - type: 'thinking' - content: string - timestamp: number - duration?: number - startTime?: number - } - | { type: 'tool_call'; toolCall: CopilotToolCall; timestamp: number } - | { type: 'contexts'; contexts: ChatContext[]; timestamp: number } - > + contentBlocks?: ClientContentBlock[] fileAttachments?: MessageFileAttachment[] contexts?: ChatContext[] errorType?: 'usage_limit' | 'unauthorized' | 'forbidden' | 'rate_limit' | 'upgrade_required' @@ -96,6 +101,16 @@ import type { CopilotChat as ApiCopilotChat } from '@/lib/copilot/api' export type CopilotChat = ApiCopilotChat +/** + * A checkpoint entry as returned from the checkpoints API. 
+ */ +export interface CheckpointEntry { + id: string + messageId?: string + workflowState?: Record + createdAt?: string +} + export interface CopilotState { mode: CopilotMode selectedModel: CopilotModelId @@ -108,8 +123,7 @@ export interface CopilotState { messages: CopilotMessage[] workflowId: string | null - checkpoints: any[] - messageCheckpoints: Record + messageCheckpoints: Record messageSnapshots: Record isLoading: boolean @@ -153,6 +167,10 @@ export interface CopilotState { // Auto-allowed integration tools (tools that can run without confirmation) autoAllowedTools: string[] + autoAllowedToolsLoaded: boolean + + // Active stream metadata for reconnect/replay + activeStream: CopilotStreamInfo | null // Message queue for messages sent while another is in progress messageQueue: QueuedMessage[] @@ -170,7 +188,6 @@ export interface CopilotActions { setWorkflowId: (workflowId: string | null) => Promise validateCurrentChat: () => boolean loadChats: (forceRefresh?: boolean) => Promise - areChatsFresh: (workflowId: string) => boolean selectChat: (chat: CopilotChat) => Promise createNewChat: () => Promise deleteChat: (chatId: string) => Promise @@ -194,22 +211,18 @@ export interface CopilotActions { toolCallState: 'accepted' | 'rejected' | 'error', toolCallId?: string ) => void - setToolCallState: (toolCall: any, newState: ClientToolCallState, options?: any) => void - updateToolCallParams: (toolCallId: string, params: Record) => void - sendDocsMessage: (query: string, options?: { stream?: boolean; topK?: number }) => Promise - saveChatMessages: (chatId: string) => Promise - - loadCheckpoints: (chatId: string) => Promise + resumeActiveStream: () => Promise + setToolCallState: (toolCall: CopilotToolCall, newState: ClientToolCallState | string) => void + updateToolCallParams: (toolCallId: string, params: Record) => void loadMessageCheckpoints: (chatId: string) => Promise revertToCheckpoint: (checkpointId: string) => Promise - getCheckpointsForMessage: (messageId: 
string) => any[] + getCheckpointsForMessage: (messageId: string) => CheckpointEntry[] saveMessageCheckpoint: (messageId: string) => Promise clearMessages: () => void clearError: () => void clearSaveError: () => void clearCheckpointError: () => void - retrySave: (chatId: string) => Promise cleanup: () => void reset: () => void @@ -228,11 +241,10 @@ export interface CopilotActions { stream: ReadableStream, messageId: string, isContinuation?: boolean, - triggerUserMessageId?: string + triggerUserMessageId?: string, + abortSignal?: AbortSignal ) => Promise handleNewChatCreation: (newChatId: string) => Promise - executeIntegrationTool: (toolCallId: string) => Promise - skipIntegrationTool: (toolCallId: string) => void loadAutoAllowedTools: () => Promise addAutoAllowedTool: (toolId: string) => Promise removeAutoAllowedTool: (toolId: string) => Promise diff --git a/apps/sim/stores/workflow-diff/store.ts b/apps/sim/stores/workflow-diff/store.ts index 285be7e11..339465ec5 100644 --- a/apps/sim/stores/workflow-diff/store.ts +++ b/apps/sim/stores/workflow-diff/store.ts @@ -1,7 +1,7 @@ import { createLogger } from '@sim/logger' import { create } from 'zustand' import { devtools } from 'zustand/middleware' -import { getClientTool } from '@/lib/copilot/tools/client/manager' +import { COPILOT_STATS_API_PATH } from '@/lib/copilot/constants' import { stripWorkflowDiffMarkers, WorkflowDiffEngine } from '@/lib/workflows/diff' import { enqueueReplaceWorkflowState } from '@/lib/workflows/operations/socket-operations' import { validateWorkflowState } from '@/lib/workflows/sanitization/validation' @@ -22,6 +22,17 @@ import { const logger = createLogger('WorkflowDiffStore') const diffEngine = new WorkflowDiffEngine() +const RESET_DIFF_STATE = { + hasActiveDiff: false, + isShowingDiff: false, + isDiffReady: false, + baselineWorkflow: null, + baselineWorkflowId: null, + diffAnalysis: null, + diffMetadata: null, + diffError: null, + _triggerMessageId: null, +} /** * Detects when a diff 
contains no meaningful changes. @@ -66,7 +77,7 @@ export const useWorkflowDiffStore = create { + setProposedChanges: async (proposedState, diffAnalysis, options) => { const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId if (!activeWorkflowId) { logger.error('Cannot apply diff without an active workflow') @@ -105,22 +116,19 @@ export const useWorkflowDiffStore = create { @@ -253,7 +252,7 @@ export const useWorkflowDiffStore = create { + acceptChanges: async (options) => { const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId if (!activeWorkflowId) { logger.error('No active workflow ID found when accepting diff') @@ -294,17 +293,7 @@ export const useWorkflowDiffStore = create {}) + }).catch((error) => { + logger.warn('Failed to send diff-accepted stats', { + error: error instanceof Error ? error.message : String(error), + messageId: triggerMessageId, + }) + }) } findLatestEditWorkflowToolCallId().then((toolCallId) => { if (toolCallId) { - getClientTool(toolCallId) - ?.handleAccept?.() - ?.catch?.((error: Error) => { - logger.warn('Failed to notify tool accept state', { error }) + import('@/stores/panel/copilot/store') + .then(({ useCopilotStore }) => { + useCopilotStore.getState().updatePreviewToolCallState('accepted', toolCallId) + }) + .catch((error) => { + logger.warn('Failed to update tool accept state', { error }) }) } }) }, - rejectChanges: async () => { + rejectChanges: async (options) => { const { baselineWorkflow, baselineWorkflowId, _triggerMessageId, diffAnalysis } = get() const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId @@ -384,17 +380,7 @@ export const useWorkflowDiffStore = create {}) + }).catch((error) => { + logger.warn('Failed to send diff-rejected stats', { + error: error instanceof Error ? 
error.message : String(error), + messageId: _triggerMessageId, + }) + }) } findLatestEditWorkflowToolCallId().then((toolCallId) => { if (toolCallId) { - getClientTool(toolCallId) - ?.handleReject?.() - ?.catch?.((error: Error) => { - logger.warn('Failed to notify tool reject state', { error }) + import('@/stores/panel/copilot/store') + .then(({ useCopilotStore }) => { + useCopilotStore.getState().updatePreviewToolCallState('rejected', toolCallId) + }) + .catch((error) => { + logger.warn('Failed to update tool reject state', { error }) }) } }) @@ -472,11 +465,13 @@ export const useWorkflowDiffStore = create { const block = currentBlocks[blockId] - return block && (block as any).is_diff !== 'new' + const blockDiffState = (block as { is_diff?: string } | undefined)?.is_diff + return block && blockDiffState !== 'new' }) || diffAnalysis.edited_blocks?.some((blockId) => { const block = currentBlocks[blockId] - return block && (block as any).is_diff !== 'edited' + const blockDiffState = (block as { is_diff?: string } | undefined)?.is_diff + return block && blockDiffState !== 'edited' }) if (!needsUpdate) { @@ -490,11 +485,12 @@ export const useWorkflowDiffStore = create { const isNewBlock = diffAnalysis.new_blocks?.includes(blockId) const isEditedBlock = diffAnalysis.edited_blocks?.includes(blockId) + const blockDiffState = (block as { is_diff?: string } | undefined)?.is_diff - if (isNewBlock && (block as any).is_diff !== 'new') { + if (isNewBlock && blockDiffState !== 'new') { updatedBlocks[blockId] = { ...block, is_diff: 'new' } hasChanges = true - } else if (isEditedBlock && (block as any).is_diff !== 'edited') { + } else if (isEditedBlock && blockDiffState !== 'edited') { updatedBlocks[blockId] = { ...block, is_diff: 'edited' } // Re-apply field_diffs if available diff --git a/apps/sim/stores/workflow-diff/types.ts b/apps/sim/stores/workflow-diff/types.ts index fe40b0842..5356046fe 100644 --- a/apps/sim/stores/workflow-diff/types.ts +++ 
b/apps/sim/stores/workflow-diff/types.ts @@ -13,12 +13,21 @@ export interface WorkflowDiffState { _triggerMessageId?: string | null } +export interface DiffActionOptions { + /** Skip recording this operation for undo/redo. Used during undo/redo replay. */ + skipRecording?: boolean +} + export interface WorkflowDiffActions { - setProposedChanges: (workflowState: WorkflowState, diffAnalysis?: DiffAnalysis) => Promise + setProposedChanges: ( + workflowState: WorkflowState, + diffAnalysis?: DiffAnalysis, + options?: DiffActionOptions + ) => Promise clearDiff: (options?: { restoreBaseline?: boolean }) => void toggleDiffView: () => void - acceptChanges: () => Promise - rejectChanges: () => Promise + acceptChanges: (options?: DiffActionOptions) => Promise + rejectChanges: (options?: DiffActionOptions) => Promise reapplyDiffMarkers: () => void _batchedStateUpdate: (updates: Partial) => void } diff --git a/apps/sim/stores/workflow-diff/utils.ts b/apps/sim/stores/workflow-diff/utils.ts index 3245875f7..b5cdd4996 100644 --- a/apps/sim/stores/workflow-diff/utils.ts +++ b/apps/sim/stores/workflow-diff/utils.ts @@ -26,7 +26,7 @@ export function extractSubBlockValues( Object.entries(workflowState.blocks || {}).forEach(([blockId, block]) => { values[blockId] = {} Object.entries(block.subBlocks || {}).forEach(([subBlockId, subBlock]) => { - values[blockId][subBlockId] = (subBlock as any)?.value ?? null + values[blockId][subBlockId] = subBlock?.value ?? null }) }) return values @@ -37,10 +37,27 @@ export function applyWorkflowStateToStores( workflowState: WorkflowState, options?: { updateLastSaved?: boolean } ) { + logger.debug('[applyWorkflowStateToStores] Applying state', { + workflowId, + blockCount: Object.keys(workflowState.blocks || {}).length, + edgeCount: workflowState.edges?.length ?? 
0, + edgePreview: workflowState.edges?.slice(0, 3).map((e) => `${e.source} -> ${e.target}`), + }) const workflowStore = useWorkflowStore.getState() - workflowStore.replaceWorkflowState(cloneWorkflowState(workflowState), options) + const cloned = cloneWorkflowState(workflowState) + logger.debug('[applyWorkflowStateToStores] Cloned state edges', { + clonedEdgeCount: cloned.edges?.length ?? 0, + }) + workflowStore.replaceWorkflowState(cloned, options) const subBlockValues = extractSubBlockValues(workflowState) useSubBlockStore.getState().setWorkflowValues(workflowId, subBlockValues) + + // Verify what's in the store after apply + const afterState = workflowStore.getWorkflowState() + logger.info('[applyWorkflowStateToStores] Applied workflow state to stores', { + workflowId, + afterEdgeCount: afterState.edges?.length ?? 0, + }) } export function captureBaselineSnapshot(workflowId: string): WorkflowState { @@ -91,7 +108,7 @@ export async function persistWorkflowStateToServer( export async function getLatestUserMessageId(): Promise { try { const { useCopilotStore } = await import('@/stores/panel/copilot/store') - const { messages } = useCopilotStore.getState() as any + const { messages } = useCopilotStore.getState() if (!Array.isArray(messages) || messages.length === 0) { return null } @@ -111,21 +128,19 @@ export async function getLatestUserMessageId(): Promise { export async function findLatestEditWorkflowToolCallId(): Promise { try { const { useCopilotStore } = await import('@/stores/panel/copilot/store') - const { messages, toolCallsById } = useCopilotStore.getState() as any + const { messages, toolCallsById } = useCopilotStore.getState() for (let mi = messages.length - 1; mi >= 0; mi--) { const message = messages[mi] if (message.role !== 'assistant' || !message.contentBlocks) continue - for (const block of message.contentBlocks as any[]) { + for (const block of message.contentBlocks) { if (block?.type === 'tool_call' && block.toolCall?.name === 'edit_workflow') { 
return block.toolCall?.id } } } - const fallback = Object.values(toolCallsById).filter( - (call: any) => call.name === 'edit_workflow' - ) as any[] + const fallback = Object.values(toolCallsById).filter((call) => call.name === 'edit_workflow') return fallback.length ? fallback[fallback.length - 1].id : undefined } catch (error) { @@ -134,7 +149,7 @@ export async function findLatestEditWorkflowToolCallId(): Promise) => void) { let updateTimer: NodeJS.Timeout | null = null const UPDATE_DEBOUNCE_MS = 16 let pendingUpdates: Partial = {} diff --git a/apps/sim/tools/jira/add_attachment.ts b/apps/sim/tools/jira/add_attachment.ts index 0fa9946e3..07b6e1d16 100644 --- a/apps/sim/tools/jira/add_attachment.ts +++ b/apps/sim/tools/jira/add_attachment.ts @@ -1,4 +1,5 @@ import type { JiraAddAttachmentParams, JiraAddAttachmentResponse } from '@/tools/jira/types' +import { TIMESTAMP_OUTPUT } from '@/tools/jira/types' import type { ToolConfig } from '@/tools/types' export const jiraAddAttachmentTool: ToolConfig = @@ -75,9 +76,40 @@ export const jiraAddAttachmentTool: ToolConfig = { id: 'jira_add_comment', name: 'Jira Add Comment', @@ -38,6 +55,13 @@ export const jiraAddCommentTool: ToolConfig { if (!params.cloudId) return undefined as any - return { + const payload: Record = { body: { type: 'doc', version: 1, content: [ { type: 'paragraph', - content: [ - { - type: 'text', - text: params?.body || '', - }, - ], + content: [{ type: 'text', text: params.body ?? 
'' }], }, ], }, } + if (params.visibility) payload.visibility = params.visibility + return payload }, }, transformResponse: async (response: Response, params?: JiraAddCommentParams) => { - if (!params?.cloudId) { - const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) - // Make the actual request with the resolved cloudId - const commentUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params?.issueKey}/comment` + const payload: Record = { + body: { + type: 'doc', + version: 1, + content: [ + { + type: 'paragraph', + content: [{ type: 'text', text: params?.body ?? '' }], + }, + ], + }, + } + if (params?.visibility) payload.visibility = params.visibility + + const makeRequest = async (cloudId: string) => { + const commentUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params!.issueKey}/comment` const commentResponse = await fetch(commentUrl, { method: 'POST', headers: { Accept: 'application/json', 'Content-Type': 'application/json', - Authorization: `Bearer ${params?.accessToken}`, + Authorization: `Bearer ${params!.accessToken}`, }, - body: JSON.stringify({ - body: { - type: 'doc', - version: 1, - content: [ - { - type: 'paragraph', - content: [ - { - type: 'text', - text: params?.body || '', - }, - ], - }, - ], - }, - }), + body: JSON.stringify(payload), }) if (!commentResponse.ok) { @@ -124,48 +141,46 @@ export const jiraAddCommentTool: ToolConfig = { id: 'jira_add_watcher', @@ -87,16 +72,15 @@ export const jiraAddWatcherTool: ToolConfig { if (!params?.cloudId) { const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) - // Make the actual request with the resolved cloudId - const watcherUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params?.issueKey}/watchers` + const watcherUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params!.issueKey}/watchers` const watcherResponse = await fetch(watcherUrl, { method: 'POST', headers: { Accept: 
'application/json', 'Content-Type': 'application/json', - Authorization: `Bearer ${params?.accessToken}`, + Authorization: `Bearer ${params!.accessToken}`, }, - body: JSON.stringify(params?.accountId), + body: JSON.stringify(params!.accountId), }) if (!watcherResponse.ok) { @@ -112,14 +96,13 @@ export const jiraAddWatcherTool: ToolConfig = { + timeSpentSeconds: Number(params.timeSpentSeconds), + comment: params.comment + ? { + type: 'doc', + version: 1, + content: [ + { + type: 'paragraph', + content: [{ type: 'text', text: params.comment }], + }, + ], + } + : undefined, + started: + (params.started ? params.started.replace(/Z$/, '+0000') : undefined) || + new Date().toISOString().replace(/Z$/, '+0000'), + } + if (params.visibility) body.visibility = params.visibility + return body +} + +/** + * Transforms a worklog API response into typed output. + */ +function transformWorklogResponse(data: any, params: JiraAddWorklogParams) { + return { + ts: new Date().toISOString(), + issueKey: params.issueKey ?? 'unknown', + worklogId: data?.id ?? 'unknown', + timeSpent: data?.timeSpent ?? '', + timeSpentSeconds: data?.timeSpentSeconds ?? Number(params.timeSpentSeconds) ?? 0, + author: transformUser(data?.author) ?? { accountId: '', displayName: '' }, + started: data?.started ?? '', + created: data?.created ?? '', + success: true, + } +} + export const jiraAddWorklogTool: ToolConfig = { id: 'jira_add_worklog', name: 'Jira Add Worklog', @@ -50,6 +94,13 @@ export const jiraAddWorklogTool: ToolConfig { if (!params.cloudId) return undefined as any - return { - timeSpentSeconds: Number(params.timeSpentSeconds), - comment: params.comment - ? { - type: 'doc', - version: 1, - content: [ - { - type: 'paragraph', - content: [ - { - type: 'text', - text: params.comment, - }, - ], - }, - ], - } - : undefined, - started: - (params.started ? 
params.started.replace(/Z$/, '+0000') : undefined) || - new Date().toISOString().replace(/Z$/, '+0000'), - } + return buildWorklogBody(params) }, }, transformResponse: async (response: Response, params?: JiraAddWorklogParams) => { - if (!params?.cloudId) { - if (!params?.timeSpentSeconds || params.timeSpentSeconds <= 0) { - throw new Error('timeSpentSeconds is required and must be greater than 0') - } - const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) - // Make the actual request with the resolved cloudId - const worklogUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params?.issueKey}/worklog` + if (!params?.timeSpentSeconds || params.timeSpentSeconds <= 0) { + throw new Error('timeSpentSeconds is required and must be greater than 0') + } + + const makeRequest = async (cloudId: string) => { + const worklogUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params!.issueKey}/worklog` const worklogResponse = await fetch(worklogUrl, { method: 'POST', headers: { Accept: 'application/json', 'Content-Type': 'application/json', - Authorization: `Bearer ${params?.accessToken}`, + Authorization: `Bearer ${params!.accessToken}`, }, - body: JSON.stringify({ - timeSpentSeconds: params?.timeSpentSeconds ? Number(params.timeSpentSeconds) : 0, - comment: params?.comment - ? { - type: 'doc', - version: 1, - content: [ - { - type: 'paragraph', - content: [ - { - type: 'text', - text: params.comment, - }, - ], - }, - ], - } - : undefined, - // Preserve milliseconds and convert trailing Z to +0000 as required by Jira examples - started: - (params?.started ? 
params.started.replace(/Z$/, '+0000') : undefined) || - new Date().toISOString().replace(/Z$/, '+0000'), - }), + body: JSON.stringify(buildWorklogBody(params!)), }) if (!worklogResponse.ok) { @@ -152,48 +157,47 @@ export const jiraAddWorklogTool: ToolConfig = { id: 'jira_assign_issue', @@ -144,8 +129,11 @@ export const jiraAssignIssueTool: ToolConfig = { id: 'jira_bulk_read', name: 'Jira Bulk Read', - description: 'Retrieve multiple Jira issues in bulk', + description: 'Retrieve multiple Jira issues from a project in bulk', version: '1.0.0', oauth: { @@ -41,44 +43,18 @@ export const jiraBulkRetrieveTool: ToolConfig { - // Always return accessible resources endpoint; transformResponse will build search URLs - return 'https://api.atlassian.com/oauth/token/accessible-resources' - }, + url: () => 'https://api.atlassian.com/oauth/token/accessible-resources', method: 'GET', headers: (params: JiraRetrieveBulkParams) => ({ Authorization: `Bearer ${params.accessToken}`, Accept: 'application/json', }), - body: (params: JiraRetrieveBulkParams) => - params.cloudId - ? 
{ - jql: '', // Will be set in transformResponse when we know the resolved project key - startAt: 0, - maxResults: 100, - fields: ['summary', 'description', 'created', 'updated'], - } - : {}, }, transformResponse: async (response: Response, params?: JiraRetrieveBulkParams) => { const MAX_TOTAL = 1000 const PAGE_SIZE = 100 - // Helper to extract description text safely (ADF can be nested) - const extractDescription = (desc: any): string => { - try { - return ( - desc?.content?.[0]?.content?.[0]?.text || - desc?.content?.flatMap((c: any) => c?.content || [])?.find((c: any) => c?.text)?.text || - '' - ) - } catch (_e) { - return '' - } - } - - // Helper to resolve a project reference (id or key) to its canonical key const resolveProjectKey = async (cloudId: string, accessToken: string, ref: string) => { const refTrimmed = (ref || '').trim() if (!refTrimmed) return refTrimmed @@ -87,128 +63,166 @@ export const jiraBulkRetrieveTool: ToolConfig { + if (params?.cloudId) return params.cloudId const accessibleResources = await response.json() const normalizedInput = `https://${params?.domain}`.toLowerCase() const matchedResource = accessibleResources.find( (r: any) => r.url.toLowerCase() === normalizedInput ) - - const projectKey = await resolveProjectKey( - matchedResource.id, - params!.accessToken, - params!.projectId - ) - const jql = `project = ${projectKey} ORDER BY updated DESC` - - let startAt = 0 - let collected: any[] = [] - let total = 0 - - while (startAt < MAX_TOTAL) { - const queryParams = new URLSearchParams({ - jql, - fields: 'summary,description,created,updated', - maxResults: String(PAGE_SIZE), - }) - if (startAt > 0) { - queryParams.set('startAt', String(startAt)) - } - const url = `https://api.atlassian.com/ex/jira/${matchedResource.id}/rest/api/3/search/jql?${queryParams.toString()}` - const pageResponse = await fetch(url, { - method: 'GET', - headers: { - Authorization: `Bearer ${params?.accessToken}`, - Accept: 'application/json', - }, - }) - - const 
pageData = await pageResponse.json() - const issues = pageData.issues || [] - total = pageData.total || issues.length - collected = collected.concat(issues) - - if (collected.length >= Math.min(total, MAX_TOTAL) || issues.length === 0) break - startAt += PAGE_SIZE - } - - return { - success: true, - output: collected.slice(0, MAX_TOTAL).map((issue: any) => ({ - ts: new Date().toISOString(), - summary: issue.fields?.summary, - description: extractDescription(issue.fields?.description), - created: issue.fields?.created, - updated: issue.fields?.updated, - })), - } + if (matchedResource) return matchedResource.id + if (Array.isArray(accessibleResources) && accessibleResources.length > 0) + return accessibleResources[0].id + throw new Error('No Jira resources found') } - // cloudId present: resolve project and paginate using the Search API - // Resolve to canonical project key for consistent JQL - const projectKey = await resolveProjectKey( - params!.cloudId!, - params!.accessToken, - params!.projectId - ) - + const cloudId = await resolveCloudId() + const projectKey = await resolveProjectKey(cloudId, params!.accessToken, params!.projectId) const jql = `project = ${projectKey} ORDER BY updated DESC` - // Always do full pagination with resolved key let collected: any[] = [] - let total = 0 - let startAt = 0 - while (startAt < MAX_TOTAL) { + let nextPageToken: string | undefined + let total: number | null = null + + while (collected.length < MAX_TOTAL) { const queryParams = new URLSearchParams({ jql, - fields: 'summary,description,created,updated', + fields: 'summary,description,status,issuetype,priority,assignee,created,updated', maxResults: String(PAGE_SIZE), }) - if (startAt > 0) { - queryParams.set('startAt', String(startAt)) - } - const url = `https://api.atlassian.com/ex/jira/${params?.cloudId}/rest/api/3/search/jql?${queryParams.toString()}` + if (nextPageToken) queryParams.set('nextPageToken', nextPageToken) + + const url = 
`https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/search/jql?${queryParams.toString()}` const pageResponse = await fetch(url, { method: 'GET', headers: { - Authorization: `Bearer ${params?.accessToken}`, + Authorization: `Bearer ${params!.accessToken}`, Accept: 'application/json', }, }) + + if (!pageResponse.ok) { + let message = `Failed to bulk read Jira issues (${pageResponse.status})` + try { + const err = await pageResponse.json() + message = err?.errorMessages?.join(', ') || err?.message || message + } catch (_e) {} + throw new Error(message) + } + const pageData = await pageResponse.json() const issues = pageData.issues || [] - total = pageData.total || issues.length + if (pageData.total != null) total = pageData.total collected = collected.concat(issues) - if (issues.length === 0 || collected.length >= Math.min(total, MAX_TOTAL)) break - startAt += PAGE_SIZE + + if (pageData.isLast || !pageData.nextPageToken || issues.length === 0) break + nextPageToken = pageData.nextPageToken } return { success: true, - output: collected.slice(0, MAX_TOTAL).map((issue: any) => ({ + output: { ts: new Date().toISOString(), - summary: issue.fields?.summary, - description: extractDescription(issue.fields?.description), - created: issue.fields?.created, - updated: issue.fields?.updated, - })), + total, + issues: collected.slice(0, MAX_TOTAL).map((issue: any) => ({ + id: issue.id ?? '', + key: issue.key ?? '', + self: issue.self ?? '', + summary: issue.fields?.summary ?? '', + description: extractAdfText(issue.fields?.description), + status: { + id: issue.fields?.status?.id ?? '', + name: issue.fields?.status?.name ?? '', + }, + issuetype: { + id: issue.fields?.issuetype?.id ?? '', + name: issue.fields?.issuetype?.name ?? '', + }, + priority: issue.fields?.priority + ? { id: issue.fields.priority.id ?? '', name: issue.fields.priority.name ?? '' } + : null, + assignee: issue.fields?.assignee + ? { + accountId: issue.fields.assignee.accountId ?? 
'', + displayName: issue.fields.assignee.displayName ?? '', + } + : null, + created: issue.fields?.created ?? '', + updated: issue.fields?.updated ?? '', + })), + nextPageToken: nextPageToken ?? null, + isLast: !nextPageToken || collected.length >= MAX_TOTAL, + }, } }, outputs: { + ts: TIMESTAMP_OUTPUT, + total: { + type: 'number', + description: 'Total number of issues in the project (may not always be available)', + optional: true, + }, issues: { type: 'array', - description: - 'Array of Jira issues with ts, summary, description, created, and updated timestamps', + description: 'Array of Jira issues', + items: { + type: 'object', + properties: { + id: { type: 'string', description: 'Issue ID' }, + key: { type: 'string', description: 'Issue key (e.g., PROJ-123)' }, + self: { type: 'string', description: 'REST API URL for this issue' }, + summary: { type: 'string', description: 'Issue summary' }, + description: { type: 'string', description: 'Issue description text', optional: true }, + status: { + type: 'object', + description: 'Issue status', + properties: { + id: { type: 'string', description: 'Status ID' }, + name: { type: 'string', description: 'Status name' }, + }, + }, + issuetype: { + type: 'object', + description: 'Issue type', + properties: { + id: { type: 'string', description: 'Issue type ID' }, + name: { type: 'string', description: 'Issue type name' }, + }, + }, + priority: { + type: 'object', + description: 'Issue priority', + properties: { + id: { type: 'string', description: 'Priority ID' }, + name: { type: 'string', description: 'Priority name' }, + }, + optional: true, + }, + assignee: { + type: 'object', + description: 'Assigned user', + properties: { + accountId: { type: 'string', description: 'Atlassian account ID' }, + displayName: { type: 'string', description: 'Display name' }, + }, + optional: true, + }, + created: { type: 'string', description: 'ISO 8601 creation timestamp' }, + updated: { type: 'string', description: 'ISO 8601 last 
updated timestamp' }, + }, + }, }, + nextPageToken: { + type: 'string', + description: 'Cursor token for the next page. Null when no more results.', + optional: true, + }, + isLast: { type: 'boolean', description: 'Whether this is the last page of results' }, }, } diff --git a/apps/sim/tools/jira/create_issue_link.ts b/apps/sim/tools/jira/create_issue_link.ts index 1f814cd1a..79e9f1aea 100644 --- a/apps/sim/tools/jira/create_issue_link.ts +++ b/apps/sim/tools/jira/create_issue_link.ts @@ -1,26 +1,7 @@ +import type { JiraCreateIssueLinkParams, JiraCreateIssueLinkResponse } from '@/tools/jira/types' +import { TIMESTAMP_OUTPUT } from '@/tools/jira/types' import { getJiraCloudId } from '@/tools/jira/utils' -import type { ToolConfig, ToolResponse } from '@/tools/types' - -export interface JiraCreateIssueLinkParams { - accessToken: string - domain: string - inwardIssueKey: string - outwardIssueKey: string - linkType: string - comment?: string - cloudId?: string -} - -export interface JiraCreateIssueLinkResponse extends ToolResponse { - output: { - ts: string - inwardIssue: string - outwardIssue: string - linkType: string - linkId?: string - success: boolean - } -} +import type { ToolConfig } from '@/tools/types' export const jiraCreateIssueLinkTool: ToolConfig< JiraCreateIssueLinkParams, @@ -84,7 +65,6 @@ export const jiraCreateIssueLinkTool: ToolConfig< request: { url: (_params: JiraCreateIssueLinkParams) => { - // Always discover first; actual POST happens in transformResponse return 'https://api.atlassian.com/oauth/token/accessible-resources' }, method: () => 'GET', @@ -99,10 +79,8 @@ export const jiraCreateIssueLinkTool: ToolConfig< }, transformResponse: async (response: Response, params?: JiraCreateIssueLinkParams) => { - // Resolve cloudId const cloudId = params?.cloudId || (await getJiraCloudId(params!.domain, params!.accessToken)) - // Fetch and resolve link type by id/name/inward/outward (case-insensitive) const typesResp = await fetch( 
`https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issueLinkType`, { @@ -136,7 +114,6 @@ export const jiraCreateIssueLinkTool: ToolConfig< throw new Error(`Unknown issue link type "${params!.linkType}". Available: ${available}`) } - // Create issue link const linkUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issueLink` const linkResponse = await fetch(linkUrl, { method: 'POST', @@ -179,21 +156,26 @@ export const jiraCreateIssueLinkTool: ToolConfig< throw new Error(message) } - // Try to extract the newly created link ID from the Location header - const location = linkResponse.headers.get('location') || linkResponse.headers.get('Location') - let linkId: string | undefined - if (location) { - const match = location.match(/\/issueLink\/(\d+)/) - if (match) linkId = match[1] + let linkId: string | null = null + + try { + const linkData = await linkResponse.json() + if (linkData?.id) linkId = String(linkData.id) + } catch { + const location = linkResponse.headers.get('location') || linkResponse.headers.get('Location') + if (location) { + const match = location.match(/\/issueLink\/(\d+)/) + if (match) linkId = match[1] + } } return { success: true, output: { ts: new Date().toISOString(), - inwardIssue: params?.inwardIssueKey || 'unknown', - outwardIssue: params?.outwardIssueKey || 'unknown', - linkType: params?.linkType || 'unknown', + inwardIssue: params!.inwardIssueKey || 'unknown', + outwardIssue: params!.outwardIssueKey || 'unknown', + linkType: params!.linkType || 'unknown', linkId, success: true, }, @@ -201,7 +183,7 @@ export const jiraCreateIssueLinkTool: ToolConfig< }, outputs: { - ts: { type: 'string', description: 'Timestamp of the operation' }, + ts: TIMESTAMP_OUTPUT, inwardIssue: { type: 'string', description: 'Inward issue key' }, outwardIssue: { type: 'string', description: 'Outward issue key' }, linkType: { type: 'string', description: 'Type of issue link' }, diff --git a/apps/sim/tools/jira/delete_attachment.ts 
b/apps/sim/tools/jira/delete_attachment.ts index fadc77911..36a879c15 100644 --- a/apps/sim/tools/jira/delete_attachment.ts +++ b/apps/sim/tools/jira/delete_attachment.ts @@ -1,20 +1,7 @@ +import type { JiraDeleteAttachmentParams, JiraDeleteAttachmentResponse } from '@/tools/jira/types' +import { TIMESTAMP_OUTPUT } from '@/tools/jira/types' import { getJiraCloudId } from '@/tools/jira/utils' -import type { ToolConfig, ToolResponse } from '@/tools/types' - -export interface JiraDeleteAttachmentParams { - accessToken: string - domain: string - attachmentId: string - cloudId?: string -} - -export interface JiraDeleteAttachmentResponse extends ToolResponse { - output: { - ts: string - attachmentId: string - success: boolean - } -} +import type { ToolConfig } from '@/tools/types' export const jiraDeleteAttachmentTool: ToolConfig< JiraDeleteAttachmentParams, @@ -127,7 +114,7 @@ export const jiraDeleteAttachmentTool: ToolConfig< }, outputs: { - ts: { type: 'string', description: 'Timestamp of the operation' }, + ts: TIMESTAMP_OUTPUT, attachmentId: { type: 'string', description: 'Deleted attachment ID' }, }, } diff --git a/apps/sim/tools/jira/delete_comment.ts b/apps/sim/tools/jira/delete_comment.ts index d6b68301e..cde50ace4 100644 --- a/apps/sim/tools/jira/delete_comment.ts +++ b/apps/sim/tools/jira/delete_comment.ts @@ -1,22 +1,7 @@ +import type { JiraDeleteCommentParams, JiraDeleteCommentResponse } from '@/tools/jira/types' +import { TIMESTAMP_OUTPUT } from '@/tools/jira/types' import { getJiraCloudId } from '@/tools/jira/utils' -import type { ToolConfig, ToolResponse } from '@/tools/types' - -export interface JiraDeleteCommentParams { - accessToken: string - domain: string - issueKey: string - commentId: string - cloudId?: string -} - -export interface JiraDeleteCommentResponse extends ToolResponse { - output: { - ts: string - issueKey: string - commentId: string - success: boolean - } -} +import type { ToolConfig } from '@/tools/types' export const 
jiraDeleteCommentTool: ToolConfig = { @@ -135,7 +120,7 @@ export const jiraDeleteCommentTool: ToolConfig = { id: 'jira_delete_issue', @@ -170,7 +156,7 @@ export const jiraDeleteIssueTool: ToolConfig { if (!params?.cloudId) { const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) - // Make the actual request with the resolved cloudId - const issueLinkUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issueLink/${params?.linkId}` + const issueLinkUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issueLink/${params!.linkId}` const issueLinkResponse = await fetch(issueLinkUrl, { method: 'DELETE', headers: { Accept: 'application/json', - Authorization: `Bearer ${params?.accessToken}`, + Authorization: `Bearer ${params!.accessToken}`, }, }) @@ -100,13 +86,12 @@ export const jiraDeleteIssueLinkTool: ToolConfig< success: true, output: { ts: new Date().toISOString(), - linkId: params?.linkId || 'unknown', + linkId: params!.linkId || 'unknown', success: true, }, } } - // If cloudId was provided, process the response if (!response.ok) { let message = `Failed to delete issue link (${response.status})` try { @@ -120,14 +105,14 @@ export const jiraDeleteIssueLinkTool: ToolConfig< success: true, output: { ts: new Date().toISOString(), - linkId: params?.linkId || 'unknown', + linkId: params!.linkId || 'unknown', success: true, }, } }, outputs: { - ts: { type: 'string', description: 'Timestamp of the operation' }, + ts: TIMESTAMP_OUTPUT, linkId: { type: 'string', description: 'Deleted link ID' }, }, } diff --git a/apps/sim/tools/jira/delete_worklog.ts b/apps/sim/tools/jira/delete_worklog.ts index 46c0fa826..260abc0c0 100644 --- a/apps/sim/tools/jira/delete_worklog.ts +++ b/apps/sim/tools/jira/delete_worklog.ts @@ -1,22 +1,7 @@ +import type { JiraDeleteWorklogParams, JiraDeleteWorklogResponse } from '@/tools/jira/types' +import { TIMESTAMP_OUTPUT } from '@/tools/jira/types' import { getJiraCloudId } from '@/tools/jira/utils' -import type { 
ToolConfig, ToolResponse } from '@/tools/types' - -export interface JiraDeleteWorklogParams { - accessToken: string - domain: string - issueKey: string - worklogId: string - cloudId?: string -} - -export interface JiraDeleteWorklogResponse extends ToolResponse { - output: { - ts: string - issueKey: string - worklogId: string - success: boolean - } -} +import type { ToolConfig } from '@/tools/types' export const jiraDeleteWorklogTool: ToolConfig = { @@ -83,13 +68,12 @@ export const jiraDeleteWorklogTool: ToolConfig { if (!params?.cloudId) { const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) - // Make the actual request with the resolved cloudId - const worklogUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params?.issueKey}/worklog/${params?.worklogId}` + const worklogUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params!.issueKey}/worklog/${params!.worklogId}` const worklogResponse = await fetch(worklogUrl, { method: 'DELETE', headers: { Accept: 'application/json', - Authorization: `Bearer ${params?.accessToken}`, + Authorization: `Bearer ${params!.accessToken}`, }, }) @@ -106,14 +90,13 @@ export const jiraDeleteWorklogTool: ToolConfig { - if (!params?.cloudId) { - const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) - // Make the actual request with the resolved cloudId - const attachmentsUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params?.issueKey}?fields=attachment` + const fetchAttachments = async (cloudId: string) => { + const attachmentsUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params!.issueKey}?fields=attachment` const attachmentsResponse = await fetch(attachmentsUrl, { method: 'GET', headers: { Accept: 'application/json', - Authorization: `Bearer ${params?.accessToken}`, + Authorization: `Bearer ${params!.accessToken}`, }, }) @@ -82,60 +98,46 @@ export const jiraGetAttachmentsTool: ToolConfig< throw new 
Error(message) } - const data = await attachmentsResponse.json() + return attachmentsResponse.json() + } - return { - success: true, - output: { - ts: new Date().toISOString(), - issueKey: params?.issueKey || 'unknown', - attachments: (data?.fields?.attachment || []).map((att: any) => ({ - id: att.id, - filename: att.filename, - size: att.size, - mimeType: att.mimeType, - created: att.created, - author: att.author?.displayName || att.author?.accountId || 'Unknown', - })), - }, + let data: any + + if (!params?.cloudId) { + const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) + data = await fetchAttachments(cloudId) + } else { + if (!response.ok) { + let message = `Failed to get attachments from Jira issue (${response.status})` + try { + const err = await response.json() + message = err?.errorMessages?.join(', ') || err?.message || message + } catch (_e) {} + throw new Error(message) } + data = await response.json() } - // If cloudId was provided, process the response - if (!response.ok) { - let message = `Failed to get attachments from Jira issue (${response.status})` - try { - const err = await response.json() - message = err?.errorMessages?.join(', ') || err?.message || message - } catch (_e) {} - throw new Error(message) - } - - const data = await response.json() - return { success: true, output: { ts: new Date().toISOString(), - issueKey: params?.issueKey || 'unknown', - attachments: (data?.fields?.attachment || []).map((att: any) => ({ - id: att.id, - filename: att.filename, - size: att.size, - mimeType: att.mimeType, - created: att.created, - author: att.author?.displayName || att.author?.accountId || 'Unknown', - })), + issueKey: params?.issueKey ?? 'unknown', + attachments: (data?.fields?.attachment ?? 
[]).map(transformAttachment), }, } }, outputs: { - ts: { type: 'string', description: 'Timestamp of the operation' }, + ts: TIMESTAMP_OUTPUT, issueKey: { type: 'string', description: 'Issue key' }, attachments: { type: 'array', - description: 'Array of attachments with id, filename, size, mimeType, created, author', + description: 'Array of attachments', + items: { + type: 'object', + properties: ATTACHMENT_ITEM_PROPERTIES, + }, }, }, } diff --git a/apps/sim/tools/jira/get_comments.ts b/apps/sim/tools/jira/get_comments.ts index e51db0ee4..af6ec05de 100644 --- a/apps/sim/tools/jira/get_comments.ts +++ b/apps/sim/tools/jira/get_comments.ts @@ -1,27 +1,22 @@ -import { getJiraCloudId } from '@/tools/jira/utils' -import type { ToolConfig, ToolResponse } from '@/tools/types' +import type { JiraGetCommentsParams, JiraGetCommentsResponse } from '@/tools/jira/types' +import { COMMENT_ITEM_PROPERTIES, TIMESTAMP_OUTPUT } from '@/tools/jira/types' +import { extractAdfText, getJiraCloudId, transformUser } from '@/tools/jira/utils' +import type { ToolConfig } from '@/tools/types' -export interface JiraGetCommentsParams { - accessToken: string - domain: string - issueKey: string - startAt?: number - maxResults?: number - cloudId?: string -} - -export interface JiraGetCommentsResponse extends ToolResponse { - output: { - ts: string - issueKey: string - total: number - comments: Array<{ - id: string - author: string - body: string - created: string - updated: string - }> +/** + * Transforms a raw Jira comment object into typed output. + */ +function transformComment(comment: any) { + return { + id: comment.id ?? '', + body: extractAdfText(comment.body) ?? '', + author: transformUser(comment.author) ?? { accountId: '', displayName: '' }, + updateAuthor: transformUser(comment.updateAuthor), + created: comment.created ?? '', + updated: comment.updated ?? '', + visibility: comment.visibility + ? { type: comment.visibility.type ?? '', value: comment.visibility.value ?? 
'' } + : null, } } @@ -67,6 +62,13 @@ export const jiraGetCommentsTool: ToolConfig { if (params.cloudId) { - const startAt = params.startAt || 0 - const maxResults = params.maxResults || 50 - return `https://api.atlassian.com/ex/jira/${params.cloudId}/rest/api/3/issue/${params.issueKey}/comment?startAt=${startAt}&maxResults=${maxResults}` + const startAt = params.startAt ?? 0 + const maxResults = params.maxResults ?? 50 + const orderBy = params.orderBy ?? '-created' + return `https://api.atlassian.com/ex/jira/${params.cloudId}/rest/api/3/issue/${params.issueKey}/comment?startAt=${startAt}&maxResults=${maxResults}&orderBy=${orderBy}` } return 'https://api.atlassian.com/oauth/token/accessible-resources' }, @@ -95,29 +98,16 @@ export const jiraGetCommentsTool: ToolConfig { - // Extract text from Atlassian Document Format - const extractText = (content: any): string => { - if (!content) return '' - if (typeof content === 'string') return content - if (Array.isArray(content)) { - return content.map(extractText).join(' ') - } - if (content.type === 'text') return content.text || '' - if (content.content) return extractText(content.content) - return '' - } - - if (!params?.cloudId) { - const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) - // Make the actual request with the resolved cloudId - const startAt = params?.startAt || 0 - const maxResults = params?.maxResults || 50 - const commentsUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params?.issueKey}/comment?startAt=${startAt}&maxResults=${maxResults}` + const fetchComments = async (cloudId: string) => { + const startAt = params?.startAt ?? 0 + const maxResults = params?.maxResults ?? 50 + const orderBy = params?.orderBy ?? 
'-created' + const commentsUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params!.issueKey}/comment?startAt=${startAt}&maxResults=${maxResults}&orderBy=${orderBy}` const commentsResponse = await fetch(commentsUrl, { method: 'GET', headers: { Accept: 'application/json', - Authorization: `Bearer ${params?.accessToken}`, + Authorization: `Bearer ${params!.accessToken}`, }, }) @@ -130,61 +120,52 @@ export const jiraGetCommentsTool: ToolConfig ({ - id: comment.id, - author: comment.author?.displayName || comment.author?.accountId || 'Unknown', - body: extractText(comment.body), - created: comment.created, - updated: comment.updated, - })), - }, + let data: any + + if (!params?.cloudId) { + const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) + data = await fetchComments(cloudId) + } else { + if (!response.ok) { + let message = `Failed to get comments from Jira issue (${response.status})` + try { + const err = await response.json() + message = err?.errorMessages?.join(', ') || err?.message || message + } catch (_e) {} + throw new Error(message) } + data = await response.json() } - // If cloudId was provided, process the response - if (!response.ok) { - let message = `Failed to get comments from Jira issue (${response.status})` - try { - const err = await response.json() - message = err?.errorMessages?.join(', ') || err?.message || message - } catch (_e) {} - throw new Error(message) - } - - const data = await response.json() - return { success: true, output: { ts: new Date().toISOString(), - issueKey: params?.issueKey || 'unknown', - total: data.total || 0, - comments: (data.comments || []).map((comment: any) => ({ - id: comment.id, - author: comment.author?.displayName || comment.author?.accountId || 'Unknown', - body: extractText(comment.body), - created: comment.created, - updated: comment.updated, - })), + issueKey: params?.issueKey ?? 'unknown', + total: data.total ?? 0, + startAt: data.startAt ?? 
0, + maxResults: data.maxResults ?? 0, + comments: (data.comments ?? []).map(transformComment), }, } }, outputs: { - ts: { type: 'string', description: 'Timestamp of the operation' }, + ts: TIMESTAMP_OUTPUT, issueKey: { type: 'string', description: 'Issue key' }, total: { type: 'number', description: 'Total number of comments' }, + startAt: { type: 'number', description: 'Pagination start index' }, + maxResults: { type: 'number', description: 'Maximum results per page' }, comments: { type: 'array', - description: 'Array of comments with id, author, body, created, updated', + description: 'Array of comments', + items: { + type: 'object', + properties: COMMENT_ITEM_PROPERTIES, + }, }, }, } diff --git a/apps/sim/tools/jira/get_users.ts b/apps/sim/tools/jira/get_users.ts index 246ef1693..71cbae350 100644 --- a/apps/sim/tools/jira/get_users.ts +++ b/apps/sim/tools/jira/get_users.ts @@ -1,38 +1,20 @@ +import type { JiraGetUsersParams, JiraGetUsersResponse } from '@/tools/jira/types' +import { TIMESTAMP_OUTPUT, USER_OUTPUT_PROPERTIES } from '@/tools/jira/types' import { getJiraCloudId } from '@/tools/jira/utils' -import type { ToolConfig, ToolResponse } from '@/tools/types' +import type { ToolConfig } from '@/tools/types' -export interface JiraGetUsersParams { - accessToken: string - domain: string - accountId?: string - startAt?: number - maxResults?: number - cloudId?: string -} - -export interface JiraUser { - accountId: string - accountType?: string - active: boolean - displayName: string - emailAddress?: string - avatarUrls?: { - '16x16'?: string - '24x24'?: string - '32x32'?: string - '48x48'?: string - } - timeZone?: string - self?: string -} - -export interface JiraGetUsersResponse extends ToolResponse { - output: { - ts: string - users: JiraUser[] - total?: number - startAt?: number - maxResults?: number +/** + * Transforms a raw Jira user API object into typed output. + */ +function transformUserOutput(user: any) { + return { + accountId: user.accountId ?? 
'', + accountType: user.accountType ?? null, + active: user.active ?? false, + displayName: user.displayName ?? '', + emailAddress: user.emailAddress ?? null, + avatarUrl: user.avatarUrls?.['48x48'] ?? null, + timeZone: user.timeZone ?? null, } } @@ -112,9 +94,7 @@ export const jiraGetUsersTool: ToolConfig { - if (!params?.cloudId) { - const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) - + const fetchUsers = async (cloudId: string) => { let usersUrl: string if (params!.accountId) { usersUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/user?accountId=${encodeURIComponent(params!.accountId)}` @@ -144,71 +124,49 @@ export const jiraGetUsersTool: ToolConfig ({ - accountId: user.accountId, - accountType: user.accountType, - active: user.active, - displayName: user.displayName, - emailAddress: user.emailAddress, - avatarUrls: user.avatarUrls, - timeZone: user.timeZone, - self: user.self, - })), - total: params!.accountId ? 1 : users.length, - startAt: params!.startAt || 0, - maxResults: params!.maxResults || 50, - }, + if (!params?.cloudId) { + const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) + data = await fetchUsers(cloudId) + } else { + if (!response.ok) { + let message = `Failed to get Jira users (${response.status})` + try { + const err = await response.json() + message = err?.errorMessages?.join(', ') || err?.message || message + } catch (_e) {} + throw new Error(message) } + data = await response.json() } - if (!response.ok) { - let message = `Failed to get Jira users (${response.status})` - try { - const err = await response.json() - message = err?.errorMessages?.join(', ') || err?.message || message - } catch (_e) {} - throw new Error(message) - } - - const data = await response.json() - const users = params?.accountId ? 
[data] : data return { success: true, output: { ts: new Date().toISOString(), - users: users.map((user: any) => ({ - accountId: user.accountId, - accountType: user.accountType, - active: user.active, - displayName: user.displayName, - emailAddress: user.emailAddress, - avatarUrls: user.avatarUrls, - timeZone: user.timeZone, - self: user.self, - })), + users: users.map(transformUserOutput), total: params?.accountId ? 1 : users.length, - startAt: params?.startAt || 0, - maxResults: params?.maxResults || 50, + startAt: params?.startAt ?? 0, + maxResults: params?.maxResults ?? 50, }, } }, outputs: { - ts: { type: 'string', description: 'Timestamp of the operation' }, + ts: TIMESTAMP_OUTPUT, users: { - type: 'json', - description: - 'Array of users with accountId, displayName, emailAddress, active status, and avatarUrls', + type: 'array', + description: 'Array of Jira users', + items: { + type: 'object', + properties: USER_OUTPUT_PROPERTIES, + }, }, total: { type: 'number', description: 'Total number of users returned' }, startAt: { type: 'number', description: 'Pagination start index' }, diff --git a/apps/sim/tools/jira/get_worklogs.ts b/apps/sim/tools/jira/get_worklogs.ts index e9e18c523..2818cd51a 100644 --- a/apps/sim/tools/jira/get_worklogs.ts +++ b/apps/sim/tools/jira/get_worklogs.ts @@ -1,30 +1,22 @@ -import { getJiraCloudId } from '@/tools/jira/utils' -import type { ToolConfig, ToolResponse } from '@/tools/types' +import type { JiraGetWorklogsParams, JiraGetWorklogsResponse } from '@/tools/jira/types' +import { TIMESTAMP_OUTPUT, WORKLOG_ITEM_PROPERTIES } from '@/tools/jira/types' +import { extractAdfText, getJiraCloudId, transformUser } from '@/tools/jira/utils' +import type { ToolConfig } from '@/tools/types' -export interface JiraGetWorklogsParams { - accessToken: string - domain: string - issueKey: string - startAt?: number - maxResults?: number - cloudId?: string -} - -export interface JiraGetWorklogsResponse extends ToolResponse { - output: { - ts: string - 
issueKey: string - total: number - worklogs: Array<{ - id: string - author: string - timeSpentSeconds: number - timeSpent: string - comment?: string - created: string - updated: string - started: string - }> +/** + * Transforms a raw Jira worklog object into typed output. + */ +function transformWorklog(worklog: any) { + return { + id: worklog.id ?? '', + author: transformUser(worklog.author) ?? { accountId: '', displayName: '' }, + updateAuthor: transformUser(worklog.updateAuthor), + comment: worklog.comment ? (extractAdfText(worklog.comment) ?? null) : null, + started: worklog.started ?? '', + timeSpent: worklog.timeSpent ?? '', + timeSpentSeconds: worklog.timeSpentSeconds ?? 0, + created: worklog.created ?? '', + updated: worklog.updated ?? '', } } @@ -82,8 +74,8 @@ export const jiraGetWorklogsTool: ToolConfig { if (params.cloudId) { - const startAt = params.startAt || 0 - const maxResults = params.maxResults || 50 + const startAt = params.startAt ?? 0 + const maxResults = params.maxResults ?? 
50 return `https://api.atlassian.com/ex/jira/${params.cloudId}/rest/api/3/issue/${params.issueKey}/worklog?startAt=${startAt}&maxResults=${maxResults}` } return 'https://api.atlassian.com/oauth/token/accessible-resources' @@ -98,29 +90,15 @@ export const jiraGetWorklogsTool: ToolConfig { - // Extract text from Atlassian Document Format - const extractText = (content: any): string => { - if (!content) return '' - if (typeof content === 'string') return content - if (Array.isArray(content)) { - return content.map(extractText).join(' ') - } - if (content.type === 'text') return content.text || '' - if (content.content) return extractText(content.content) - return '' - } - - if (!params?.cloudId) { - const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) - // Make the actual request with the resolved cloudId - const startAt = params?.startAt || 0 - const maxResults = params?.maxResults || 50 - const worklogsUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params?.issueKey}/worklog?startAt=${startAt}&maxResults=${maxResults}` + const fetchWorklogs = async (cloudId: string) => { + const startAt = params?.startAt ?? 0 + const maxResults = params?.maxResults ?? 50 + const worklogsUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params!.issueKey}/worklog?startAt=${startAt}&maxResults=${maxResults}` const worklogsResponse = await fetch(worklogsUrl, { method: 'GET', headers: { Accept: 'application/json', - Authorization: `Bearer ${params?.accessToken}`, + Authorization: `Bearer ${params!.accessToken}`, }, }) @@ -133,68 +111,52 @@ export const jiraGetWorklogsTool: ToolConfig ({ - id: worklog.id, - author: worklog.author?.displayName || worklog.author?.accountId || 'Unknown', - timeSpentSeconds: worklog.timeSpentSeconds, - timeSpent: worklog.timeSpent, - comment: worklog.comment ? 
extractText(worklog.comment) : undefined, - created: worklog.created, - updated: worklog.updated, - started: worklog.started, - })), - }, + let data: any + + if (!params?.cloudId) { + const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) + data = await fetchWorklogs(cloudId) + } else { + if (!response.ok) { + let message = `Failed to get worklogs from Jira issue (${response.status})` + try { + const err = await response.json() + message = err?.errorMessages?.join(', ') || err?.message || message + } catch (_e) {} + throw new Error(message) } + data = await response.json() } - // If cloudId was provided, process the response - if (!response.ok) { - let message = `Failed to get worklogs from Jira issue (${response.status})` - try { - const err = await response.json() - message = err?.errorMessages?.join(', ') || err?.message || message - } catch (_e) {} - throw new Error(message) - } - - const data = await response.json() - return { success: true, output: { ts: new Date().toISOString(), - issueKey: params?.issueKey || 'unknown', - total: data.total || 0, - worklogs: (data.worklogs || []).map((worklog: any) => ({ - id: worklog.id, - author: worklog.author?.displayName || worklog.author?.accountId || 'Unknown', - timeSpentSeconds: worklog.timeSpentSeconds, - timeSpent: worklog.timeSpent, - comment: worklog.comment ? extractText(worklog.comment) : undefined, - created: worklog.created, - updated: worklog.updated, - started: worklog.started, - })), + issueKey: params?.issueKey ?? 'unknown', + total: data.total ?? 0, + startAt: data.startAt ?? 0, + maxResults: data.maxResults ?? 0, + worklogs: (data.worklogs ?? 
[]).map(transformWorklog), }, } }, outputs: { - ts: { type: 'string', description: 'Timestamp of the operation' }, + ts: TIMESTAMP_OUTPUT, issueKey: { type: 'string', description: 'Issue key' }, total: { type: 'number', description: 'Total number of worklogs' }, + startAt: { type: 'number', description: 'Pagination start index' }, + maxResults: { type: 'number', description: 'Maximum results per page' }, worklogs: { type: 'array', - description: - 'Array of worklogs with id, author, timeSpentSeconds, timeSpent, comment, created, updated, started', + description: 'Array of worklogs', + items: { + type: 'object', + properties: WORKLOG_ITEM_PROPERTIES, + }, }, }, } diff --git a/apps/sim/tools/jira/remove_watcher.ts b/apps/sim/tools/jira/remove_watcher.ts index 6a66fe05f..7a8007f1f 100644 --- a/apps/sim/tools/jira/remove_watcher.ts +++ b/apps/sim/tools/jira/remove_watcher.ts @@ -1,22 +1,7 @@ +import type { JiraRemoveWatcherParams, JiraRemoveWatcherResponse } from '@/tools/jira/types' +import { TIMESTAMP_OUTPUT } from '@/tools/jira/types' import { getJiraCloudId } from '@/tools/jira/utils' -import type { ToolConfig, ToolResponse } from '@/tools/types' - -export interface JiraRemoveWatcherParams { - accessToken: string - domain: string - issueKey: string - accountId: string - cloudId?: string -} - -export interface JiraRemoveWatcherResponse extends ToolResponse { - output: { - ts: string - issueKey: string - watcherAccountId: string - success: boolean - } -} +import type { ToolConfig } from '@/tools/types' export const jiraRemoveWatcherTool: ToolConfig = { @@ -83,13 +68,12 @@ export const jiraRemoveWatcherTool: ToolConfig { if (!params?.cloudId) { const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) - // Make the actual request with the resolved cloudId - const watcherUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params?.issueKey}/watchers?accountId=${params?.accountId}` + const watcherUrl = 
`https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params!.issueKey}/watchers?accountId=${params!.accountId}` const watcherResponse = await fetch(watcherUrl, { method: 'DELETE', headers: { Accept: 'application/json', - Authorization: `Bearer ${params?.accessToken}`, + Authorization: `Bearer ${params!.accessToken}`, }, }) @@ -106,14 +90,13 @@ export const jiraRemoveWatcherTool: ToolConfig ({ + id: c.id ?? '', + name: c.name ?? '', + description: c.description ?? null, + })), + fixVersions: (fields.fixVersions ?? []).map((v: any) => ({ + id: v.id ?? '', + name: v.name ?? '', + released: v.released ?? null, + releaseDate: v.releaseDate ?? null, + })), + resolution: fields.resolution + ? { + id: fields.resolution.id ?? '', + name: fields.resolution.name ?? '', + description: fields.resolution.description ?? null, + } + : null, + duedate: fields.duedate ?? null, + created: fields.created ?? '', + updated: fields.updated ?? '', + resolutiondate: fields.resolutiondate ?? null, + timetracking: fields.timetracking + ? { + originalEstimate: fields.timetracking.originalEstimate ?? null, + remainingEstimate: fields.timetracking.remainingEstimate ?? null, + timeSpent: fields.timetracking.timeSpent ?? null, + originalEstimateSeconds: fields.timetracking.originalEstimateSeconds ?? null, + remainingEstimateSeconds: fields.timetracking.remainingEstimateSeconds ?? null, + timeSpentSeconds: fields.timetracking.timeSpentSeconds ?? null, + } + : null, + parent: fields.parent + ? { + id: fields.parent.id ?? '', + key: fields.parent.key ?? '', + summary: fields.parent.fields?.summary ?? null, + } + : null, + issuelinks: (fields.issuelinks ?? []).map((link: any) => ({ + id: link.id ?? '', + type: { + id: link.type?.id ?? '', + name: link.type?.name ?? '', + inward: link.type?.inward ?? '', + outward: link.type?.outward ?? '', + }, + inwardIssue: link.inwardIssue + ? { + id: link.inwardIssue.id ?? '', + key: link.inwardIssue.key ?? 
'', + statusName: link.inwardIssue.fields?.status?.name ?? null, + summary: link.inwardIssue.fields?.summary ?? null, + } + : null, + outwardIssue: link.outwardIssue + ? { + id: link.outwardIssue.id ?? '', + key: link.outwardIssue.key ?? '', + statusName: link.outwardIssue.fields?.status?.name ?? null, + summary: link.outwardIssue.fields?.summary ?? null, + } + : null, + })), + subtasks: (fields.subtasks ?? []).map((sub: any) => ({ + id: sub.id ?? '', + key: sub.key ?? '', + summary: sub.fields?.summary ?? '', + statusName: sub.fields?.status?.name ?? '', + issueTypeName: sub.fields?.issuetype?.name ?? null, + })), + votes: fields.votes + ? { + votes: fields.votes.votes ?? 0, + hasVoted: fields.votes.hasVoted ?? false, + } + : null, + watches: + (fields.watches ?? fields.watcher) + ? { + watchCount: (fields.watches ?? fields.watcher)?.watchCount ?? 0, + isWatching: (fields.watches ?? fields.watcher)?.isWatching ?? false, + } + : null, + comments: ((fields.comment?.comments ?? fields.comment) || []).map((c: any) => ({ + id: c.id ?? '', + body: extractAdfText(c.body) ?? '', + author: transformUser(c.author), + updateAuthor: transformUser(c.updateAuthor), + created: c.created ?? '', + updated: c.updated ?? '', + })), + worklogs: ((fields.worklog?.worklogs ?? fields.worklog) || []).map((w: any) => ({ + id: w.id ?? '', + author: transformUser(w.author), + updateAuthor: transformUser(w.updateAuthor), + comment: w.comment ? (extractAdfText(w.comment) ?? null) : null, + started: w.started ?? '', + timeSpent: w.timeSpent ?? '', + timeSpentSeconds: w.timeSpentSeconds ?? 0, + created: w.created ?? '', + updated: w.updated ?? '', + })), + attachments: (fields.attachment ?? []).map((att: any) => ({ + id: att.id ?? '', + filename: att.filename ?? '', + mimeType: att.mimeType ?? '', + size: att.size ?? 0, + content: att.content ?? '', + thumbnail: att.thumbnail ?? null, + author: transformUser(att.author), + created: att.created ?? 
'', + })), + } +} + export const jiraRetrieveTool: ToolConfig = { id: 'jira_retrieve', name: 'Jira Retrieve', @@ -29,12 +195,6 @@ export const jiraRetrieveTool: ToolConfig { if (params.cloudId) { - // Request with broad expands; additional endpoints fetched in transform for completeness return `https://api.atlassian.com/ex/jira/${params.cloudId}/rest/api/3/issue/${params.issueKey}?expand=renderedFields,names,schema,transitions,operations,editmeta,changelog,versionedRepresentations` } - // If no cloudId, use the accessible resources endpoint return 'https://api.atlassian.com/oauth/token/accessible-resources' }, method: 'GET', @@ -70,21 +228,16 @@ export const jiraRetrieveTool: ToolConfig { if (!params?.issueKey) { - throw new Error( - 'Select a project to read issues, or provide an issue key to read a single issue.' - ) + throw new Error('Provide an issue key to retrieve a single issue.') } - // If we don't have a cloudId, resolve it robustly using the Jira utils helper - if (!params?.cloudId) { - const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) - // Now fetch the actual issue with the found cloudId - const issueUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params?.issueKey}?expand=renderedFields,names,schema,transitions,operations,editmeta,changelog,versionedRepresentations` + const fetchIssue = async (cloudId: string) => { + const issueUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params.issueKey}?expand=renderedFields,names,schema,transitions,operations,editmeta,changelog,versionedRepresentations` const issueResponse = await fetch(issueUrl, { method: 'GET', headers: { Accept: 'application/json', - Authorization: `Bearer ${params?.accessToken}`, + Authorization: `Bearer ${params.accessToken}`, }, }) @@ -97,19 +250,20 @@ export const jiraRetrieveTool: ToolConfig { const base = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params.issueKey}` const [commentsResp, worklogResp, 
watchersResp] = await Promise.all([ fetch(`${base}/comment?maxResults=100&orderBy=-created`, { - headers: { Accept: 'application/json', Authorization: `Bearer ${params!.accessToken}` }, + headers: { Accept: 'application/json', Authorization: `Bearer ${params.accessToken}` }, }), fetch(`${base}/worklog?maxResults=100`, { - headers: { Accept: 'application/json', Authorization: `Bearer ${params!.accessToken}` }, + headers: { Accept: 'application/json', Authorization: `Bearer ${params.accessToken}` }, }), fetch(`${base}/watchers`, { - headers: { Accept: 'application/json', Authorization: `Bearer ${params!.accessToken}` }, + headers: { Accept: 'application/json', Authorization: `Bearer ${params.accessToken}` }, }), ]) @@ -117,124 +271,68 @@ export const jiraRetrieveTool: ToolConfig ({ + id: c.id ?? '', + name: c.name ?? '', + description: c.description ?? null, + })), + resolution: fields.resolution + ? { + id: fields.resolution.id ?? '', + name: fields.resolution.name ?? '', + description: fields.resolution.description ?? null, + } + : null, + duedate: fields.duedate ?? null, + created: fields.created ?? '', + updated: fields.updated ?? 
'', + } +} + export const jiraSearchIssuesTool: ToolConfig = { id: 'jira_search_issues', name: 'Jira Search Issues', @@ -33,24 +99,24 @@ export const jiraSearchIssuesTool: ToolConfig 0) @@ -77,22 +143,19 @@ export const jiraSearchIssuesTool: ToolConfig 'GET', - headers: (params: JiraSearchIssuesParams) => { - return { - Accept: 'application/json', - 'Content-Type': 'application/json', - Authorization: `Bearer ${params.accessToken}`, - } - }, + headers: (params: JiraSearchIssuesParams) => ({ + Accept: 'application/json', + 'Content-Type': 'application/json', + Authorization: `Bearer ${params.accessToken}`, + }), body: () => undefined as any, }, transformResponse: async (response: Response, params?: JiraSearchIssuesParams) => { - if (!params?.cloudId) { - const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) + const performSearch = async (cloudId: string) => { const query = new URLSearchParams() if (params?.jql) query.set('jql', params.jql) - if (typeof params?.startAt === 'number') query.set('startAt', String(params.startAt)) + if (params?.nextPageToken) query.set('nextPageToken', params.nextPageToken) if (typeof params?.maxResults === 'number') query.set('maxResults', String(params.maxResults)) if (Array.isArray(params?.fields) && params.fields.length > 0) query.set('fields', params.fields.join(',')) @@ -103,12 +166,6 @@ export const jiraSearchIssuesTool: ToolConfig ({ - key: issue.key, - summary: issue.fields?.summary, - status: issue.fields?.status?.name, - assignee: issue.fields?.assignee?.displayName || issue.fields?.assignee?.accountId, - created: issue.fields?.created, - updated: issue.fields?.updated, - })), - }, + let data: any + + if (!params?.cloudId) { + const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) + data = await performSearch(cloudId) + } else { + if (!response.ok) { + let message = `Failed to search Jira issues (${response.status})` + try { + const err = await response.json() + message = 
err?.errorMessages?.join(', ') || err?.message || message + } catch (_e) {} + throw new Error(message) } + data = await response.json() } - if (!response.ok) { - let message = `Failed to search Jira issues (${response.status})` - try { - const err = await response.json() - message = err?.errorMessages?.join(', ') || err?.message || message - } catch (_e) {} - throw new Error(message) - } - - const data = await response.json() - return { success: true, output: { ts: new Date().toISOString(), - total: data?.total || 0, - startAt: data?.startAt || 0, - maxResults: data?.maxResults || 0, - issues: (data?.issues || []).map((issue: any) => ({ - key: issue.key, - summary: issue.fields?.summary, - status: issue.fields?.status?.name, - assignee: issue.fields?.assignee?.displayName || issue.fields?.assignee?.accountId, - created: issue.fields?.created, - updated: issue.fields?.updated, - })), + issues: (data?.issues ?? []).map(transformSearchIssue), + nextPageToken: data?.nextPageToken ?? null, + isLast: data?.isLast ?? true, + total: data?.total ?? null, }, } }, outputs: { - ts: { type: 'string', description: 'Timestamp of the operation' }, - total: { type: 'number', description: 'Total number of matching issues' }, - startAt: { type: 'number', description: 'Pagination start index' }, - maxResults: { type: 'number', description: 'Maximum results per page' }, + ts: TIMESTAMP_OUTPUT, issues: { type: 'array', - description: 'Array of matching issues with key, summary, status, assignee, created, updated', + description: 'Array of matching issues', + items: { + type: 'object', + properties: SEARCH_ISSUE_ITEM_PROPERTIES, + }, + }, + nextPageToken: { + type: 'string', + description: 'Cursor token for the next page. 
Null when no more results.', + optional: true, + }, + isLast: { type: 'boolean', description: 'Whether this is the last page of results' }, + total: { + type: 'number', + description: 'Total number of matching issues (may not always be available)', + optional: true, }, }, } diff --git a/apps/sim/tools/jira/transition_issue.ts b/apps/sim/tools/jira/transition_issue.ts index f79f04090..9da0146c6 100644 --- a/apps/sim/tools/jira/transition_issue.ts +++ b/apps/sim/tools/jira/transition_issue.ts @@ -1,4 +1,5 @@ import type { JiraTransitionIssueParams, JiraTransitionIssueResponse } from '@/tools/jira/types' +import { TIMESTAMP_OUTPUT } from '@/tools/jira/types' import { getJiraCloudId } from '@/tools/jira/utils' import type { ToolConfig } from '@/tools/types' @@ -48,6 +49,12 @@ export const jiraTransitionIssueTool: ToolConfig< visibility: 'user-or-llm', description: 'Optional comment to add when transitioning the issue', }, + resolution: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Resolution name to set during transition (e.g., "Fixed", "Won\'t Fix")', + }, cloudId: { type: 'string', required: false, @@ -74,87 +81,47 @@ export const jiraTransitionIssueTool: ToolConfig< }, body: (params: JiraTransitionIssueParams) => { if (!params.cloudId) return undefined as any - const body: any = { - transition: { - id: params.transitionId, - }, - } - - if (params.comment) { - body.update = { - comment: [ - { - add: { - body: { - type: 'doc', - version: 1, - content: [ - { - type: 'paragraph', - content: [ - { - type: 'text', - text: params.comment, - }, - ], - }, - ], - }, - }, - }, - ], - } - } - - return body + return buildTransitionBody(params) }, }, transformResponse: async (response: Response, params?: JiraTransitionIssueParams) => { - if (!params?.cloudId) { - const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) - const transitionUrl = 
`https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params!.issueKey}/transitions` - - const body: any = { - transition: { - id: params!.transitionId, + const performTransition = async (cloudId: string) => { + // First, fetch available transitions to get the name and target status + const transitionsUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params!.issueKey}/transitions` + const transitionsResp = await fetch(transitionsUrl, { + method: 'GET', + headers: { + Accept: 'application/json', + Authorization: `Bearer ${params!.accessToken}`, }, - } + }) - if (params!.comment) { - body.update = { - comment: [ - { - add: { - body: { - type: 'doc', - version: 1, - content: [ - { - type: 'paragraph', - content: [ - { - type: 'text', - text: params!.comment, - }, - ], - }, - ], - }, - }, - }, - ], + let transitionName: string | null = null + let toStatus: { id: string; name: string } | null = null + + if (transitionsResp.ok) { + const transitionsData = await transitionsResp.json() + const transition = (transitionsData?.transitions ?? []).find( + (t: any) => String(t.id) === String(params!.transitionId) + ) + if (transition) { + transitionName = transition.name ?? null + toStatus = transition.to + ? { id: transition.to.id ?? '', name: transition.to.name ?? 
'' } + : null } } - const transitionResponse = await fetch(transitionUrl, { + // Perform the transition + const transitionResponse = await fetch(transitionsUrl, { method: 'POST', headers: { Accept: 'application/json', 'Content-Type': 'application/json', Authorization: `Bearer ${params!.accessToken}`, }, - body: JSON.stringify(body), + body: JSON.stringify(buildTransitionBody(params!)), }) if (!transitionResponse.ok) { @@ -166,42 +133,119 @@ export const jiraTransitionIssueTool: ToolConfig< throw new Error(message) } - // Transition endpoint returns 204 No Content on success - return { - success: true, - output: { - ts: new Date().toISOString(), - issueKey: params!.issueKey, - transitionId: params!.transitionId, - success: true, - }, + return { transitionName, toStatus } + } + + let transitionName: string | null = null + let toStatus: { id: string; name: string } | null = null + + if (!params?.cloudId) { + const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) + const result = await performTransition(cloudId) + transitionName = result.transitionName + toStatus = result.toStatus + } else { + // When cloudId was provided, the initial request was the POST transition. + // We need to fetch transition metadata separately. 
+ if (!response.ok) { + let message = `Failed to transition Jira issue (${response.status})` + try { + const err = await response.json() + message = err?.errorMessages?.join(', ') || err?.message || message + } catch (_e) {} + throw new Error(message) } - } - if (!response.ok) { - let message = `Failed to transition Jira issue (${response.status})` + // Fetch transition metadata for the response try { - const err = await response.json() - message = err?.errorMessages?.join(', ') || err?.message || message - } catch (_e) {} - throw new Error(message) + const transitionsUrl = `https://api.atlassian.com/ex/jira/${params.cloudId}/rest/api/3/issue/${params.issueKey}/transitions` + const transitionsResp = await fetch(transitionsUrl, { + method: 'GET', + headers: { + Accept: 'application/json', + Authorization: `Bearer ${params.accessToken}`, + }, + }) + if (transitionsResp.ok) { + const transitionsData = await transitionsResp.json() + const transition = (transitionsData?.transitions ?? []).find( + (t: any) => String(t.id) === String(params.transitionId) + ) + if (transition) { + transitionName = transition.name ?? null + toStatus = transition.to + ? { id: transition.to.id ?? '', name: transition.to.name ?? '' } + : null + } + } + } catch {} } - // Transition endpoint returns 204 No Content on success return { success: true, output: { ts: new Date().toISOString(), - issueKey: params?.issueKey || 'unknown', - transitionId: params?.transitionId || 'unknown', + issueKey: params?.issueKey ?? 'unknown', + transitionId: params?.transitionId ?? 
'unknown', + transitionName, + toStatus, success: true, }, } }, outputs: { - ts: { type: 'string', description: 'Timestamp of the operation' }, + ts: TIMESTAMP_OUTPUT, issueKey: { type: 'string', description: 'Issue key that was transitioned' }, transitionId: { type: 'string', description: 'Applied transition ID' }, + transitionName: { type: 'string', description: 'Applied transition name', optional: true }, + toStatus: { + type: 'object', + description: 'Target status after transition', + properties: { + id: { type: 'string', description: 'Status ID' }, + name: { type: 'string', description: 'Status name' }, + }, + optional: true, + }, }, } + +/** + * Builds the transition request body per Jira API v3. + */ +function buildTransitionBody(params: JiraTransitionIssueParams) { + const body: any = { + transition: { id: params.transitionId }, + } + + if (params.resolution) { + body.fields = { + ...body.fields, + resolution: { name: params.resolution }, + } + } + + if (params.comment) { + body.update = { + comment: [ + { + add: { + body: { + type: 'doc', + version: 1, + content: [ + { + type: 'paragraph', + content: [{ type: 'text', text: params.comment }], + }, + ], + }, + }, + }, + ], + } + } + + return body +} diff --git a/apps/sim/tools/jira/types.ts b/apps/sim/tools/jira/types.ts index 0ed2ba646..850ccb36d 100644 --- a/apps/sim/tools/jira/types.ts +++ b/apps/sim/tools/jira/types.ts @@ -1,5 +1,741 @@ import type { UserFile } from '@/executor/types' -import type { ToolFileData, ToolResponse } from '@/tools/types' +import type { OutputProperty, ToolResponse } from '@/tools/types' + +/** + * Shared output property constants for Jira tools. 
+ * Based on Jira Cloud REST API v3 response schemas: + * @see https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-issues/ + * @see https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-issue-search/ + * @see https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-issue-comments/ + * @see https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-issue-worklogs/ + */ + +/** + * User object properties shared across issues, comments, and worklogs. + * Based on Jira API v3 user structure (accountId-based). + */ +export const USER_OUTPUT_PROPERTIES = { + accountId: { type: 'string', description: 'Atlassian account ID of the user' }, + displayName: { type: 'string', description: 'Display name of the user' }, + active: { type: 'boolean', description: 'Whether the user account is active', optional: true }, + emailAddress: { type: 'string', description: 'Email address of the user', optional: true }, + accountType: { + type: 'string', + description: 'Type of account (e.g., atlassian, app, customer)', + optional: true, + }, + avatarUrl: { + type: 'string', + description: 'URL to the user avatar (48x48)', + optional: true, + }, + timeZone: { type: 'string', description: 'User timezone', optional: true }, +} as const satisfies Record + +/** + * User object output definition. + */ +export const USER_OUTPUT: OutputProperty = { + type: 'object', + description: 'Jira user object', + properties: USER_OUTPUT_PROPERTIES, +} + +/** + * Status object properties from Jira API v3. + * Based on IssueBean.fields.status structure. 
+ */ +export const STATUS_OUTPUT_PROPERTIES = { + id: { type: 'string', description: 'Status ID' }, + name: { type: 'string', description: 'Status name (e.g., Open, In Progress, Done)' }, + description: { type: 'string', description: 'Status description', optional: true }, + statusCategory: { + type: 'object', + description: 'Status category grouping', + properties: { + id: { type: 'number', description: 'Status category ID' }, + key: { + type: 'string', + description: 'Status category key (e.g., new, indeterminate, done)', + }, + name: { + type: 'string', + description: 'Status category name (e.g., To Do, In Progress, Done)', + }, + colorName: { + type: 'string', + description: 'Status category color (e.g., blue-gray, yellow, green)', + }, + }, + optional: true, + }, +} as const satisfies Record + +/** + * Status object output definition. + */ +export const STATUS_OUTPUT: OutputProperty = { + type: 'object', + description: 'Issue status', + properties: STATUS_OUTPUT_PROPERTIES, +} + +/** + * Issue type object properties from Jira API v3. + * Based on IssueBean.fields.issuetype structure. + */ +export const ISSUE_TYPE_OUTPUT_PROPERTIES = { + id: { type: 'string', description: 'Issue type ID' }, + name: { type: 'string', description: 'Issue type name (e.g., Task, Bug, Story, Epic)' }, + description: { type: 'string', description: 'Issue type description', optional: true }, + subtask: { type: 'boolean', description: 'Whether this is a subtask type' }, + iconUrl: { type: 'string', description: 'URL to the issue type icon', optional: true }, +} as const satisfies Record + +/** + * Issue type object output definition. + */ +export const ISSUE_TYPE_OUTPUT: OutputProperty = { + type: 'object', + description: 'Issue type', + properties: ISSUE_TYPE_OUTPUT_PROPERTIES, +} + +/** + * Project object properties from Jira API v3. + * Based on IssueBean.fields.project structure. 
+ */ +export const PROJECT_OUTPUT_PROPERTIES = { + id: { type: 'string', description: 'Project ID' }, + key: { type: 'string', description: 'Project key (e.g., PROJ)' }, + name: { type: 'string', description: 'Project name' }, + projectTypeKey: { + type: 'string', + description: 'Project type key (e.g., software, business)', + optional: true, + }, +} as const satisfies Record + +/** + * Project object output definition. + */ +export const PROJECT_OUTPUT: OutputProperty = { + type: 'object', + description: 'Jira project', + properties: PROJECT_OUTPUT_PROPERTIES, +} + +/** + * Priority object properties from Jira API v3. + * Based on IssueBean.fields.priority structure. + */ +export const PRIORITY_OUTPUT_PROPERTIES = { + id: { type: 'string', description: 'Priority ID' }, + name: { type: 'string', description: 'Priority name (e.g., Highest, High, Medium, Low, Lowest)' }, + iconUrl: { type: 'string', description: 'URL to the priority icon', optional: true }, +} as const satisfies Record + +/** + * Priority object output definition. + */ +export const PRIORITY_OUTPUT: OutputProperty = { + type: 'object', + description: 'Issue priority', + properties: PRIORITY_OUTPUT_PROPERTIES, +} + +/** + * Resolution object properties from Jira API v3. + * Based on IssueBean.fields.resolution structure. + */ +export const RESOLUTION_OUTPUT_PROPERTIES = { + id: { type: 'string', description: 'Resolution ID' }, + name: { type: 'string', description: "Resolution name (e.g., Fixed, Duplicate, Won't Fix)" }, + description: { type: 'string', description: 'Resolution description', optional: true }, +} as const satisfies Record + +/** + * Resolution object output definition. + */ +export const RESOLUTION_OUTPUT: OutputProperty = { + type: 'object', + description: 'Issue resolution', + properties: RESOLUTION_OUTPUT_PROPERTIES, + optional: true, +} + +/** + * Component object properties from Jira API v3. + * Based on IssueBean.fields.components structure. 
+ */ +export const COMPONENT_OUTPUT_PROPERTIES = { + id: { type: 'string', description: 'Component ID' }, + name: { type: 'string', description: 'Component name' }, + description: { type: 'string', description: 'Component description', optional: true }, +} as const satisfies Record + +/** + * Version object properties from Jira API v3. + * Based on IssueBean.fields.fixVersions / versions structure. + */ +export const VERSION_OUTPUT_PROPERTIES = { + id: { type: 'string', description: 'Version ID' }, + name: { type: 'string', description: 'Version name' }, + released: { type: 'boolean', description: 'Whether the version is released', optional: true }, + releaseDate: { type: 'string', description: 'Release date (YYYY-MM-DD)', optional: true }, +} as const satisfies Record + +/** + * Time tracking object properties from Jira API v3. + * Based on IssueBean.fields.timetracking structure. + */ +export const TIME_TRACKING_OUTPUT_PROPERTIES = { + originalEstimate: { + type: 'string', + description: 'Original estimate in human-readable format (e.g., 1w 2d)', + optional: true, + }, + remainingEstimate: { + type: 'string', + description: 'Remaining estimate in human-readable format', + optional: true, + }, + timeSpent: { + type: 'string', + description: 'Time spent in human-readable format', + optional: true, + }, + originalEstimateSeconds: { + type: 'number', + description: 'Original estimate in seconds', + optional: true, + }, + remainingEstimateSeconds: { + type: 'number', + description: 'Remaining estimate in seconds', + optional: true, + }, + timeSpentSeconds: { + type: 'number', + description: 'Time spent in seconds', + optional: true, + }, +} as const satisfies Record + +/** + * Time tracking object output definition. + */ +export const TIME_TRACKING_OUTPUT: OutputProperty = { + type: 'object', + description: 'Time tracking information', + properties: TIME_TRACKING_OUTPUT_PROPERTIES, + optional: true, +} + +/** + * Issue link object properties from Jira API v3. 
+ * Based on IssueBean.fields.issuelinks structure. + */ +export const ISSUE_LINK_ITEM_PROPERTIES = { + id: { type: 'string', description: 'Issue link ID' }, + type: { + type: 'object', + description: 'Link type information', + properties: { + id: { type: 'string', description: 'Link type ID' }, + name: { type: 'string', description: 'Link type name (e.g., Blocks, Relates)' }, + inward: { type: 'string', description: 'Inward description (e.g., is blocked by)' }, + outward: { type: 'string', description: 'Outward description (e.g., blocks)' }, + }, + }, + inwardIssue: { + type: 'object', + description: 'Inward linked issue', + properties: { + id: { type: 'string', description: 'Issue ID' }, + key: { type: 'string', description: 'Issue key' }, + statusName: { type: 'string', description: 'Issue status name', optional: true }, + summary: { type: 'string', description: 'Issue summary', optional: true }, + }, + optional: true, + }, + outwardIssue: { + type: 'object', + description: 'Outward linked issue', + properties: { + id: { type: 'string', description: 'Issue ID' }, + key: { type: 'string', description: 'Issue key' }, + statusName: { type: 'string', description: 'Issue status name', optional: true }, + summary: { type: 'string', description: 'Issue summary', optional: true }, + }, + optional: true, + }, +} as const satisfies Record + +/** + * Subtask item properties from Jira API v3. + */ +export const SUBTASK_ITEM_PROPERTIES = { + id: { type: 'string', description: 'Subtask issue ID' }, + key: { type: 'string', description: 'Subtask issue key' }, + summary: { type: 'string', description: 'Subtask summary' }, + statusName: { type: 'string', description: 'Subtask status name' }, + issueTypeName: { type: 'string', description: 'Subtask issue type name', optional: true }, +} as const satisfies Record + +/** + * Comment item properties from Jira API v3. + * Based on GET /rest/api/3/issue/{issueIdOrKey}/comment response. 
+ */ +export const COMMENT_ITEM_PROPERTIES = { + id: { type: 'string', description: 'Comment ID' }, + body: { type: 'string', description: 'Comment body text (extracted from ADF)' }, + author: { + type: 'object', + description: 'Comment author', + properties: USER_OUTPUT_PROPERTIES, + }, + updateAuthor: { + type: 'object', + description: 'User who last updated the comment', + properties: USER_OUTPUT_PROPERTIES, + optional: true, + }, + created: { type: 'string', description: 'ISO 8601 timestamp when the comment was created' }, + updated: { type: 'string', description: 'ISO 8601 timestamp when the comment was last updated' }, + visibility: { + type: 'object', + description: 'Comment visibility restriction', + properties: { + type: { type: 'string', description: 'Restriction type (e.g., role, group)' }, + value: { type: 'string', description: 'Restriction value (e.g., Administrators)' }, + }, + optional: true, + }, +} as const satisfies Record + +/** + * Comment object output definition. + */ +export const COMMENT_OUTPUT: OutputProperty = { + type: 'object', + description: 'Jira comment object', + properties: COMMENT_ITEM_PROPERTIES, +} + +/** + * Comments array output definition. + */ +export const COMMENTS_OUTPUT: OutputProperty = { + type: 'array', + description: 'Array of Jira comments', + items: { + type: 'object', + properties: COMMENT_ITEM_PROPERTIES, + }, +} + +/** + * Attachment item properties from Jira API v3. + * Based on IssueBean.fields.attachment structure. 
+ */ +export const ATTACHMENT_ITEM_PROPERTIES = { + id: { type: 'string', description: 'Attachment ID' }, + filename: { type: 'string', description: 'Attachment file name' }, + mimeType: { type: 'string', description: 'MIME type of the attachment' }, + size: { type: 'number', description: 'File size in bytes' }, + content: { type: 'string', description: 'URL to download the attachment content' }, + thumbnail: { + type: 'string', + description: 'URL to the attachment thumbnail', + optional: true, + }, + author: { + type: 'object', + description: 'Attachment author', + properties: USER_OUTPUT_PROPERTIES, + optional: true, + }, + created: { type: 'string', description: 'ISO 8601 timestamp when the attachment was created' }, +} as const satisfies Record + +/** + * Attachment object output definition. + */ +export const ATTACHMENT_OUTPUT: OutputProperty = { + type: 'object', + description: 'Jira attachment object', + properties: ATTACHMENT_ITEM_PROPERTIES, +} + +/** + * Attachments array output definition. + */ +export const ATTACHMENTS_OUTPUT: OutputProperty = { + type: 'array', + description: 'Array of Jira attachments', + items: { + type: 'object', + properties: ATTACHMENT_ITEM_PROPERTIES, + }, +} + +/** + * Worklog item properties from Jira API v3. + * Based on GET /rest/api/3/issue/{issueIdOrKey}/worklog response. 
+ */ +export const WORKLOG_ITEM_PROPERTIES = { + id: { type: 'string', description: 'Worklog ID' }, + author: { + type: 'object', + description: 'Worklog author', + properties: USER_OUTPUT_PROPERTIES, + }, + updateAuthor: { + type: 'object', + description: 'User who last updated the worklog', + properties: USER_OUTPUT_PROPERTIES, + optional: true, + }, + comment: { type: 'string', description: 'Worklog comment text', optional: true }, + started: { type: 'string', description: 'ISO 8601 timestamp when the work started' }, + timeSpent: { type: 'string', description: 'Time spent in human-readable format (e.g., 3h 20m)' }, + timeSpentSeconds: { type: 'number', description: 'Time spent in seconds' }, + created: { type: 'string', description: 'ISO 8601 timestamp when the worklog was created' }, + updated: { type: 'string', description: 'ISO 8601 timestamp when the worklog was last updated' }, +} as const satisfies Record + +/** + * Worklog object output definition. + */ +export const WORKLOG_OUTPUT: OutputProperty = { + type: 'object', + description: 'Jira worklog object', + properties: WORKLOG_ITEM_PROPERTIES, +} + +/** + * Worklogs array output definition. + */ +export const WORKLOGS_OUTPUT: OutputProperty = { + type: 'array', + description: 'Array of Jira worklogs', + items: { + type: 'object', + properties: WORKLOG_ITEM_PROPERTIES, + }, +} + +/** + * Transition object properties from Jira API v3. + * Based on GET /rest/api/3/issue/{issueIdOrKey}/transitions response. 
+ */ +export const TRANSITION_ITEM_PROPERTIES = { + id: { type: 'string', description: 'Transition ID' }, + name: { type: 'string', description: 'Transition name (e.g., Start Progress, Done)' }, + hasScreen: { + type: 'boolean', + description: 'Whether the transition has an associated screen', + optional: true, + }, + isGlobal: { type: 'boolean', description: 'Whether the transition is global', optional: true }, + isConditional: { + type: 'boolean', + description: 'Whether the transition is conditional', + optional: true, + }, + to: { + type: 'object', + description: 'Target status after transition', + properties: STATUS_OUTPUT_PROPERTIES, + }, +} as const satisfies Record + +/** + * Full issue item properties for retrieve/search outputs. + * Based on IssueBean structure from Jira API v3. + */ +export const ISSUE_ITEM_PROPERTIES = { + id: { type: 'string', description: 'Issue ID' }, + key: { type: 'string', description: 'Issue key (e.g., PROJ-123)' }, + self: { type: 'string', description: 'REST API URL for this issue' }, + summary: { type: 'string', description: 'Issue summary' }, + description: { + type: 'string', + description: 'Issue description text (extracted from ADF)', + optional: true, + }, + status: { + type: 'object', + description: 'Issue status', + properties: STATUS_OUTPUT_PROPERTIES, + }, + issuetype: { + type: 'object', + description: 'Issue type', + properties: ISSUE_TYPE_OUTPUT_PROPERTIES, + }, + project: { + type: 'object', + description: 'Project the issue belongs to', + properties: PROJECT_OUTPUT_PROPERTIES, + }, + priority: { + type: 'object', + description: 'Issue priority', + properties: PRIORITY_OUTPUT_PROPERTIES, + optional: true, + }, + assignee: { + type: 'object', + description: 'Assigned user', + properties: USER_OUTPUT_PROPERTIES, + optional: true, + }, + reporter: { + type: 'object', + description: 'Reporter user', + properties: USER_OUTPUT_PROPERTIES, + optional: true, + }, + creator: { + type: 'object', + description: 'Issue 
creator', + properties: USER_OUTPUT_PROPERTIES, + optional: true, + }, + labels: { + type: 'array', + description: 'Issue labels', + items: { type: 'string' }, + }, + components: { + type: 'array', + description: 'Issue components', + items: { + type: 'object', + properties: COMPONENT_OUTPUT_PROPERTIES, + }, + optional: true, + }, + fixVersions: { + type: 'array', + description: 'Fix versions', + items: { + type: 'object', + properties: VERSION_OUTPUT_PROPERTIES, + }, + optional: true, + }, + resolution: { + type: 'object', + description: 'Issue resolution', + properties: RESOLUTION_OUTPUT_PROPERTIES, + optional: true, + }, + duedate: { type: 'string', description: 'Due date (YYYY-MM-DD)', optional: true }, + created: { type: 'string', description: 'ISO 8601 timestamp when the issue was created' }, + updated: { type: 'string', description: 'ISO 8601 timestamp when the issue was last updated' }, + resolutiondate: { + type: 'string', + description: 'ISO 8601 timestamp when the issue was resolved', + optional: true, + }, + timetracking: TIME_TRACKING_OUTPUT, + parent: { + type: 'object', + description: 'Parent issue (for subtasks)', + properties: { + id: { type: 'string', description: 'Parent issue ID' }, + key: { type: 'string', description: 'Parent issue key' }, + summary: { type: 'string', description: 'Parent issue summary', optional: true }, + }, + optional: true, + }, + issuelinks: { + type: 'array', + description: 'Linked issues', + items: { + type: 'object', + properties: ISSUE_LINK_ITEM_PROPERTIES, + }, + optional: true, + }, + subtasks: { + type: 'array', + description: 'Subtask issues', + items: { + type: 'object', + properties: SUBTASK_ITEM_PROPERTIES, + }, + optional: true, + }, + votes: { + type: 'object', + description: 'Vote information', + properties: { + votes: { type: 'number', description: 'Number of votes' }, + hasVoted: { type: 'boolean', description: 'Whether the current user has voted' }, + }, + optional: true, + }, + watches: { + type: 
'object', + description: 'Watch information', + properties: { + watchCount: { type: 'number', description: 'Number of watchers' }, + isWatching: { type: 'boolean', description: 'Whether the current user is watching' }, + }, + optional: true, + }, + comments: { + type: 'array', + description: 'Issue comments (fetched separately)', + items: { + type: 'object', + properties: COMMENT_ITEM_PROPERTIES, + }, + optional: true, + }, + worklogs: { + type: 'array', + description: 'Issue worklogs (fetched separately)', + items: { + type: 'object', + properties: WORKLOG_ITEM_PROPERTIES, + }, + optional: true, + }, + attachments: { + type: 'array', + description: 'Issue attachments', + items: { + type: 'object', + properties: ATTACHMENT_ITEM_PROPERTIES, + }, + optional: true, + }, + issueKey: { type: 'string', description: 'Issue key (e.g., PROJ-123)' }, +} as const satisfies Record + +/** + * Issue object output definition. + */ +export const ISSUE_OUTPUT: OutputProperty = { + type: 'object', + description: 'Jira issue object', + properties: ISSUE_ITEM_PROPERTIES, +} + +/** + * Issues array output definition for search endpoints. + */ +export const ISSUES_OUTPUT: OutputProperty = { + type: 'array', + description: 'Array of Jira issues', + items: { + type: 'object', + properties: ISSUE_ITEM_PROPERTIES, + }, +} + +/** + * Search issue item properties (lighter than full issue for search results). + * Based on POST /rest/api/3/search/jql response. 
+ */ +export const SEARCH_ISSUE_ITEM_PROPERTIES = { + id: { type: 'string', description: 'Issue ID' }, + key: { type: 'string', description: 'Issue key (e.g., PROJ-123)' }, + self: { type: 'string', description: 'REST API URL for this issue' }, + summary: { type: 'string', description: 'Issue summary' }, + description: { + type: 'string', + description: 'Issue description text (extracted from ADF)', + optional: true, + }, + status: { + type: 'object', + description: 'Issue status', + properties: STATUS_OUTPUT_PROPERTIES, + }, + issuetype: { + type: 'object', + description: 'Issue type', + properties: ISSUE_TYPE_OUTPUT_PROPERTIES, + }, + project: { + type: 'object', + description: 'Project the issue belongs to', + properties: PROJECT_OUTPUT_PROPERTIES, + }, + priority: { + type: 'object', + description: 'Issue priority', + properties: PRIORITY_OUTPUT_PROPERTIES, + optional: true, + }, + assignee: { + type: 'object', + description: 'Assigned user', + properties: USER_OUTPUT_PROPERTIES, + optional: true, + }, + reporter: { + type: 'object', + description: 'Reporter user', + properties: USER_OUTPUT_PROPERTIES, + optional: true, + }, + labels: { + type: 'array', + description: 'Issue labels', + items: { type: 'string' }, + }, + components: { + type: 'array', + description: 'Issue components', + items: { + type: 'object', + properties: COMPONENT_OUTPUT_PROPERTIES, + }, + optional: true, + }, + resolution: { + type: 'object', + description: 'Issue resolution', + properties: RESOLUTION_OUTPUT_PROPERTIES, + optional: true, + }, + duedate: { type: 'string', description: 'Due date (YYYY-MM-DD)', optional: true }, + created: { type: 'string', description: 'ISO 8601 timestamp when the issue was created' }, + updated: { type: 'string', description: 'ISO 8601 timestamp when the issue was last updated' }, +} as const satisfies Record + +/** + * Common timestamp output property. 
+ */ +export const TIMESTAMP_OUTPUT: OutputProperty = { + type: 'string', + description: 'ISO 8601 timestamp of the operation', +} + +/** + * Common issue key output property. + */ +export const ISSUE_KEY_OUTPUT: OutputProperty = { + type: 'string', + description: 'Jira issue key (e.g., PROJ-123)', +} + +/** + * Common success status output property. + */ +export const SUCCESS_OUTPUT: OutputProperty = { + type: 'boolean', + description: 'Operation success status', +} + +// --- Parameter interfaces --- export interface JiraRetrieveParams { accessToken: string @@ -11,11 +747,122 @@ export interface JiraRetrieveParams { export interface JiraRetrieveResponse extends ToolResponse { output: { ts: string + id: string issueKey: string + key: string + self: string summary: string - description: string + description: string | null + status: { + id: string + name: string + description?: string + statusCategory?: { + id: number + key: string + name: string + colorName: string + } + } + issuetype: { + id: string + name: string + description?: string + subtask: boolean + iconUrl?: string + } + project: { + id: string + key: string + name: string + projectTypeKey?: string + } + priority: { + id: string + name: string + iconUrl?: string + } | null + assignee: { + accountId: string + displayName: string + active?: boolean + emailAddress?: string + avatarUrl?: string + } | null + reporter: { + accountId: string + displayName: string + active?: boolean + emailAddress?: string + avatarUrl?: string + } | null + creator: { + accountId: string + displayName: string + active?: boolean + } | null + labels: string[] + components: Array<{ id: string; name: string; description?: string }> + fixVersions: Array<{ id: string; name: string; released?: boolean; releaseDate?: string }> + resolution: { id: string; name: string; description?: string } | null + duedate: string | null created: string updated: string + resolutiondate: string | null + timetracking: { + originalEstimate?: string + 
remainingEstimate?: string + timeSpent?: string + originalEstimateSeconds?: number + remainingEstimateSeconds?: number + timeSpentSeconds?: number + } | null + parent: { id: string; key: string; summary?: string } | null + issuelinks: Array<{ + id: string + type: { id: string; name: string; inward: string; outward: string } + inwardIssue?: { id: string; key: string; statusName?: string; summary?: string } + outwardIssue?: { id: string; key: string; statusName?: string; summary?: string } + }> + subtasks: Array<{ + id: string + key: string + summary: string + statusName: string + issueTypeName?: string + }> + votes: { votes: number; hasVoted: boolean } | null + watches: { watchCount: number; isWatching: boolean } | null + comments: Array<{ + id: string + body: string + author: { accountId: string; displayName: string } | null + updateAuthor?: { accountId: string; displayName: string } | null + created: string + updated: string + }> + worklogs: Array<{ + id: string + author: { accountId: string; displayName: string } | null + updateAuthor?: { accountId: string; displayName: string } | null + comment?: string | null + started: string + timeSpent: string + timeSpentSeconds: number + created: string + updated: string + }> + attachments: Array<{ + id: string + filename: string + mimeType: string + size: number + content: string + thumbnail?: string | null + author: { accountId: string; displayName: string } | null + created: string + }> + issue: Record } } @@ -29,11 +876,23 @@ export interface JiraRetrieveBulkParams { export interface JiraRetrieveResponseBulk extends ToolResponse { output: { ts: string - summary: string - description: string - created: string - updated: string - }[] + total: number | null + issues: Array<{ + id: string + key: string + self: string + summary: string + description: string | null + status: { id: string; name: string } + issuetype: { id: string; name: string } + priority: { id: string; name: string } | null + assignee: { accountId: string; 
displayName: string } | null + created: string + updated: string + }> + nextPageToken: string | null + isLast: boolean + } } export interface JiraUpdateParams { @@ -42,11 +901,17 @@ export interface JiraUpdateParams { projectId?: string issueKey: string summary?: string - title?: string description?: string - status?: string priority?: string assignee?: string + labels?: string[] + components?: string[] + duedate?: string + fixVersions?: string[] + environment?: string + customFieldId?: string + customFieldValue?: string + notifyUsers?: boolean cloudId?: string } @@ -71,7 +936,9 @@ export interface JiraWriteParams { issueType: string parent?: { key: string } labels?: string[] + components?: string[] duedate?: string + fixVersions?: string[] reporter?: string environment?: string customFieldId?: string @@ -81,10 +948,13 @@ export interface JiraWriteParams { export interface JiraWriteResponse extends ToolResponse { output: { ts: string + id: string issueKey: string + self: string summary: string success: boolean url: string + assigneeId: string | null } } @@ -112,7 +982,6 @@ export interface JiraCloudResource { avatarUrl: string } -// Delete Issue export interface JiraDeleteIssueParams { accessToken: string domain: string @@ -129,7 +998,6 @@ export interface JiraDeleteIssueResponse extends ToolResponse { } } -// Assign Issue export interface JiraAssignIssueParams { accessToken: string domain: string @@ -147,13 +1015,13 @@ export interface JiraAssignIssueResponse extends ToolResponse { } } -// Transition Issue export interface JiraTransitionIssueParams { accessToken: string domain: string issueKey: string transitionId: string comment?: string + resolution?: string cloudId?: string } @@ -162,16 +1030,17 @@ export interface JiraTransitionIssueResponse extends ToolResponse { ts: string issueKey: string transitionId: string + transitionName: string | null + toStatus: { id: string; name: string } | null success: boolean } } -// Search Issues export interface 
JiraSearchIssuesParams { accessToken: string domain: string jql: string - startAt?: number + nextPageToken?: string maxResults?: number fields?: string[] cloudId?: string @@ -180,27 +1049,41 @@ export interface JiraSearchIssuesParams { export interface JiraSearchIssuesResponse extends ToolResponse { output: { ts: string - total: number - startAt: number - maxResults: number issues: Array<{ + id: string key: string + self: string summary: string - status: string - assignee?: string - priority?: string + description: string | null + status: { + id: string + name: string + statusCategory?: { id: number; key: string; name: string; colorName: string } + } + issuetype: { id: string; name: string; subtask: boolean } + project: { id: string; key: string; name: string } + priority: { id: string; name: string } | null + assignee: { accountId: string; displayName: string } | null + reporter: { accountId: string; displayName: string } | null + labels: string[] + components: Array<{ id: string; name: string }> + resolution: { id: string; name: string } | null + duedate: string | null created: string updated: string }> + nextPageToken: string | null + isLast: boolean + total: number | null } } -// Comments export interface JiraAddCommentParams { accessToken: string domain: string issueKey: string body: string + visibility?: { type: string; value: string } cloudId?: string } @@ -210,6 +1093,9 @@ export interface JiraAddCommentResponse extends ToolResponse { issueKey: string commentId: string body: string + author: { accountId: string; displayName: string } + created: string + updated: string success: boolean } } @@ -220,6 +1106,7 @@ export interface JiraGetCommentsParams { issueKey: string startAt?: number maxResults?: number + orderBy?: string cloudId?: string } @@ -228,12 +1115,16 @@ export interface JiraGetCommentsResponse extends ToolResponse { ts: string issueKey: string total: number + startAt: number + maxResults: number comments: Array<{ id: string - author: string body: 
string + author: { accountId: string; displayName: string; active?: boolean } + updateAuthor: { accountId: string; displayName: string } | null created: string updated: string + visibility: { type: string; value: string } | null }> } } @@ -244,6 +1135,7 @@ export interface JiraUpdateCommentParams { issueKey: string commentId: string body: string + visibility?: { type: string; value: string } cloudId?: string } @@ -253,6 +1145,9 @@ export interface JiraUpdateCommentResponse extends ToolResponse { issueKey: string commentId: string body: string + author: { accountId: string; displayName: string } + created: string + updated: string success: boolean } } @@ -274,7 +1169,6 @@ export interface JiraDeleteCommentResponse extends ToolResponse { } } -// Attachments export interface JiraGetAttachmentsParams { accessToken: string domain: string @@ -289,11 +1183,12 @@ export interface JiraGetAttachmentsResponse extends ToolResponse { attachments: Array<{ id: string filename: string - author: string - created: string - size: number mimeType: string + size: number content: string + thumbnail: string | null + author: { accountId: string; displayName: string } | null + created: string }> } } @@ -325,12 +1220,22 @@ export interface JiraAddAttachmentResponse extends ToolResponse { output: { ts: string issueKey: string + attachments: Array<{ + id: string + filename: string + mimeType: string + size: number + content: string + }> attachmentIds: string[] - files: ToolFileData[] + files: Array<{ + name: string + mimeType: string + size: number + }> } } -// Worklogs export interface JiraAddWorklogParams { accessToken: string domain: string @@ -338,6 +1243,7 @@ export interface JiraAddWorklogParams { timeSpentSeconds: number comment?: string started?: string + visibility?: { type: string; value: string } cloudId?: string } @@ -346,7 +1252,11 @@ export interface JiraAddWorklogResponse extends ToolResponse { ts: string issueKey: string worklogId: string + timeSpent: string timeSpentSeconds: 
number + author: { accountId: string; displayName: string } + started: string + created: string success: boolean } } @@ -365,15 +1275,18 @@ export interface JiraGetWorklogsResponse extends ToolResponse { ts: string issueKey: string total: number + startAt: number + maxResults: number worklogs: Array<{ id: string - author: string - timeSpentSeconds: number + author: { accountId: string; displayName: string } + updateAuthor: { accountId: string; displayName: string } | null + comment: string | null + started: string timeSpent: string - comment?: string + timeSpentSeconds: number created: string updated: string - started: string }> } } @@ -386,6 +1299,7 @@ export interface JiraUpdateWorklogParams { timeSpentSeconds?: number comment?: string started?: string + visibility?: { type: string; value: string } cloudId?: string } @@ -394,6 +1308,8 @@ export interface JiraUpdateWorklogResponse extends ToolResponse { ts: string issueKey: string worklogId: string + timeSpent: string | null + timeSpentSeconds: number | null success: boolean } } @@ -415,7 +1331,6 @@ export interface JiraDeleteWorklogResponse extends ToolResponse { } } -// Issue Links export interface JiraCreateIssueLinkParams { accessToken: string domain: string @@ -432,6 +1347,7 @@ export interface JiraCreateIssueLinkResponse extends ToolResponse { inwardIssue: string outwardIssue: string linkType: string + linkId: string | null success: boolean } } @@ -451,7 +1367,6 @@ export interface JiraDeleteIssueLinkResponse extends ToolResponse { } } -// Watchers export interface JiraAddWatcherParams { accessToken: string domain: string @@ -486,6 +1401,33 @@ export interface JiraRemoveWatcherResponse extends ToolResponse { } } +export interface JiraGetUsersParams { + accessToken: string + domain: string + accountId?: string + startAt?: number + maxResults?: number + cloudId?: string +} + +export interface JiraGetUsersResponse extends ToolResponse { + output: { + ts: string + users: Array<{ + accountId: string + 
accountType?: string + active: boolean + displayName: string + emailAddress?: string + avatarUrl?: string + timeZone?: string + }> + total: number + startAt: number + maxResults: number + } +} + export type JiraResponse = | JiraRetrieveResponse | JiraUpdateResponse @@ -510,3 +1452,4 @@ export type JiraResponse = | JiraDeleteIssueLinkResponse | JiraAddWatcherResponse | JiraRemoveWatcherResponse + | JiraGetUsersResponse diff --git a/apps/sim/tools/jira/update.ts b/apps/sim/tools/jira/update.ts index c9a656838..bf42d6cd2 100644 --- a/apps/sim/tools/jira/update.ts +++ b/apps/sim/tools/jira/update.ts @@ -1,4 +1,5 @@ import type { JiraUpdateParams, JiraUpdateResponse } from '@/tools/jira/types' +import { TIMESTAMP_OUTPUT } from '@/tools/jira/types' import type { ToolConfig } from '@/tools/types' export const jiraUpdateTool: ToolConfig = { @@ -25,12 +26,6 @@ export const jiraUpdateTool: ToolConfig = visibility: 'user-only', description: 'Your Jira domain (e.g., yourcompany.atlassian.net)', }, - projectId: { - type: 'string', - required: false, - visibility: 'user-or-llm', - description: 'Jira project key (e.g., PROJ). 
Optional when updating a single issue.', - }, issueKey: { type: 'string', required: true, @@ -49,23 +44,65 @@ export const jiraUpdateTool: ToolConfig = visibility: 'user-or-llm', description: 'New description for the issue', }, - status: { - type: 'string', - required: false, - visibility: 'user-or-llm', - description: 'New status for the issue', - }, priority: { type: 'string', required: false, visibility: 'user-or-llm', - description: 'New priority for the issue', + description: 'New priority ID or name for the issue (e.g., "High")', }, assignee: { type: 'string', required: false, visibility: 'user-or-llm', - description: 'New assignee for the issue', + description: 'New assignee account ID for the issue', + }, + labels: { + type: 'json', + required: false, + visibility: 'user-or-llm', + description: 'Labels to set on the issue (array of label name strings)', + }, + components: { + type: 'json', + required: false, + visibility: 'user-or-llm', + description: 'Components to set on the issue (array of component name strings)', + }, + duedate: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Due date for the issue (format: YYYY-MM-DD)', + }, + fixVersions: { + type: 'json', + required: false, + visibility: 'user-or-llm', + description: 'Fix versions to set (array of version name strings)', + }, + environment: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Environment information for the issue', + }, + customFieldId: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Custom field ID to update (e.g., customfield_10001)', + }, + customFieldValue: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Value for the custom field', + }, + notifyUsers: { + type: 'boolean', + required: false, + visibility: 'user-or-llm', + description: 'Whether to send email notifications about this update (default: true)', }, cloudId: { type: 'string', @@ 
-83,17 +120,22 @@ export const jiraUpdateTool: ToolConfig = 'Content-Type': 'application/json', }), body: (params) => { - // Pass all parameters to the internal API route return { domain: params.domain, accessToken: params.accessToken, issueKey: params.issueKey, summary: params.summary, - title: params.title, // Support both for backwards compatibility description: params.description, - status: params.status, priority: params.priority, assignee: params.assignee, + labels: params.labels, + components: params.components, + duedate: params.duedate, + fixVersions: params.fixVersions, + environment: params.environment, + customFieldId: params.customFieldId, + customFieldValue: params.customFieldValue, + notifyUsers: params.notifyUsers, cloudId: params.cloudId, } }, @@ -116,12 +158,10 @@ export const jiraUpdateTool: ToolConfig = const data = JSON.parse(responseText) - // The internal API route already returns the correct format if (data.success && data.output) { return data } - // Fallback for unexpected response format return { success: data.success || false, output: data.output || { @@ -135,7 +175,7 @@ export const jiraUpdateTool: ToolConfig = }, outputs: { - ts: { type: 'string', description: 'Timestamp of the operation' }, + ts: TIMESTAMP_OUTPUT, issueKey: { type: 'string', description: 'Updated issue key (e.g., PROJ-123)' }, summary: { type: 'string', description: 'Issue summary after update' }, }, diff --git a/apps/sim/tools/jira/update_comment.ts b/apps/sim/tools/jira/update_comment.ts index b469e24f0..d9c273987 100644 --- a/apps/sim/tools/jira/update_comment.ts +++ b/apps/sim/tools/jira/update_comment.ts @@ -1,7 +1,24 @@ import type { JiraUpdateCommentParams, JiraUpdateCommentResponse } from '@/tools/jira/types' -import { getJiraCloudId } from '@/tools/jira/utils' +import { TIMESTAMP_OUTPUT, USER_OUTPUT_PROPERTIES } from '@/tools/jira/types' +import { extractAdfText, getJiraCloudId, transformUser } from '@/tools/jira/utils' import type { ToolConfig } from 
'@/tools/types' +/** + * Transforms an update comment API response into typed output. + */ +function transformUpdateCommentResponse(data: any, params: JiraUpdateCommentParams) { + return { + ts: new Date().toISOString(), + issueKey: params.issueKey ?? 'unknown', + commentId: data?.id ?? params.commentId ?? 'unknown', + body: data?.body ? (extractAdfText(data.body) ?? params.body ?? '') : (params.body ?? ''), + author: transformUser(data?.author) ?? { accountId: '', displayName: '' }, + created: data?.created ?? '', + updated: data?.updated ?? '', + success: true, + } +} + export const jiraUpdateCommentTool: ToolConfig = { id: 'jira_update_comment', @@ -45,6 +62,13 @@ export const jiraUpdateCommentTool: ToolConfig { if (!params.cloudId) return undefined as any - return { + const payload: Record = { body: { type: 'doc', version: 1, content: [ { type: 'paragraph', - content: [ - { - type: 'text', - text: params.body, - }, - ], + content: [{ type: 'text', text: params.body }], }, ], }, } + if (params.visibility) payload.visibility = params.visibility + return payload }, }, transformResponse: async (response: Response, params?: JiraUpdateCommentParams) => { - if (!params?.cloudId) { - const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) - // Make the actual request with the resolved cloudId - const commentUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params?.issueKey}/comment/${params?.commentId}` + const payload: Record = { + body: { + type: 'doc', + version: 1, + content: [ + { + type: 'paragraph', + content: [{ type: 'text', text: params?.body ?? 
'' }], + }, + ], + }, + } + if (params?.visibility) payload.visibility = params.visibility + + const makeRequest = async (cloudId: string) => { + const commentUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params!.issueKey}/comment/${params!.commentId}` const commentResponse = await fetch(commentUrl, { method: 'PUT', headers: { Accept: 'application/json', 'Content-Type': 'application/json', - Authorization: `Bearer ${params?.accessToken}`, + Authorization: `Bearer ${params!.accessToken}`, }, - body: JSON.stringify({ - body: { - type: 'doc', - version: 1, - content: [ - { - type: 'paragraph', - content: [ - { - type: 'text', - text: params?.body, - }, - ], - }, - ], - }, - }), + body: JSON.stringify(payload), }) if (!commentResponse.ok) { @@ -131,48 +148,46 @@ export const jiraUpdateCommentTool: ToolConfig = { + timeSpentSeconds: params.timeSpentSeconds ? Number(params.timeSpentSeconds) : undefined, + comment: params.comment + ? { + type: 'doc', + version: 1, + content: [ + { + type: 'paragraph', + content: [ + { + type: 'text', + text: params.comment, + }, + ], + }, + ], + } + : undefined, + started: params.started ? params.started.replace(/Z$/, '+0000') : undefined, + } + if (params.visibility) body.visibility = params.visibility + return body +} + +function transformWorklogResponse(data: any, params: JiraUpdateWorklogParams) { + return { + ts: new Date().toISOString(), + issueKey: params.issueKey || 'unknown', + worklogId: data?.id || params.worklogId || 'unknown', + timeSpent: data?.timeSpent ?? null, + timeSpentSeconds: data?.timeSpentSeconds ?? null, + comment: data?.comment ? extractAdfText(data.comment) : null, + author: data?.author ? transformUser(data.author) : null, + updateAuthor: data?.updateAuthor ? 
transformUser(data.updateAuthor) : null, + started: data?.started || null, + created: data?.created || null, + updated: data?.updated || null, + success: true, + } +} + export const jiraUpdateWorklogTool: ToolConfig = { id: 'jira_update_worklog', @@ -57,6 +101,13 @@ export const jiraUpdateWorklogTool: ToolConfig { if (!params.cloudId) return undefined as any - return { - timeSpentSeconds: Number(params.timeSpentSeconds), - comment: params.comment - ? { - type: 'doc', - version: 1, - content: [ - { - type: 'paragraph', - content: [ - { - type: 'text', - text: params.comment, - }, - ], - }, - ], - } - : undefined, - started: params.started, - } + return buildWorklogBody(params) }, }, transformResponse: async (response: Response, params?: JiraUpdateWorklogParams) => { if (!params?.cloudId) { const cloudId = await getJiraCloudId(params!.domain, params!.accessToken) - // Make the actual request with the resolved cloudId - const worklogUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params?.issueKey}/worklog/${params?.worklogId}` + const worklogUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${params!.issueKey}/worklog/${params!.worklogId}` const worklogResponse = await fetch(worklogUrl, { method: 'PUT', headers: { Accept: 'application/json', 'Content-Type': 'application/json', - Authorization: `Bearer ${params?.accessToken}`, + Authorization: `Bearer ${params!.accessToken}`, }, - body: JSON.stringify({ - timeSpentSeconds: params?.timeSpentSeconds ? Number(params.timeSpentSeconds) : 0, - comment: params?.comment - ? 
{ - type: 'doc', - version: 1, - content: [ - { - type: 'paragraph', - content: [ - { - type: 'text', - text: params.comment, - }, - ], - }, - ], - } - : undefined, - started: params?.started, - }), + body: JSON.stringify(buildWorklogBody(params!)), }) if (!worklogResponse.ok) { @@ -152,19 +162,12 @@ export const jiraUpdateWorklogTool: ToolConfig { const response = await fetch('https://api.atlassian.com/oauth/token/accessible-resources', { method: 'GET', diff --git a/apps/sim/tools/jira/write.ts b/apps/sim/tools/jira/write.ts index bf2816be0..47c8be58b 100644 --- a/apps/sim/tools/jira/write.ts +++ b/apps/sim/tools/jira/write.ts @@ -1,10 +1,11 @@ import type { JiraWriteParams, JiraWriteResponse } from '@/tools/jira/types' +import { TIMESTAMP_OUTPUT } from '@/tools/jira/types' import type { ToolConfig } from '@/tools/types' export const jiraWriteTool: ToolConfig = { id: 'jira_write', name: 'Jira Write', - description: 'Write a Jira issue', + description: 'Create a new Jira issue', version: '1.0.0', oauth: { @@ -65,8 +66,14 @@ export const jiraWriteTool: ToolConfig = { issueType: { type: 'string', required: true, - visibility: 'hidden', - description: 'Type of issue to create (e.g., Task, Story)', + visibility: 'user-or-llm', + description: 'Type of issue to create (e.g., Task, Story, Bug, Epic, Sub-task)', + }, + parent: { + type: 'json', + required: false, + visibility: 'user-or-llm', + description: 'Parent issue key for creating subtasks (e.g., { "key": "PROJ-123" })', }, labels: { type: 'array', @@ -74,12 +81,24 @@ export const jiraWriteTool: ToolConfig = { visibility: 'user-or-llm', description: 'Labels for the issue (array of label names)', }, + components: { + type: 'array', + required: false, + visibility: 'user-or-llm', + description: 'Components for the issue (array of component names)', + }, duedate: { type: 'string', required: false, visibility: 'user-or-llm', description: 'Due date for the issue (format: YYYY-MM-DD)', }, + fixVersions: { + type: 'array', 
+ required: false, + visibility: 'user-or-llm', + description: 'Fix versions for the issue (array of version names)', + }, reporter: { type: 'string', required: false, @@ -113,7 +132,6 @@ export const jiraWriteTool: ToolConfig = { 'Content-Type': 'application/json', }), body: (params) => { - // Pass all parameters to the internal API route return { domain: params.domain, accessToken: params.accessToken, @@ -126,7 +144,9 @@ export const jiraWriteTool: ToolConfig = { issueType: params.issueType, parent: params.parent, labels: params.labels, + components: params.components, duedate: params.duedate, + fixVersions: params.fixVersions, reporter: params.reporter, environment: params.environment, customFieldId: params.customFieldId, @@ -143,39 +163,62 @@ export const jiraWriteTool: ToolConfig = { success: true, output: { ts: new Date().toISOString(), + id: '', issueKey: 'unknown', + self: '', summary: 'Issue created successfully', success: true, url: '', + assigneeId: null, }, } } const data = JSON.parse(responseText) - // The internal API route already returns the correct format if (data.success && data.output) { - return data + return { + success: data.success, + output: { + ts: data.output.ts ?? new Date().toISOString(), + id: data.output.id ?? '', + issueKey: data.output.issueKey ?? 'unknown', + self: data.output.self ?? '', + summary: data.output.summary ?? '', + success: data.output.success ?? true, + url: data.output.url ?? '', + assigneeId: data.output.assigneeId ?? null, + }, + } } - // Fallback for unexpected response format return { success: data.success || false, - output: data.output || { + output: { ts: new Date().toISOString(), - issueKey: 'unknown', - summary: 'Issue created', + id: data.output?.id ?? '', + issueKey: data.output?.issueKey ?? 'unknown', + self: data.output?.self ?? '', + summary: data.output?.summary ?? 'Issue created', success: false, + url: data.output?.url ?? '', + assigneeId: data.output?.assigneeId ?? 
null, }, error: data.error, } }, outputs: { - ts: { type: 'string', description: 'Timestamp of the operation' }, + ts: TIMESTAMP_OUTPUT, + id: { type: 'string', description: 'Created issue ID' }, issueKey: { type: 'string', description: 'Created issue key (e.g., PROJ-123)' }, + self: { type: 'string', description: 'REST API URL for the created issue' }, summary: { type: 'string', description: 'Issue summary' }, - url: { type: 'string', description: 'URL to the created issue' }, - assigneeId: { type: 'string', description: 'Account ID of the assigned user (if assigned)' }, + url: { type: 'string', description: 'URL to the created issue in Jira' }, + assigneeId: { + type: 'string', + description: 'Account ID of the assigned user (null if no assignee was set)', + optional: true, + }, }, } diff --git a/apps/sim/tools/jsm/add_comment.ts b/apps/sim/tools/jsm/add_comment.ts index 971836eac..e23069a13 100644 --- a/apps/sim/tools/jsm/add_comment.ts +++ b/apps/sim/tools/jsm/add_comment.ts @@ -1,4 +1,5 @@ import type { JsmAddCommentParams, JsmAddCommentResponse } from '@/tools/jsm/types' +import { USER_OUTPUT_PROPERTIES } from '@/tools/jsm/types' import type { ToolConfig } from '@/tools/types' export const jsmAddCommentTool: ToolConfig = { @@ -79,6 +80,8 @@ export const jsmAddCommentTool: ToolConfig = { @@ -107,7 +108,14 @@ export const jsmGetApprovalsTool: ToolConfig = { @@ -49,6 +50,12 @@ export const jsmGetCommentsTool: ToolConfig = { @@ -110,7 +111,14 @@ export const jsmGetCustomersTool: ToolConfig = { @@ -110,7 +111,14 @@ export const jsmGetQueuesTool: ToolConfig = { @@ -37,6 +42,13 @@ export const jsmGetRequestTool: ToolConfig = { + id: 'jsm_get_request_type_fields', + name: 'JSM Get Request Type Fields', + description: + 'Get the fields required to create a request of a specific type in Jira Service Management', + version: '1.0.0', + + oauth: { + required: true, + provider: 'jira', + }, + + params: { + accessToken: { + type: 'string', + required: true, + visibility: 
'hidden', + description: 'OAuth access token for Jira Service Management', + }, + domain: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'Your Jira domain (e.g., yourcompany.atlassian.net)', + }, + cloudId: { + type: 'string', + required: false, + visibility: 'hidden', + description: 'Jira Cloud ID for the instance', + }, + serviceDeskId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'Service Desk ID (e.g., "1", "2")', + }, + requestTypeId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'Request Type ID (e.g., "10", "15")', + }, + }, + + request: { + url: '/api/tools/jsm/requesttypefields', + method: 'POST', + headers: () => ({ + 'Content-Type': 'application/json', + }), + body: (params) => ({ + domain: params.domain, + accessToken: params.accessToken, + cloudId: params.cloudId, + serviceDeskId: params.serviceDeskId, + requestTypeId: params.requestTypeId, + }), + }, + + transformResponse: async (response: Response) => { + const responseText = await response.text() + + if (!responseText) { + return { + success: false, + output: { + ts: new Date().toISOString(), + serviceDeskId: '', + requestTypeId: '', + canAddRequestParticipants: false, + canRaiseOnBehalfOf: false, + requestTypeFields: [], + }, + error: 'Empty response from API', + } + } + + const data = JSON.parse(responseText) + + if (data.success && data.output) { + return data + } + + return { + success: data.success || false, + output: data.output || { + ts: new Date().toISOString(), + serviceDeskId: '', + requestTypeId: '', + canAddRequestParticipants: false, + canRaiseOnBehalfOf: false, + requestTypeFields: [], + }, + error: data.error, + } + }, + + outputs: { + ts: { type: 'string', description: 'Timestamp of the operation' }, + serviceDeskId: { type: 'string', description: 'Service desk ID' }, + requestTypeId: { type: 'string', description: 'Request type ID' }, + canAddRequestParticipants: { + type: 
'boolean', + description: 'Whether participants can be added to requests of this type', + }, + canRaiseOnBehalfOf: { + type: 'boolean', + description: 'Whether requests can be raised on behalf of another user', + }, + requestTypeFields: { + type: 'array', + description: 'List of fields for this request type', + items: { + type: 'object', + properties: REQUEST_TYPE_FIELD_PROPERTIES, + }, + }, + }, +} diff --git a/apps/sim/tools/jsm/get_request_types.ts b/apps/sim/tools/jsm/get_request_types.ts index 717d64239..715715a83 100644 --- a/apps/sim/tools/jsm/get_request_types.ts +++ b/apps/sim/tools/jsm/get_request_types.ts @@ -1,4 +1,5 @@ import type { JsmGetRequestTypesParams, JsmGetRequestTypesResponse } from '@/tools/jsm/types' +import { REQUEST_TYPE_ITEM_PROPERTIES } from '@/tools/jsm/types' import type { ToolConfig } from '@/tools/types' export const jsmGetRequestTypesTool: ToolConfig< @@ -40,6 +41,24 @@ export const jsmGetRequestTypesTool: ToolConfig< visibility: 'user-or-llm', description: 'Service Desk ID (e.g., "1", "2")', }, + searchQuery: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Filter request types by name', + }, + groupId: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Filter by request type group ID', + }, + expand: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Comma-separated fields to expand in the response', + }, start: { type: 'number', required: false, @@ -65,6 +84,9 @@ export const jsmGetRequestTypesTool: ToolConfig< accessToken: params.accessToken, cloudId: params.cloudId, serviceDeskId: params.serviceDeskId, + searchQuery: params.searchQuery, + groupId: params.groupId, + expand: params.expand, start: params.start, limit: params.limit, }), @@ -106,7 +128,14 @@ export const jsmGetRequestTypesTool: ToolConfig< outputs: { ts: { type: 'string', description: 'Timestamp of the operation' }, - requestTypes: { type: 'json', description: 'Array 
of request types' }, + requestTypes: { + type: 'array', + description: 'List of request types', + items: { + type: 'object', + properties: REQUEST_TYPE_ITEM_PROPERTIES, + }, + }, total: { type: 'number', description: 'Total number of request types' }, isLastPage: { type: 'boolean', description: 'Whether this is the last page' }, }, diff --git a/apps/sim/tools/jsm/get_requests.ts b/apps/sim/tools/jsm/get_requests.ts index 2f769c20e..5aca92bd0 100644 --- a/apps/sim/tools/jsm/get_requests.ts +++ b/apps/sim/tools/jsm/get_requests.ts @@ -1,4 +1,5 @@ import type { JsmGetRequestsParams, JsmGetRequestsResponse } from '@/tools/jsm/types' +import { REQUEST_ITEM_PROPERTIES } from '@/tools/jsm/types' import type { ToolConfig } from '@/tools/types' export const jsmGetRequestsTool: ToolConfig = { @@ -42,13 +43,19 @@ export const jsmGetRequestsTool: ToolConfig = { @@ -106,7 +107,14 @@ export const jsmGetSlaTool: ToolConfig = { outputs: { ts: { type: 'string', description: 'Timestamp of the operation' }, issueIdOrKey: { type: 'string', description: 'Issue ID or key' }, - slas: { type: 'json', description: 'Array of SLA information' }, + slas: { + type: 'array', + description: 'List of SLA metrics', + items: { + type: 'object', + properties: SLA_ITEM_PROPERTIES, + }, + }, total: { type: 'number', description: 'Total number of SLAs' }, isLastPage: { type: 'boolean', description: 'Whether this is the last page' }, }, diff --git a/apps/sim/tools/jsm/get_transitions.ts b/apps/sim/tools/jsm/get_transitions.ts index 41a61f482..d864cf747 100644 --- a/apps/sim/tools/jsm/get_transitions.ts +++ b/apps/sim/tools/jsm/get_transitions.ts @@ -1,4 +1,5 @@ import type { JsmGetTransitionsParams, JsmGetTransitionsResponse } from '@/tools/jsm/types' +import { TRANSITION_ITEM_PROPERTIES } from '@/tools/jsm/types' import type { ToolConfig } from '@/tools/types' export const jsmGetTransitionsTool: ToolConfig = @@ -38,6 +39,18 @@ export const jsmGetTransitionsTool: ToolConfig ongoingCycle?: { startTime: { 
iso8601: string } @@ -115,11 +349,192 @@ export interface JsmTransition { name: string } +/** Participant representation */ +export interface JsmParticipant { + accountId: string + displayName: string + emailAddress?: string + active: boolean +} + +/** Approver representation */ +export interface JsmApprover { + accountId: string + displayName: string + emailAddress?: string + approverDecision: 'pending' | 'approved' | 'declined' +} + +/** Approval representation */ +export interface JsmApproval { + id: string + name: string + finalDecision: 'pending' | 'approved' | 'declined' + canAnswerApproval: boolean + approvers: JsmApprover[] + createdDate?: { iso8601: string; friendly: string } + completedDate?: { iso8601: string; friendly: string } +} + +/** Request type field representation */ +export interface JsmRequestTypeField { + fieldId: string + name: string + description?: string + required: boolean + visible?: boolean + validValues: Array<{ value: string; label: string; children?: unknown[] }> + presetValues?: unknown[] + defaultValues?: unknown[] + jiraSchema: { type: string; system?: string; custom?: string; customId?: number } +} + +// --------------------------------------------------------------------------- +// Params interfaces +// --------------------------------------------------------------------------- + export interface JsmGetServiceDesksParams extends JsmBaseParams { + expand?: string start?: number limit?: number } +export interface JsmGetRequestTypesParams extends JsmBaseParams { + serviceDeskId: string + searchQuery?: string + groupId?: string + expand?: string + start?: number + limit?: number +} + +export interface JsmCreateRequestParams extends JsmBaseParams { + serviceDeskId: string + requestTypeId: string + summary: string + description?: string + requestFieldValues?: Record + raiseOnBehalfOf?: string + requestParticipants?: string[] + channel?: string +} + +export interface JsmGetRequestParams extends JsmBaseParams { + issueIdOrKey: string + 
expand?: string +} + +export interface JsmGetRequestsParams extends JsmBaseParams { + serviceDeskId?: string + requestOwnership?: 'OWNED_REQUESTS' | 'PARTICIPATED_REQUESTS' | 'APPROVER' | 'ALL_REQUESTS' + requestStatus?: 'OPEN_REQUESTS' | 'CLOSED_REQUESTS' | 'ALL_REQUESTS' + requestTypeId?: string + searchTerm?: string + expand?: string + start?: number + limit?: number +} + +export interface JsmAddCommentParams extends JsmBaseParams { + issueIdOrKey: string + body: string + isPublic: boolean +} + +export interface JsmGetCommentsParams extends JsmBaseParams { + issueIdOrKey: string + isPublic?: boolean + internal?: boolean + expand?: string + start?: number + limit?: number +} + +export interface JsmGetCustomersParams extends JsmBaseParams { + serviceDeskId: string + query?: string + start?: number + limit?: number +} + +export interface JsmAddCustomerParams extends JsmBaseParams { + serviceDeskId: string + accountIds?: string + emails?: string +} + +export interface JsmGetOrganizationsParams extends JsmBaseParams { + serviceDeskId: string + start?: number + limit?: number +} + +export interface JsmGetQueuesParams extends JsmBaseParams { + serviceDeskId: string + includeCount?: boolean + start?: number + limit?: number +} + +export interface JsmGetSlaParams extends JsmBaseParams { + issueIdOrKey: string + start?: number + limit?: number +} + +export interface JsmTransitionRequestParams extends JsmBaseParams { + issueIdOrKey: string + transitionId: string + comment?: string +} + +export interface JsmGetTransitionsParams extends JsmBaseParams { + issueIdOrKey: string + start?: number + limit?: number +} + +export interface JsmCreateOrganizationParams extends JsmBaseParams { + name: string +} + +export interface JsmAddOrganizationParams extends JsmBaseParams { + serviceDeskId: string + organizationId: string +} + +export interface JsmGetParticipantsParams extends JsmBaseParams { + issueIdOrKey: string + start?: number + limit?: number +} + +export interface 
JsmAddParticipantsParams extends JsmBaseParams { + issueIdOrKey: string + accountIds: string +} + +export interface JsmGetApprovalsParams extends JsmBaseParams { + issueIdOrKey: string + start?: number + limit?: number +} + +export interface JsmAnswerApprovalParams extends JsmBaseParams { + issueIdOrKey: string + approvalId: string + decision: 'approve' | 'decline' +} + +export interface JsmGetRequestTypeFieldsParams extends JsmBaseParams { + serviceDeskId: string + requestTypeId: string +} + +// --------------------------------------------------------------------------- +// Response interfaces +// --------------------------------------------------------------------------- + export interface JsmGetServiceDesksResponse extends ToolResponse { output: { ts: string @@ -129,12 +544,6 @@ export interface JsmGetServiceDesksResponse extends ToolResponse { } } -export interface JsmGetRequestTypesParams extends JsmBaseParams { - serviceDeskId: string - start?: number - limit?: number -} - export interface JsmGetRequestTypesResponse extends ToolResponse { output: { ts: string @@ -144,15 +553,6 @@ export interface JsmGetRequestTypesResponse extends ToolResponse { } } -export interface JsmCreateRequestParams extends JsmBaseParams { - serviceDeskId: string - requestTypeId: string - summary: string - description?: string - requestFieldValues?: Record - raiseOnBehalfOf?: string -} - export interface JsmCreateRequestResponse extends ToolResponse { output: { ts: string @@ -160,31 +560,43 @@ export interface JsmCreateRequestResponse extends ToolResponse { issueKey: string requestTypeId: string serviceDeskId: string + createdDate: { iso8601: string; friendly: string; epochMillis: number } | null + currentStatus: { + status: string + statusCategory: string + statusDate?: { iso8601: string; friendly: string } + } | null + reporter: { accountId: string; displayName: string; emailAddress?: string } | null success: boolean url: string } } -export interface JsmGetRequestParams extends 
JsmBaseParams { - issueIdOrKey: string -} - export interface JsmGetRequestResponse extends ToolResponse { output: { ts: string - request: JsmRequest + issueId: string + issueKey: string + requestTypeId: string + serviceDeskId: string + createdDate: { iso8601: string; friendly: string; epochMillis: number } | null + currentStatus: { + status: string + statusCategory: string + statusDate: { iso8601: string; friendly: string } + } | null + reporter: { + accountId: string + displayName: string + emailAddress?: string + active: boolean + } | null + requestFieldValues: Array<{ fieldId: string; label: string; value: unknown }> + url: string + request?: Record } } -export interface JsmGetRequestsParams extends JsmBaseParams { - serviceDeskId?: string - requestOwnership?: 'OWNED_REQUESTS' | 'PARTICIPATED_REQUESTS' | 'ORGANIZATION' | 'ALL_REQUESTS' - requestStatus?: 'OPEN' | 'CLOSED' | 'ALL' - searchTerm?: string - start?: number - limit?: number -} - export interface JsmGetRequestsResponse extends ToolResponse { output: { ts: string @@ -194,12 +606,6 @@ export interface JsmGetRequestsResponse extends ToolResponse { } } -export interface JsmAddCommentParams extends JsmBaseParams { - issueIdOrKey: string - body: string - isPublic: boolean -} - export interface JsmAddCommentResponse extends ToolResponse { output: { ts: string @@ -207,18 +613,12 @@ export interface JsmAddCommentResponse extends ToolResponse { commentId: string body: string isPublic: boolean + author: { accountId: string; displayName: string; emailAddress?: string } | null + createdDate: { iso8601: string; friendly: string } | null success: boolean } } -export interface JsmGetCommentsParams extends JsmBaseParams { - issueIdOrKey: string - isPublic?: boolean - internal?: boolean - start?: number - limit?: number -} - export interface JsmGetCommentsResponse extends ToolResponse { output: { ts: string @@ -229,13 +629,6 @@ export interface JsmGetCommentsResponse extends ToolResponse { } } -export interface 
JsmGetCustomersParams extends JsmBaseParams { - serviceDeskId: string - query?: string - start?: number - limit?: number -} - export interface JsmGetCustomersResponse extends ToolResponse { output: { ts: string @@ -245,11 +638,6 @@ export interface JsmGetCustomersResponse extends ToolResponse { } } -export interface JsmAddCustomerParams extends JsmBaseParams { - serviceDeskId: string - emails: string -} - export interface JsmAddCustomerResponse extends ToolResponse { output: { ts: string @@ -258,12 +646,6 @@ export interface JsmAddCustomerResponse extends ToolResponse { } } -export interface JsmGetOrganizationsParams extends JsmBaseParams { - serviceDeskId: string - start?: number - limit?: number -} - export interface JsmGetOrganizationsResponse extends ToolResponse { output: { ts: string @@ -273,13 +655,6 @@ export interface JsmGetOrganizationsResponse extends ToolResponse { } } -export interface JsmGetQueuesParams extends JsmBaseParams { - serviceDeskId: string - includeCount?: boolean - start?: number - limit?: number -} - export interface JsmGetQueuesResponse extends ToolResponse { output: { ts: string @@ -289,12 +664,6 @@ export interface JsmGetQueuesResponse extends ToolResponse { } } -export interface JsmGetSlaParams extends JsmBaseParams { - issueIdOrKey: string - start?: number - limit?: number -} - export interface JsmGetSlaResponse extends ToolResponse { output: { ts: string @@ -305,12 +674,6 @@ export interface JsmGetSlaResponse extends ToolResponse { } } -export interface JsmTransitionRequestParams extends JsmBaseParams { - issueIdOrKey: string - transitionId: string - comment?: string -} - export interface JsmTransitionRequestResponse extends ToolResponse { output: { ts: string @@ -320,22 +683,16 @@ export interface JsmTransitionRequestResponse extends ToolResponse { } } -export interface JsmGetTransitionsParams extends JsmBaseParams { - issueIdOrKey: string -} - export interface JsmGetTransitionsResponse extends ToolResponse { output: { ts: string 
issueIdOrKey: string transitions: JsmTransition[] + total: number + isLastPage: boolean } } -export interface JsmCreateOrganizationParams extends JsmBaseParams { - name: string -} - export interface JsmCreateOrganizationResponse extends ToolResponse { output: { ts: string @@ -345,11 +702,6 @@ export interface JsmCreateOrganizationResponse extends ToolResponse { } } -export interface JsmAddOrganizationParams extends JsmBaseParams { - serviceDeskId: string - organizationId: string -} - export interface JsmAddOrganizationResponse extends ToolResponse { output: { ts: string @@ -359,19 +711,6 @@ export interface JsmAddOrganizationResponse extends ToolResponse { } } -export interface JsmParticipant { - accountId: string - displayName: string - emailAddress?: string - active: boolean -} - -export interface JsmGetParticipantsParams extends JsmBaseParams { - issueIdOrKey: string - start?: number - limit?: number -} - export interface JsmGetParticipantsResponse extends ToolResponse { output: { ts: string @@ -382,11 +721,6 @@ export interface JsmGetParticipantsResponse extends ToolResponse { } } -export interface JsmAddParticipantsParams extends JsmBaseParams { - issueIdOrKey: string - accountIds: string -} - export interface JsmAddParticipantsResponse extends ToolResponse { output: { ts: string @@ -396,29 +730,6 @@ export interface JsmAddParticipantsResponse extends ToolResponse { } } -export interface JsmApprover { - accountId: string - displayName: string - emailAddress?: string - approverDecision: 'pending' | 'approved' | 'declined' -} - -export interface JsmApproval { - id: string - name: string - finalDecision: 'pending' | 'approved' | 'declined' - canAnswerApproval: boolean - approvers: JsmApprover[] - createdDate?: { iso8601: string; friendly: string } - completedDate?: { iso8601: string; friendly: string } -} - -export interface JsmGetApprovalsParams extends JsmBaseParams { - issueIdOrKey: string - start?: number - limit?: number -} - export interface 
JsmGetApprovalsResponse extends ToolResponse { output: { ts: string @@ -429,22 +740,47 @@ export interface JsmGetApprovalsResponse extends ToolResponse { } } -export interface JsmAnswerApprovalParams extends JsmBaseParams { - issueIdOrKey: string - approvalId: string - decision: 'approve' | 'decline' -} - export interface JsmAnswerApprovalResponse extends ToolResponse { output: { ts: string issueIdOrKey: string approvalId: string decision: string + id: string | null + name: string | null + finalDecision: string | null + canAnswerApproval: boolean | null + approvers: Array<{ + approver: { + accountId: string + displayName: string + emailAddress?: string + active?: boolean + } + approverDecision: string + }> | null + createdDate: { iso8601: string; friendly: string; epochMillis: number } | null + completedDate: { iso8601: string; friendly: string; epochMillis: number } | null + approval?: Record success: boolean } } +export interface JsmGetRequestTypeFieldsResponse extends ToolResponse { + output: { + ts: string + serviceDeskId: string + requestTypeId: string + canAddRequestParticipants: boolean + canRaiseOnBehalfOf: boolean + requestTypeFields: JsmRequestTypeField[] + } +} + +// --------------------------------------------------------------------------- +// Union type for all JSM responses +// --------------------------------------------------------------------------- + /** Union type for all JSM responses */ export type JsmResponse = | JsmGetServiceDesksResponse @@ -467,3 +803,4 @@ export type JsmResponse = | JsmAddParticipantsResponse | JsmGetApprovalsResponse | JsmAnswerApprovalResponse + | JsmGetRequestTypeFieldsResponse diff --git a/apps/sim/tools/onepassword/create_item.ts b/apps/sim/tools/onepassword/create_item.ts new file mode 100644 index 000000000..5f9b70a07 --- /dev/null +++ b/apps/sim/tools/onepassword/create_item.ts @@ -0,0 +1,104 @@ +import type { + OnePasswordCreateItemParams, + OnePasswordCreateItemResponse, +} from '@/tools/onepassword/types' 
+import { FULL_ITEM_OUTPUTS, transformFullItem } from '@/tools/onepassword/utils' +import type { ToolConfig } from '@/tools/types' + +export const createItemTool: ToolConfig< + OnePasswordCreateItemParams, + OnePasswordCreateItemResponse +> = { + id: 'onepassword_create_item', + name: '1Password Create Item', + description: 'Create a new item in a vault', + version: '1.0.0', + + params: { + connectionMode: { + type: 'string', + required: false, + description: 'Connection mode: "service_account" or "connect"', + }, + serviceAccountToken: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Service Account token (for Service Account mode)', + }, + apiKey: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Connect API token (for Connect Server mode)', + }, + serverUrl: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Connect server URL (for Connect Server mode)', + }, + vaultId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The vault UUID to create the item in', + }, + category: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: + 'Item category (e.g., LOGIN, PASSWORD, API_CREDENTIAL, SECURE_NOTE, SERVER, DATABASE)', + }, + title: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Item title', + }, + tags: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Comma-separated list of tags', + }, + fields: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: + 'JSON array of field objects (e.g., [{"label":"username","value":"admin","type":"STRING","purpose":"USERNAME"}])', + }, + }, + + request: { + url: '/api/tools/onepassword/create-item', + method: 'POST', + headers: () => ({ 'Content-Type': 'application/json' }), + body: (params) => ({ + connectionMode: params.connectionMode, + 
serviceAccountToken: params.serviceAccountToken, + serverUrl: params.serverUrl, + apiKey: params.apiKey, + vaultId: params.vaultId, + category: params.category, + title: params.title, + tags: params.tags, + fields: params.fields, + }), + }, + + transformResponse: async (response) => { + const data = await response.json() + if (data.error) { + return { success: false, output: transformFullItem({}), error: data.error } + } + return { + success: true, + output: transformFullItem(data), + } + }, + + outputs: FULL_ITEM_OUTPUTS, +} diff --git a/apps/sim/tools/onepassword/delete_item.ts b/apps/sim/tools/onepassword/delete_item.ts new file mode 100644 index 000000000..08990a19a --- /dev/null +++ b/apps/sim/tools/onepassword/delete_item.ts @@ -0,0 +1,84 @@ +import type { + OnePasswordDeleteItemParams, + OnePasswordDeleteItemResponse, +} from '@/tools/onepassword/types' +import type { ToolConfig } from '@/tools/types' + +export const deleteItemTool: ToolConfig< + OnePasswordDeleteItemParams, + OnePasswordDeleteItemResponse +> = { + id: 'onepassword_delete_item', + name: '1Password Delete Item', + description: 'Delete an item from a vault', + version: '1.0.0', + + params: { + connectionMode: { + type: 'string', + required: false, + description: 'Connection mode: "service_account" or "connect"', + }, + serviceAccountToken: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Service Account token (for Service Account mode)', + }, + apiKey: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Connect API token (for Connect Server mode)', + }, + serverUrl: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Connect server URL (for Connect Server mode)', + }, + vaultId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The vault UUID', + }, + itemId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 
'The item UUID to delete', + }, + }, + + request: { + url: '/api/tools/onepassword/delete-item', + method: 'POST', + headers: () => ({ 'Content-Type': 'application/json' }), + body: (params) => ({ + connectionMode: params.connectionMode, + serviceAccountToken: params.serviceAccountToken, + serverUrl: params.serverUrl, + apiKey: params.apiKey, + vaultId: params.vaultId, + itemId: params.itemId, + }), + }, + + transformResponse: async (response) => { + const data = await response.json() + if (data.error) { + return { success: false, output: { success: false }, error: data.error } + } + return { + success: true, + output: { + success: true, + }, + } + }, + + outputs: { + success: { type: 'boolean', description: 'Whether the item was successfully deleted' }, + }, +} diff --git a/apps/sim/tools/onepassword/get_item.ts b/apps/sim/tools/onepassword/get_item.ts new file mode 100644 index 000000000..8049d7260 --- /dev/null +++ b/apps/sim/tools/onepassword/get_item.ts @@ -0,0 +1,78 @@ +import type { + OnePasswordGetItemParams, + OnePasswordGetItemResponse, +} from '@/tools/onepassword/types' +import { FULL_ITEM_OUTPUTS, transformFullItem } from '@/tools/onepassword/utils' +import type { ToolConfig } from '@/tools/types' + +export const getItemTool: ToolConfig = { + id: 'onepassword_get_item', + name: '1Password Get Item', + description: 'Get full details of an item including all fields and secrets', + version: '1.0.0', + + params: { + connectionMode: { + type: 'string', + required: false, + description: 'Connection mode: "service_account" or "connect"', + }, + serviceAccountToken: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Service Account token (for Service Account mode)', + }, + apiKey: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Connect API token (for Connect Server mode)', + }, + serverUrl: { + type: 'string', + required: false, + visibility: 'user-only', + description: 
'1Password Connect server URL (for Connect Server mode)', + }, + vaultId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The vault UUID', + }, + itemId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The item UUID to retrieve', + }, + }, + + request: { + url: '/api/tools/onepassword/get-item', + method: 'POST', + headers: () => ({ 'Content-Type': 'application/json' }), + body: (params) => ({ + connectionMode: params.connectionMode, + serviceAccountToken: params.serviceAccountToken, + serverUrl: params.serverUrl, + apiKey: params.apiKey, + vaultId: params.vaultId, + itemId: params.itemId, + }), + }, + + transformResponse: async (response) => { + const data = await response.json() + if (data.error) { + return { success: false, output: transformFullItem({}), error: data.error } + } + return { + success: true, + output: transformFullItem(data), + } + }, + + outputs: FULL_ITEM_OUTPUTS, +} diff --git a/apps/sim/tools/onepassword/get_vault.ts b/apps/sim/tools/onepassword/get_vault.ts new file mode 100644 index 000000000..cf2b63a44 --- /dev/null +++ b/apps/sim/tools/onepassword/get_vault.ts @@ -0,0 +1,107 @@ +import type { + OnePasswordGetVaultParams, + OnePasswordGetVaultResponse, +} from '@/tools/onepassword/types' +import type { ToolConfig } from '@/tools/types' + +export const getVaultTool: ToolConfig = { + id: 'onepassword_get_vault', + name: '1Password Get Vault', + description: 'Get details of a specific vault by ID', + version: '1.0.0', + + params: { + connectionMode: { + type: 'string', + required: false, + description: 'Connection mode: "service_account" or "connect"', + }, + serviceAccountToken: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Service Account token (for Service Account mode)', + }, + apiKey: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Connect API token (for Connect Server mode)', + }, 
+ serverUrl: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Connect server URL (for Connect Server mode)', + }, + vaultId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The vault UUID', + }, + }, + + request: { + url: '/api/tools/onepassword/get-vault', + method: 'POST', + headers: () => ({ 'Content-Type': 'application/json' }), + body: (params) => ({ + connectionMode: params.connectionMode, + serviceAccountToken: params.serviceAccountToken, + serverUrl: params.serverUrl, + apiKey: params.apiKey, + vaultId: params.vaultId, + }), + }, + + transformResponse: async (response) => { + const data = await response.json() + if (data.error) { + return { + success: false, + output: { + id: '', + name: '', + description: null, + attributeVersion: 0, + contentVersion: 0, + items: 0, + type: '', + createdAt: null, + updatedAt: null, + }, + error: data.error, + } + } + return { + success: true, + output: { + id: data.id ?? null, + name: data.name ?? null, + description: data.description ?? null, + attributeVersion: data.attributeVersion ?? 0, + contentVersion: data.contentVersion ?? 0, + items: data.items ?? 0, + type: data.type ?? null, + createdAt: data.createdAt ?? null, + updatedAt: data.updatedAt ?? 
null, + }, + } + }, + + outputs: { + id: { type: 'string', description: 'Vault ID' }, + name: { type: 'string', description: 'Vault name' }, + description: { type: 'string', description: 'Vault description', optional: true }, + attributeVersion: { type: 'number', description: 'Vault attribute version' }, + contentVersion: { type: 'number', description: 'Vault content version' }, + items: { type: 'number', description: 'Number of items in the vault' }, + type: { + type: 'string', + description: 'Vault type (USER_CREATED, PERSONAL, EVERYONE, TRANSFER)', + }, + createdAt: { type: 'string', description: 'Creation timestamp', optional: true }, + updatedAt: { type: 'string', description: 'Last update timestamp', optional: true }, + }, +} diff --git a/apps/sim/tools/onepassword/index.ts b/apps/sim/tools/onepassword/index.ts new file mode 100644 index 000000000..f51526b06 --- /dev/null +++ b/apps/sim/tools/onepassword/index.ts @@ -0,0 +1,19 @@ +import { createItemTool } from '@/tools/onepassword/create_item' +import { deleteItemTool } from '@/tools/onepassword/delete_item' +import { getItemTool } from '@/tools/onepassword/get_item' +import { getVaultTool } from '@/tools/onepassword/get_vault' +import { listItemsTool } from '@/tools/onepassword/list_items' +import { listVaultsTool } from '@/tools/onepassword/list_vaults' +import { replaceItemTool } from '@/tools/onepassword/replace_item' +import { resolveSecretTool } from '@/tools/onepassword/resolve_secret' +import { updateItemTool } from '@/tools/onepassword/update_item' + +export const onepasswordCreateItemTool = createItemTool +export const onepasswordDeleteItemTool = deleteItemTool +export const onepasswordGetItemTool = getItemTool +export const onepasswordGetVaultTool = getVaultTool +export const onepasswordListItemsTool = listItemsTool +export const onepasswordListVaultsTool = listVaultsTool +export const onepasswordReplaceItemTool = replaceItemTool +export const onepasswordResolveSecretTool = resolveSecretTool 
+export const onepasswordUpdateItemTool = updateItemTool diff --git a/apps/sim/tools/onepassword/list_items.ts b/apps/sim/tools/onepassword/list_items.ts new file mode 100644 index 000000000..4bcad6e65 --- /dev/null +++ b/apps/sim/tools/onepassword/list_items.ts @@ -0,0 +1,141 @@ +import type { + OnePasswordListItemsParams, + OnePasswordListItemsResponse, +} from '@/tools/onepassword/types' +import type { ToolConfig } from '@/tools/types' + +export const listItemsTool: ToolConfig = { + id: 'onepassword_list_items', + name: '1Password List Items', + description: 'List items in a vault. Returns summaries without field values.', + version: '1.0.0', + + params: { + connectionMode: { + type: 'string', + required: false, + description: 'Connection mode: "service_account" or "connect"', + }, + serviceAccountToken: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Service Account token (for Service Account mode)', + }, + apiKey: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Connect API token (for Connect Server mode)', + }, + serverUrl: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Connect server URL (for Connect Server mode)', + }, + vaultId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The vault UUID to list items from', + }, + filter: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'SCIM filter expression (e.g., title eq "API Key" or tag eq "production")', + }, + }, + + request: { + url: '/api/tools/onepassword/list-items', + method: 'POST', + headers: () => ({ 'Content-Type': 'application/json' }), + body: (params) => ({ + connectionMode: params.connectionMode, + serviceAccountToken: params.serviceAccountToken, + serverUrl: params.serverUrl, + apiKey: params.apiKey, + vaultId: params.vaultId, + filter: params.filter, + }), + }, + + transformResponse: async 
(response) => { + const data = await response.json() + if (data.error) { + return { success: false, output: { items: [] }, error: data.error } + } + const items = Array.isArray(data) ? data : [data] + return { + success: true, + output: { + items: items.map((item: any) => ({ + id: item.id ?? null, + title: item.title ?? null, + vault: item.vault ?? null, + category: item.category ?? null, + urls: (item.urls ?? []).map((url: any) => ({ + href: url.href ?? null, + label: url.label ?? null, + primary: url.primary ?? false, + })), + favorite: item.favorite ?? false, + tags: item.tags ?? [], + version: item.version ?? 0, + state: item.state ?? null, + createdAt: item.createdAt ?? null, + updatedAt: item.updatedAt ?? null, + lastEditedBy: item.lastEditedBy ?? null, + })), + }, + } + }, + + outputs: { + items: { + type: 'array', + description: 'List of items in the vault (summaries without field values)', + items: { + type: 'object', + properties: { + id: { type: 'string', description: 'Item ID' }, + title: { type: 'string', description: 'Item title' }, + vault: { + type: 'object', + description: 'Vault reference', + properties: { + id: { type: 'string', description: 'Vault ID' }, + }, + }, + category: { type: 'string', description: 'Item category (e.g., LOGIN, API_CREDENTIAL)' }, + urls: { + type: 'array', + description: 'URLs associated with the item', + optional: true, + items: { + type: 'object', + properties: { + href: { type: 'string', description: 'URL' }, + label: { type: 'string', description: 'URL label', optional: true }, + primary: { type: 'boolean', description: 'Whether this is the primary URL' }, + }, + }, + }, + favorite: { type: 'boolean', description: 'Whether the item is favorited' }, + tags: { type: 'array', description: 'Item tags' }, + version: { type: 'number', description: 'Item version number' }, + state: { + type: 'string', + description: 'Item state (ARCHIVED or DELETED)', + optional: true, + }, + createdAt: { type: 'string', description: 
'Creation timestamp', optional: true }, + updatedAt: { type: 'string', description: 'Last update timestamp', optional: true }, + lastEditedBy: { type: 'string', description: 'ID of the last editor', optional: true }, + }, + }, + }, + }, +} diff --git a/apps/sim/tools/onepassword/list_vaults.ts b/apps/sim/tools/onepassword/list_vaults.ts new file mode 100644 index 000000000..64af64ec7 --- /dev/null +++ b/apps/sim/tools/onepassword/list_vaults.ts @@ -0,0 +1,108 @@ +import type { + OnePasswordListVaultsParams, + OnePasswordListVaultsResponse, +} from '@/tools/onepassword/types' +import type { ToolConfig } from '@/tools/types' + +export const listVaultsTool: ToolConfig< + OnePasswordListVaultsParams, + OnePasswordListVaultsResponse +> = { + id: 'onepassword_list_vaults', + name: '1Password List Vaults', + description: 'List all vaults accessible by the Connect token or Service Account', + version: '1.0.0', + + params: { + connectionMode: { + type: 'string', + required: false, + description: 'Connection mode: "service_account" or "connect"', + }, + serviceAccountToken: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Service Account token (for Service Account mode)', + }, + apiKey: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Connect API token (for Connect Server mode)', + }, + serverUrl: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Connect server URL (for Connect Server mode)', + }, + filter: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'SCIM filter expression (e.g., name eq "My Vault")', + }, + }, + + request: { + url: '/api/tools/onepassword/list-vaults', + method: 'POST', + headers: () => ({ 'Content-Type': 'application/json' }), + body: (params) => ({ + connectionMode: params.connectionMode, + serviceAccountToken: params.serviceAccountToken, + serverUrl: params.serverUrl, + apiKey: 
params.apiKey, + filter: params.filter, + }), + }, + + transformResponse: async (response) => { + const data = await response.json() + if (data.error) { + return { success: false, output: { vaults: [] }, error: data.error } + } + const vaults = Array.isArray(data) ? data : [data] + return { + success: true, + output: { + vaults: vaults.map((vault: any) => ({ + id: vault.id ?? null, + name: vault.name ?? null, + description: vault.description ?? null, + attributeVersion: vault.attributeVersion ?? 0, + contentVersion: vault.contentVersion ?? 0, + items: vault.items ?? 0, + type: vault.type ?? null, + createdAt: vault.createdAt ?? null, + updatedAt: vault.updatedAt ?? null, + })), + }, + } + }, + + outputs: { + vaults: { + type: 'array', + description: 'List of accessible vaults', + items: { + type: 'object', + properties: { + id: { type: 'string', description: 'Vault ID' }, + name: { type: 'string', description: 'Vault name' }, + description: { type: 'string', description: 'Vault description', optional: true }, + attributeVersion: { type: 'number', description: 'Vault attribute version' }, + contentVersion: { type: 'number', description: 'Vault content version' }, + items: { type: 'number', description: 'Number of items in the vault' }, + type: { + type: 'string', + description: 'Vault type (USER_CREATED, PERSONAL, EVERYONE, TRANSFER)', + }, + createdAt: { type: 'string', description: 'Creation timestamp', optional: true }, + updatedAt: { type: 'string', description: 'Last update timestamp', optional: true }, + }, + }, + }, + }, +} diff --git a/apps/sim/tools/onepassword/replace_item.ts b/apps/sim/tools/onepassword/replace_item.ts new file mode 100644 index 000000000..4d8506fb9 --- /dev/null +++ b/apps/sim/tools/onepassword/replace_item.ts @@ -0,0 +1,89 @@ +import type { + OnePasswordReplaceItemParams, + OnePasswordReplaceItemResponse, +} from '@/tools/onepassword/types' +import { FULL_ITEM_OUTPUTS, transformFullItem } from '@/tools/onepassword/utils' +import type { 
ToolConfig } from '@/tools/types' + +export const replaceItemTool: ToolConfig< + OnePasswordReplaceItemParams, + OnePasswordReplaceItemResponse +> = { + id: 'onepassword_replace_item', + name: '1Password Replace Item', + description: 'Replace an entire item with new data (full update)', + version: '1.0.0', + + params: { + connectionMode: { + type: 'string', + required: false, + description: 'Connection mode: "service_account" or "connect"', + }, + serviceAccountToken: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Service Account token (for Service Account mode)', + }, + apiKey: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Connect API token (for Connect Server mode)', + }, + serverUrl: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Connect server URL (for Connect Server mode)', + }, + vaultId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The vault UUID', + }, + itemId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The item UUID to replace', + }, + item: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: + 'JSON object representing the full item (e.g., {"vault":{"id":"..."},"category":"LOGIN","title":"My Item","fields":[...]})', + }, + }, + + request: { + url: '/api/tools/onepassword/replace-item', + method: 'POST', + headers: () => ({ 'Content-Type': 'application/json' }), + body: (params) => ({ + connectionMode: params.connectionMode, + serviceAccountToken: params.serviceAccountToken, + serverUrl: params.serverUrl, + apiKey: params.apiKey, + vaultId: params.vaultId, + itemId: params.itemId, + item: params.item, + }), + }, + + transformResponse: async (response) => { + const data = await response.json() + if (data.error) { + return { success: false, output: transformFullItem({}), error: data.error } + } + return { + success: 
true, + output: transformFullItem(data), + } + }, + + outputs: FULL_ITEM_OUTPUTS, +} diff --git a/apps/sim/tools/onepassword/resolve_secret.ts b/apps/sim/tools/onepassword/resolve_secret.ts new file mode 100644 index 000000000..7b2820e07 --- /dev/null +++ b/apps/sim/tools/onepassword/resolve_secret.ts @@ -0,0 +1,67 @@ +import type { + OnePasswordResolveSecretParams, + OnePasswordResolveSecretResponse, +} from '@/tools/onepassword/types' +import type { ToolConfig } from '@/tools/types' + +export const resolveSecretTool: ToolConfig< + OnePasswordResolveSecretParams, + OnePasswordResolveSecretResponse +> = { + id: 'onepassword_resolve_secret', + name: '1Password Resolve Secret', + description: + 'Resolve a secret reference (op://vault/item/field) to its value. Service Account mode only.', + version: '1.0.0', + + params: { + connectionMode: { + type: 'string', + required: false, + description: 'Connection mode: must be "service_account" for this operation', + }, + serviceAccountToken: { + type: 'string', + required: true, + visibility: 'user-only', + description: '1Password Service Account token', + }, + secretReference: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: + 'Secret reference URI (e.g., op://vault-name/item-name/field-name or op://vault-name/item-name/section-name/field-name)', + }, + }, + + request: { + url: '/api/tools/onepassword/resolve-secret', + method: 'POST', + headers: () => ({ 'Content-Type': 'application/json' }), + body: (params) => ({ + connectionMode: params.connectionMode ?? 'service_account', + serviceAccountToken: params.serviceAccountToken, + secretReference: params.secretReference, + }), + }, + + transformResponse: async (response) => { + const data = await response.json() + if (data.error) { + return { success: false, output: { value: '', reference: '' }, error: data.error } + } + return { + success: true, + output: { + value: data.value ?? '', + reference: data.reference ?? 
'', + }, + } + }, + + outputs: { + value: { type: 'string', description: 'The resolved secret value' }, + reference: { type: 'string', description: 'The original secret reference URI' }, + }, +} diff --git a/apps/sim/tools/onepassword/types.ts b/apps/sim/tools/onepassword/types.ts new file mode 100644 index 000000000..688a43953 --- /dev/null +++ b/apps/sim/tools/onepassword/types.ts @@ -0,0 +1,159 @@ +import type { ToolResponse } from '@/tools/types' + +/** Base params shared by all 1Password tools (credential fields). */ +export interface OnePasswordBaseParams { + connectionMode?: 'service_account' | 'connect' + serviceAccountToken?: string + apiKey?: string + serverUrl?: string +} + +export interface OnePasswordListVaultsParams extends OnePasswordBaseParams { + filter?: string +} + +export interface OnePasswordGetVaultParams extends OnePasswordBaseParams { + vaultId: string +} + +export interface OnePasswordListItemsParams extends OnePasswordBaseParams { + vaultId: string + filter?: string +} + +export interface OnePasswordGetItemParams extends OnePasswordBaseParams { + vaultId: string + itemId: string +} + +export interface OnePasswordCreateItemParams extends OnePasswordBaseParams { + vaultId: string + category: string + title?: string + tags?: string + fields?: string +} + +export interface OnePasswordUpdateItemParams extends OnePasswordBaseParams { + vaultId: string + itemId: string + operations: string +} + +export interface OnePasswordReplaceItemParams extends OnePasswordBaseParams { + vaultId: string + itemId: string + item: string +} + +export interface OnePasswordDeleteItemParams extends OnePasswordBaseParams { + vaultId: string + itemId: string +} + +export interface OnePasswordResolveSecretParams extends OnePasswordBaseParams { + secretReference: string +} + +export interface OnePasswordListVaultsResponse extends ToolResponse { + output: { + vaults: Array<{ + id: string + name: string + description: string | null + attributeVersion: number + 
contentVersion: number + items: number + type: string + createdAt: string | null + updatedAt: string | null + }> + } +} + +export interface OnePasswordGetVaultResponse extends ToolResponse { + output: { + id: string + name: string + description: string | null + attributeVersion: number + contentVersion: number + items: number + type: string + createdAt: string | null + updatedAt: string | null + } +} + +export interface OnePasswordListItemsResponse extends ToolResponse { + output: { + items: Array<{ + id: string + title: string + vault: { id: string } + category: string + urls: Array<{ href: string; label: string | null; primary: boolean }> + favorite: boolean + tags: string[] + version: number + state: string | null + createdAt: string | null + updatedAt: string | null + lastEditedBy: string | null + }> + } +} + +export interface OnePasswordFullItemResponse extends ToolResponse { + output: { + id: string + title: string + vault: { id: string } + category: string + urls: Array<{ href: string; label: string | null; primary: boolean }> + favorite: boolean + tags: string[] + version: number + state: string | null + fields: Array<{ + id: string + label: string | null + type: string + purpose: string + value: string | null + section: { id: string } | null + generate: boolean + recipe: { + length: number | null + characterSets: string[] + excludeCharacters: string | null + } | null + entropy: number | null + }> + sections: Array<{ + id: string + label: string | null + }> + createdAt: string | null + updatedAt: string | null + lastEditedBy: string | null + } +} + +export type OnePasswordGetItemResponse = OnePasswordFullItemResponse +export type OnePasswordCreateItemResponse = OnePasswordFullItemResponse +export type OnePasswordUpdateItemResponse = OnePasswordFullItemResponse +export type OnePasswordReplaceItemResponse = OnePasswordFullItemResponse + +export interface OnePasswordDeleteItemResponse extends ToolResponse { + output: { + success: boolean + } +} + +export 
interface OnePasswordResolveSecretResponse extends ToolResponse { + output: { + value: string + reference: string + } +} diff --git a/apps/sim/tools/onepassword/update_item.ts b/apps/sim/tools/onepassword/update_item.ts new file mode 100644 index 000000000..af178dc87 --- /dev/null +++ b/apps/sim/tools/onepassword/update_item.ts @@ -0,0 +1,89 @@ +import type { + OnePasswordUpdateItemParams, + OnePasswordUpdateItemResponse, +} from '@/tools/onepassword/types' +import { FULL_ITEM_OUTPUTS, transformFullItem } from '@/tools/onepassword/utils' +import type { ToolConfig } from '@/tools/types' + +export const updateItemTool: ToolConfig< + OnePasswordUpdateItemParams, + OnePasswordUpdateItemResponse +> = { + id: 'onepassword_update_item', + name: '1Password Update Item', + description: 'Update an existing item using JSON Patch operations (RFC6902)', + version: '1.0.0', + + params: { + connectionMode: { + type: 'string', + required: false, + description: 'Connection mode: "service_account" or "connect"', + }, + serviceAccountToken: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Service Account token (for Service Account mode)', + }, + apiKey: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Connect API token (for Connect Server mode)', + }, + serverUrl: { + type: 'string', + required: false, + visibility: 'user-only', + description: '1Password Connect server URL (for Connect Server mode)', + }, + vaultId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The vault UUID', + }, + itemId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'The item UUID to update', + }, + operations: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: + 'JSON array of RFC6902 patch operations (e.g., [{"op":"replace","path":"/title","value":"New Title"}])', + }, + }, + + request: { + url: 
'/api/tools/onepassword/update-item', + method: 'POST', + headers: () => ({ 'Content-Type': 'application/json' }), + body: (params) => ({ + connectionMode: params.connectionMode, + serviceAccountToken: params.serviceAccountToken, + serverUrl: params.serverUrl, + apiKey: params.apiKey, + vaultId: params.vaultId, + itemId: params.itemId, + operations: params.operations, + }), + }, + + transformResponse: async (response) => { + const data = await response.json() + if (data.error) { + return { success: false, output: transformFullItem({}), error: data.error } + } + return { + success: true, + output: transformFullItem(data), + } + }, + + outputs: FULL_ITEM_OUTPUTS, +} diff --git a/apps/sim/tools/onepassword/utils.ts b/apps/sim/tools/onepassword/utils.ts new file mode 100644 index 000000000..d1abbf963 --- /dev/null +++ b/apps/sim/tools/onepassword/utils.ts @@ -0,0 +1,148 @@ +import type { OutputType } from '@/tools/types' + +/** Transforms a raw FullItem API response into our standardized output. */ +export function transformFullItem(data: any) { + return { + id: data.id ?? null, + title: data.title ?? null, + vault: data.vault ?? null, + category: data.category ?? null, + urls: (data.urls ?? []).map((url: any) => ({ + href: url.href ?? null, + label: url.label ?? null, + primary: url.primary ?? false, + })), + favorite: data.favorite ?? false, + tags: data.tags ?? [], + version: data.version ?? 0, + state: data.state ?? null, + fields: (data.fields ?? []).map((field: any) => ({ + id: field.id ?? null, + label: field.label ?? null, + type: field.type ?? 'STRING', + purpose: field.purpose ?? '', + value: field.value ?? null, + section: field.section ?? null, + generate: field.generate ?? false, + recipe: field.recipe + ? { + length: field.recipe.length ?? null, + characterSets: field.recipe.characterSets ?? [], + excludeCharacters: field.recipe.excludeCharacters ?? null, + } + : null, + entropy: field.entropy ?? null, + })), + sections: (data.sections ?? 
[]).map((section: any) => ({ + id: section.id ?? null, + label: section.label ?? null, + })), + createdAt: data.createdAt ?? null, + updatedAt: data.updatedAt ?? null, + lastEditedBy: data.lastEditedBy ?? null, + } +} + +/** Shared output schema for FullItem responses (get_item, create_item, update_item). */ +export const FULL_ITEM_OUTPUTS: Record< + string, + { + type: OutputType + description: string + optional?: boolean + properties?: Record + items?: { type: OutputType; description?: string; properties?: Record } + } +> = { + id: { type: 'string', description: 'Item ID' }, + title: { type: 'string', description: 'Item title' }, + vault: { + type: 'object', + description: 'Vault reference', + properties: { + id: { type: 'string', description: 'Vault ID' }, + }, + }, + category: { + type: 'string', + description: 'Item category (e.g., LOGIN, API_CREDENTIAL, SECURE_NOTE)', + }, + urls: { + type: 'array', + description: 'URLs associated with the item', + optional: true, + items: { + type: 'object', + properties: { + href: { type: 'string', description: 'URL' }, + label: { type: 'string', description: 'URL label', optional: true }, + primary: { type: 'boolean', description: 'Whether this is the primary URL' }, + }, + }, + }, + favorite: { type: 'boolean', description: 'Whether the item is favorited' }, + tags: { type: 'array', description: 'Item tags' }, + version: { type: 'number', description: 'Item version number' }, + state: { type: 'string', description: 'Item state (ARCHIVED or DELETED)', optional: true }, + fields: { + type: 'array', + description: 'Item fields including secrets', + items: { + type: 'object', + properties: { + id: { type: 'string', description: 'Field ID' }, + label: { type: 'string', description: 'Field label', optional: true }, + type: { + type: 'string', + description: 'Field type (STRING, EMAIL, CONCEALED, URL, TOTP, DATE, MONTH_YEAR, MENU)', + }, + purpose: { + type: 'string', + description: 'Field purpose (USERNAME, PASSWORD, NOTES, or 
empty)', + }, + value: { type: 'string', description: 'Field value', optional: true }, + section: { + type: 'object', + description: 'Section reference this field belongs to', + optional: true, + properties: { + id: { type: 'string', description: 'Section ID' }, + }, + }, + generate: { type: 'boolean', description: 'Whether the field value should be generated' }, + recipe: { + type: 'object', + description: 'Password generation recipe', + optional: true, + properties: { + length: { type: 'number', description: 'Generated password length', optional: true }, + characterSets: { + type: 'array', + description: 'Character sets (LETTERS, DIGITS, SYMBOLS)', + }, + excludeCharacters: { + type: 'string', + description: 'Characters to exclude', + optional: true, + }, + }, + }, + entropy: { type: 'number', description: 'Password entropy score', optional: true }, + }, + }, + }, + sections: { + type: 'array', + description: 'Item sections', + items: { + type: 'object', + properties: { + id: { type: 'string', description: 'Section ID' }, + label: { type: 'string', description: 'Section label', optional: true }, + }, + }, + }, + createdAt: { type: 'string', description: 'Creation timestamp', optional: true }, + updatedAt: { type: 'string', description: 'Last update timestamp', optional: true }, + lastEditedBy: { type: 'string', description: 'ID of the last editor', optional: true }, +} diff --git a/apps/sim/tools/params.ts b/apps/sim/tools/params.ts index 9ac5a9788..e1bb8fe7b 100644 --- a/apps/sim/tools/params.ts +++ b/apps/sim/tools/params.ts @@ -401,6 +401,7 @@ export function createUserToolSchema(toolConfig: ToolConfig): ToolSchema { } for (const [paramId, param] of Object.entries(toolConfig.params)) { + if (!param) continue const visibility = param.visibility ?? 
'user-or-llm' if (visibility === 'hidden') { continue diff --git a/apps/sim/tools/registry.ts b/apps/sim/tools/registry.ts index b6941d4ae..7411c53c5 100644 --- a/apps/sim/tools/registry.ts +++ b/apps/sim/tools/registry.ts @@ -849,6 +849,7 @@ import { jsmGetQueuesTool, jsmGetRequestsTool, jsmGetRequestTool, + jsmGetRequestTypeFieldsTool, jsmGetRequestTypesTool, jsmGetServiceDesksTool, jsmGetSlaTool, @@ -1158,6 +1159,17 @@ import { onedriveListTool, onedriveUploadTool, } from '@/tools/onedrive' +import { + onepasswordCreateItemTool, + onepasswordDeleteItemTool, + onepasswordGetItemTool, + onepasswordGetVaultTool, + onepasswordListItemsTool, + onepasswordListVaultsTool, + onepasswordReplaceItemTool, + onepasswordResolveSecretTool, + onepasswordUpdateItemTool, +} from '@/tools/onepassword' import { openAIEmbeddingsTool, openAIImageTool } from '@/tools/openai' import { outlookCopyTool, @@ -1945,6 +1957,7 @@ export const tools: Record = { jira_get_users: jiraGetUsersTool, jsm_get_service_desks: jsmGetServiceDesksTool, jsm_get_request_types: jsmGetRequestTypesTool, + jsm_get_request_type_fields: jsmGetRequestTypeFieldsTool, jsm_create_request: jsmCreateRequestTool, jsm_get_request: jsmGetRequestTool, jsm_get_requests: jsmGetRequestsTool, @@ -2133,6 +2146,15 @@ export const tools: Record = { notion_create_database_v2: notionCreateDatabaseV2Tool, notion_update_page_v2: notionUpdatePageV2Tool, notion_add_database_row_v2: notionAddDatabaseRowTool, + onepassword_list_vaults: onepasswordListVaultsTool, + onepassword_get_vault: onepasswordGetVaultTool, + onepassword_list_items: onepasswordListItemsTool, + onepassword_get_item: onepasswordGetItemTool, + onepassword_create_item: onepasswordCreateItemTool, + onepassword_replace_item: onepasswordReplaceItemTool, + onepassword_update_item: onepasswordUpdateItemTool, + onepassword_delete_item: onepasswordDeleteItemTool, + onepassword_resolve_secret: onepasswordResolveSecretTool, gmail_send: gmailSendTool, gmail_send_v2: 
gmailSendV2Tool, gmail_read: gmailReadTool, diff --git a/bun.lock b/bun.lock index defa6c36f..ddc7e235d 100644 --- a/bun.lock +++ b/bun.lock @@ -1,6 +1,5 @@ { "lockfileVersion": 1, - "configVersion": 0, "workspaces": { "": { "name": "simstudio", @@ -54,6 +53,7 @@ "name": "sim", "version": "0.1.0", "dependencies": { + "@1password/sdk": "0.3.1", "@a2a-js/sdk": "0.3.7", "@anthropic-ai/sdk": "0.71.2", "@aws-sdk/client-bedrock-runtime": "3.940.0", @@ -326,23 +326,27 @@ "react-dom": "19.2.1", }, "packages": { + "@1password/sdk": ["@1password/sdk@0.3.1", "", { "dependencies": { "@1password/sdk-core": "0.3.1" } }, "sha512-20zbQfqsjcECT0gvnAw4zONJDt3XQgNH946pZR0NV1Qxukyaz/DKB0cBnBNCCEWZg93Bah8poaR6gJCyuNX14w=="], + + "@1password/sdk-core": ["@1password/sdk-core@0.3.1", "", {}, "sha512-zFkbRznmE47kpke10OpO/9R0AF5csNWS+naFbadgXuFX1LlxY+2C28NSKbCXhLTqmcuWifBfPdZQ728GJ1i5xg=="], + "@a2a-js/sdk": ["@a2a-js/sdk@0.3.7", "", { "dependencies": { "uuid": "^11.1.0" }, "peerDependencies": { "express": "^4.21.2 || ^5.1.0" }, "optionalPeers": ["express"] }, "sha512-1WBghkOjgiKt4rPNje8jlB9VateVQXqyjlc887bY/H8yM82Hlf0+5JW8zB98BPExKAplI5XqtXVH980J6vqi+w=="], "@adobe/css-tools": ["@adobe/css-tools@4.4.4", "", {}, "sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg=="], - "@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.57", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-DREpYqW2pylgaj69gZ+K8u92bo9DaMgFdictYnY+IwYeY3bawQ4zI7l/o1VkDsBDljAx8iYz5lPURwVZNu+Xpg=="], + "@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.60", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-hpabbvnTHIP7y85TeFwkDHPveOxsMaCWTRRd1vb9My2EtJBKXGBG4eZhcR+DU98z1lXOlPRu1oGZhVNPttDW8g=="], "@ai-sdk/azure": ["@ai-sdk/azure@2.0.91", "", { "dependencies": { "@ai-sdk/openai": "2.0.89", 
"@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-9tznVSs6LGQNKKxb8pKd7CkBV9yk+a/ENpFicHCj2CmBUKefxzwJ9JbUqrlK3VF6dGZw3LXq0dWxt7/Yekaj1w=="], - "@ai-sdk/cerebras": ["@ai-sdk/cerebras@1.0.35", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.31", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-JrNdMYptrOUjNthibgBeAcBjZ/H+fXb49sSrWhOx5Aq8eUcrYvwQ2DtSAi8VraHssZu78NAnBMrgFWSUOTXFxw=="], + "@ai-sdk/cerebras": ["@ai-sdk/cerebras@1.0.36", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.32", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-zoJYL33+ieyd86FSP0Whm86D79d1lKPR7wUzh1SZ1oTxwYmsGyvIrmMf2Ll0JA9Ds2Es6qik4VaFCrjwGYRTIQ=="], "@ai-sdk/deepseek": ["@ai-sdk/deepseek@1.0.33", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-NiKjvqXI/96e/7SjZGgQH141PBqggsF7fNbjGTv4RgVWayMXp9mj0Ou2NjAUGwwxJwj/qseY0gXiDCYaHWFBkw=="], - "@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.29", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "@vercel/oidc": "3.1.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-1b7E9F/B5gex/1uCkhs+sGIbH0KsZOItHnNz3iY5ir+nc4ZUA6WOU5Cu2w1USlc+3UVbhf+H+iNLlxVjLe4VvQ=="], + "@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.35", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "@vercel/oidc": "3.1.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-fMzhC9artgY2s2GgXEWB+cECRJEHHoFJKzDpzsuneguNQ656vydPHhvDdoMjbWW+UtLc4nGf3VwlqG0t4FeQ/w=="], "@ai-sdk/google": ["@ai-sdk/google@2.0.52", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 
|| ^4.1.8" } }, "sha512-2XUnGi3f7TV4ujoAhA+Fg3idUoG/+Y2xjCRg70a1/m0DH1KSQqYaCboJ1C19y6ZHGdf5KNT20eJdswP6TvrY2g=="], - "@ai-sdk/google-vertex": ["@ai-sdk/google-vertex@3.0.97", "", { "dependencies": { "@ai-sdk/anthropic": "2.0.57", "@ai-sdk/google": "2.0.52", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "google-auth-library": "^10.5.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-s4tI7Z15i6FlbtCvS4SBRal8wRfkOXJzKxlS6cU4mJW/QfUfoVy4b22836NVNJwDvkG/HkDSfzwm/X8mn46MhA=="], + "@ai-sdk/google-vertex": ["@ai-sdk/google-vertex@3.0.100", "", { "dependencies": { "@ai-sdk/anthropic": "2.0.60", "@ai-sdk/google": "2.0.52", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "google-auth-library": "^10.5.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-4iqwr5mRdUanNbCP0S+7IDxgNtMLR/4oj5UaFwzfw6jR5yq9wKujfuKD4TCbgOExVIfP+ASQ8tS6RXtBTyuDXA=="], "@ai-sdk/groq": ["@ai-sdk/groq@2.0.34", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-wfCYkVgmVjxNA32T57KbLabVnv9aFUflJ4urJ7eWgTwbnmGQHElCTu+rJ3ydxkXSqxOkXPwMOttDm7XNrvPjmg=="], @@ -350,7 +354,7 @@ "@ai-sdk/openai": ["@ai-sdk/openai@2.0.89", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-4+qWkBCbL9HPKbgrUO/F2uXZ8GqrYxHa8SWEYIzxEJ9zvWw3ISr3t1/27O1i8MGSym+PzEyHBT48EV4LAwWaEw=="], - "@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.31", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-znBvaVHM0M6yWNerIEy3hR+O8ZK2sPcE7e2cxfb6kYLEX3k//JH5VDnRnajseVofg7LXtTCFFdjsB7WLf1BdeQ=="], + "@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.32", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, 
"peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-YspqqyJPzHjqWrjt4y/Wgc2aJgCcQj5uIJgZpq2Ar/lH30cEVhgE+keePDbjKpetD9UwNggCj7u6kO3unS23OQ=="], "@ai-sdk/perplexity": ["@ai-sdk/perplexity@2.0.23", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-aiaRvnc6mhQZKhTTSXPCjPH8Iqr5D/PfCN1hgVP/3RGTBbJtsd9HemIBSABeSdAKbsMH/PwJxgnqH75HEamcBA=="], @@ -358,9 +362,9 @@ "@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="], - "@ai-sdk/togetherai": ["@ai-sdk/togetherai@1.0.33", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.31", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-zb9sEr94tLGNtuqyMfVhLJqyfBPsfa4E21PIAo+Bm/tgw7xPqSBpQp8iDj7ydetl2wzQfw20zE97UgZv6mR/OQ=="], + "@ai-sdk/togetherai": ["@ai-sdk/togetherai@1.0.34", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.32", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-jjJmJms6kdEc4nC3MDGFJfhV8F1ifY4nolV2dbnT7BM4ab+Wkskc0GwCsJ7G7WdRMk7xDbFh4he3DPL8KJ/cyA=="], - "@ai-sdk/xai": ["@ai-sdk/xai@2.0.55", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.31", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-rzecWIMUJvja41kbH3d1CyKYxbuRRCM8J316I7HLrNxwztFQXwMKYllu4oOyQc33tlOASytKRpY4WF5QyoUkQQ=="], + "@ai-sdk/xai": ["@ai-sdk/xai@2.0.57", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.32", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, 
"peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-qeEPqKtE+bFfeY/60+wBQYwea8ULeY0KEFh+Hr4BaMzzOvAUM4vsKbeccSu0nekVC+gH5WNb/vTfVOP6m8XeIg=="], "@alloc/quick-lru": ["@alloc/quick-lru@5.2.0", "", {}, "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw=="], @@ -396,9 +400,9 @@ "@aws-sdk/client-rds-data": ["@aws-sdk/client-rds-data@3.940.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "3.940.0", "@aws-sdk/credential-provider-node": "3.940.0", "@aws-sdk/middleware-host-header": "3.936.0", "@aws-sdk/middleware-logger": "3.936.0", "@aws-sdk/middleware-recursion-detection": "3.936.0", "@aws-sdk/middleware-user-agent": "3.940.0", "@aws-sdk/region-config-resolver": "3.936.0", "@aws-sdk/types": "3.936.0", "@aws-sdk/util-endpoints": "3.936.0", "@aws-sdk/util-user-agent-browser": "3.936.0", "@aws-sdk/util-user-agent-node": "3.940.0", "@smithy/config-resolver": "^4.4.3", "@smithy/core": "^3.18.5", "@smithy/fetch-http-handler": "^5.3.6", "@smithy/hash-node": "^4.2.5", "@smithy/invalid-dependency": "^4.2.5", "@smithy/middleware-content-length": "^4.2.5", "@smithy/middleware-endpoint": "^4.3.12", "@smithy/middleware-retry": "^4.4.12", "@smithy/middleware-serde": "^4.2.6", "@smithy/middleware-stack": "^4.2.5", "@smithy/node-config-provider": "^4.3.5", "@smithy/node-http-handler": "^4.4.5", "@smithy/protocol-http": "^5.3.5", "@smithy/smithy-client": "^4.9.8", "@smithy/types": "^4.9.0", "@smithy/url-parser": "^4.2.5", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.11", "@smithy/util-defaults-mode-node": "^4.2.14", "@smithy/util-endpoints": "^3.2.5", "@smithy/util-middleware": "^4.2.5", "@smithy/util-retry": "^4.2.5", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, 
"sha512-68NH61MvS48CVPfzBNCPdCG4KnNjM+Uj/3DSw7rT9PJvdML9ARS4M2Uqco9POPw+Aj20KBumsEUd6FMVcYBXAA=="], - "@aws-sdk/client-s3": ["@aws-sdk/client-s3@3.978.0", "", { "dependencies": { "@aws-crypto/sha1-browser": "5.2.0", "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.4", "@aws-sdk/credential-provider-node": "^3.972.2", "@aws-sdk/middleware-bucket-endpoint": "^3.972.2", "@aws-sdk/middleware-expect-continue": "^3.972.2", "@aws-sdk/middleware-flexible-checksums": "^3.972.2", "@aws-sdk/middleware-host-header": "^3.972.2", "@aws-sdk/middleware-location-constraint": "^3.972.2", "@aws-sdk/middleware-logger": "^3.972.2", "@aws-sdk/middleware-recursion-detection": "^3.972.2", "@aws-sdk/middleware-sdk-s3": "^3.972.4", "@aws-sdk/middleware-ssec": "^3.972.2", "@aws-sdk/middleware-user-agent": "^3.972.4", "@aws-sdk/region-config-resolver": "^3.972.2", "@aws-sdk/signature-v4-multi-region": "3.972.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.2", "@aws-sdk/util-user-agent-node": "^3.972.2", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.0", "@smithy/eventstream-serde-browser": "^4.2.8", "@smithy/eventstream-serde-config-resolver": "^4.3.8", "@smithy/eventstream-serde-node": "^4.2.8", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-blob-browser": "^4.2.9", "@smithy/hash-node": "^4.2.8", "@smithy/hash-stream-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/md5-js": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.12", "@smithy/middleware-retry": "^4.4.29", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", 
"@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.28", "@smithy/util-defaults-mode-node": "^4.2.31", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-stream": "^4.5.10", "@smithy/util-utf8": "^4.2.0", "@smithy/util-waiter": "^4.2.8", "tslib": "^2.6.2" } }, "sha512-2chs05VbfgRNb5ZEYIwooeHCaL+DjwvrW3ElkslI71ltEqVNdeWvB7hbkLWPPKazV3kjY3H90pLDY8mMqsET+A=="], + "@aws-sdk/client-s3": ["@aws-sdk/client-s3@3.986.0", "", { "dependencies": { "@aws-crypto/sha1-browser": "5.2.0", "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/credential-provider-node": "^3.972.6", "@aws-sdk/middleware-bucket-endpoint": "^3.972.3", "@aws-sdk/middleware-expect-continue": "^3.972.3", "@aws-sdk/middleware-flexible-checksums": "^3.972.5", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-location-constraint": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-sdk-s3": "^3.972.7", "@aws-sdk/middleware-ssec": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/signature-v4-multi-region": "3.986.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.986.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/eventstream-serde-browser": "^4.2.8", "@smithy/eventstream-serde-config-resolver": "^4.3.8", "@smithy/eventstream-serde-node": "^4.2.8", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-blob-browser": "^4.2.9", "@smithy/hash-node": "^4.2.8", "@smithy/hash-stream-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/md5-js": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", 
"@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-stream": "^4.5.11", "@smithy/util-utf8": "^4.2.0", "@smithy/util-waiter": "^4.2.8", "tslib": "^2.6.2" } }, "sha512-IcDJ8shVVvbxgMe8+dLWcv6uhSwmX65PHTVGX81BhWAElPnp3CL8w/5uzOPRo4n4/bqIk9eskGVEIicw2o+SrA=="], - "@aws-sdk/client-sesv2": ["@aws-sdk/client-sesv2@3.978.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.4", "@aws-sdk/credential-provider-node": "^3.972.2", "@aws-sdk/middleware-host-header": "^3.972.2", "@aws-sdk/middleware-logger": "^3.972.2", "@aws-sdk/middleware-recursion-detection": "^3.972.2", "@aws-sdk/middleware-user-agent": "^3.972.4", "@aws-sdk/region-config-resolver": "^3.972.2", "@aws-sdk/signature-v4-multi-region": "3.972.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.2", "@aws-sdk/util-user-agent-node": "^3.972.2", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.0", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.12", "@smithy/middleware-retry": "^4.4.29", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", 
"@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.28", "@smithy/util-defaults-mode-node": "^4.2.31", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-x0jQtqJADxbqam7JJTmAlkRPptb7rA2NmDuv0UG2ImBtllB+wF+Ar8uq569V4ylFtEsfZS9yiNK5+CdmTc6+Wg=="], + "@aws-sdk/client-sesv2": ["@aws-sdk/client-sesv2@3.986.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/credential-provider-node": "^3.972.6", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/signature-v4-multi-region": "3.986.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.986.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", 
"@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-bMt0cloSsbDQ6S3u9YiHxwnbo7Gvzd8+e6PQQFC4wTUJRKHva4jY1EM2mq4j6iDy3MFYPk61HQlmoZ7krCdQEA=="], "@aws-sdk/client-sqs": ["@aws-sdk/client-sqs@3.947.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "3.947.0", "@aws-sdk/credential-provider-node": "3.947.0", "@aws-sdk/middleware-host-header": "3.936.0", "@aws-sdk/middleware-logger": "3.936.0", "@aws-sdk/middleware-recursion-detection": "3.936.0", "@aws-sdk/middleware-sdk-sqs": "3.946.0", "@aws-sdk/middleware-user-agent": "3.947.0", "@aws-sdk/region-config-resolver": "3.936.0", "@aws-sdk/types": "3.936.0", "@aws-sdk/util-endpoints": "3.936.0", "@aws-sdk/util-user-agent-browser": "3.936.0", "@aws-sdk/util-user-agent-node": "3.947.0", "@smithy/config-resolver": "^4.4.3", "@smithy/core": "^3.18.7", "@smithy/fetch-http-handler": "^5.3.6", "@smithy/hash-node": "^4.2.5", "@smithy/invalid-dependency": "^4.2.5", "@smithy/md5-js": "^4.2.5", "@smithy/middleware-content-length": "^4.2.5", "@smithy/middleware-endpoint": "^4.3.14", "@smithy/middleware-retry": "^4.4.14", "@smithy/middleware-serde": "^4.2.6", "@smithy/middleware-stack": "^4.2.5", "@smithy/node-config-provider": "^4.3.5", "@smithy/node-http-handler": "^4.4.5", "@smithy/protocol-http": "^5.3.5", "@smithy/smithy-client": "^4.9.10", "@smithy/types": "^4.9.0", "@smithy/url-parser": "^4.2.5", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.13", "@smithy/util-defaults-mode-node": "^4.2.16", "@smithy/util-endpoints": "^3.2.5", 
"@smithy/util-middleware": "^4.2.5", "@smithy/util-retry": "^4.2.5", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-8tzFyYGAAnQg+G9eB5zAe0oEo+MJMZ3YEk+8EL4uf2zG5wKxJvTBJZr6U9I1CEXYUde374OyLMyKng+sWyN+wg=="], @@ -430,29 +434,29 @@ "@aws-sdk/lib-dynamodb": ["@aws-sdk/lib-dynamodb@3.940.0", "", { "dependencies": { "@aws-sdk/core": "3.940.0", "@aws-sdk/util-dynamodb": "3.940.0", "@smithy/core": "^3.18.5", "@smithy/smithy-client": "^4.9.8", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" }, "peerDependencies": { "@aws-sdk/client-dynamodb": "^3.940.0" } }, "sha512-5ApYAix2wvJuMszj1lrpg8lm4ipoZMFO8crxtzsdAvxM8TV5bKSRQQ2GA3CMIODrBuSzpXvWueHHrfkx05ZAQw=="], - "@aws-sdk/middleware-bucket-endpoint": ["@aws-sdk/middleware-bucket-endpoint@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-arn-parser": "^3.972.2", "@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-config-provider": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-ofuXBnitp9j8t05O4NQVrpMZDECPtUhRIWdLzR35baR5njOIPY7YqNtJE+yELVpSn2m4jt2sV1ezYMBY4/Lo+w=="], + "@aws-sdk/middleware-bucket-endpoint": ["@aws-sdk/middleware-bucket-endpoint@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-arn-parser": "^3.972.2", "@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-config-provider": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-fmbgWYirF67YF1GfD7cg5N6HHQ96EyRNx/rDIrTF277/zTWVuPI2qS/ZHgofwR1NZPe/NWvoppflQY01LrbVLg=="], "@aws-sdk/middleware-endpoint-discovery": ["@aws-sdk/middleware-endpoint-discovery@3.936.0", "", { "dependencies": { "@aws-sdk/endpoint-cache": "3.893.0", "@aws-sdk/types": "3.936.0", "@smithy/node-config-provider": "^4.3.5", "@smithy/protocol-http": "^5.3.5", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-wNJZ8PDw0eQK2x4z1q8JqiDvw9l9xd36EoklVT2CIBt8FnqGdrMGjAx93RRbH3G6Fmvwoe+D3VJXbWHBlhD0Bw=="], 
"@aws-sdk/middleware-eventstream": ["@aws-sdk/middleware-eventstream@3.936.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@smithy/protocol-http": "^5.3.5", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-XQSH8gzLkk8CDUDxyt4Rdm9owTpRIPdtg2yw9Y2Wl5iSI55YQSiC3x8nM3c4Y4WqReJprunFPK225ZUDoYCfZA=="], - "@aws-sdk/middleware-expect-continue": ["@aws-sdk/middleware-expect-continue@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-d9bBQlGk1T5j5rWfof20M2tErddOSoSLDauP2/yyuXfeOfQRCSBUZNrApSxjJ9Hw+/RDGR/XL+LEOqmXxSlV3A=="], + "@aws-sdk/middleware-expect-continue": ["@aws-sdk/middleware-expect-continue@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-4msC33RZsXQpUKR5QR4HnvBSNCPLGHmB55oDiROqqgyOc+TOfVu2xgi5goA7ms6MdZLeEh2905UfWMnMMF4mRg=="], - "@aws-sdk/middleware-flexible-checksums": ["@aws-sdk/middleware-flexible-checksums@3.972.2", "", { "dependencies": { "@aws-crypto/crc32": "5.2.0", "@aws-crypto/crc32c": "5.2.0", "@aws-crypto/util": "5.2.0", "@aws-sdk/core": "^3.973.2", "@aws-sdk/crc64-nvme": "3.972.0", "@aws-sdk/types": "^3.973.1", "@smithy/is-array-buffer": "^4.2.0", "@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-stream": "^4.5.10", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-GgWVZJdzXzqhXxzNAYB3TnZCj7d5rZNdovqSIV91e97nowHVaExRoyaZ3H/Ydqot7veHGPTl8nBp464zZeLDTQ=="], + "@aws-sdk/middleware-flexible-checksums": ["@aws-sdk/middleware-flexible-checksums@3.972.5", "", { "dependencies": { "@aws-crypto/crc32": "5.2.0", "@aws-crypto/crc32c": "5.2.0", "@aws-crypto/util": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/crc64-nvme": "3.972.0", "@aws-sdk/types": "^3.973.1", "@smithy/is-array-buffer": "^4.2.0", 
"@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-stream": "^4.5.11", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-SF/1MYWx67OyCrLA4icIpWUfCkdlOi8Y1KecQ9xYxkL10GMjVdPTGPnYhAg0dw5U43Y9PVUWhAV2ezOaG+0BLg=="], "@aws-sdk/middleware-host-header": ["@aws-sdk/middleware-host-header@3.936.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@smithy/protocol-http": "^5.3.5", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-tAaObaAnsP1XnLGndfkGWFuzrJYuk9W0b/nLvol66t8FZExIAf/WdkT2NNAWOYxljVs++oHnyHBCxIlaHrzSiw=="], - "@aws-sdk/middleware-location-constraint": ["@aws-sdk/middleware-location-constraint@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-pyayzpq+VQiG1o9pEUyr6BXEJ2g2t4JIPdNxDkIHp2AhR63Gy/10WQkXTBOgRnfQ7/aLPLOnjRIWwOPp0CfUlA=="], + "@aws-sdk/middleware-location-constraint": ["@aws-sdk/middleware-location-constraint@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-nIg64CVrsXp67vbK0U1/Is8rik3huS3QkRHn2DRDx4NldrEFMgdkZGI/+cZMKD9k4YOS110Dfu21KZLHrFA/1g=="], "@aws-sdk/middleware-logger": ["@aws-sdk/middleware-logger@3.936.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-aPSJ12d3a3Ea5nyEnLbijCaaYJT2QjQ9iW+zGh5QcZYXmOGWbKVyPSxmVOboZQG+c1M8t6d2O7tqrwzIq8L8qw=="], "@aws-sdk/middleware-recursion-detection": ["@aws-sdk/middleware-recursion-detection@3.936.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@aws/lambda-invoke-store": "^0.2.0", "@smithy/protocol-http": "^5.3.5", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-l4aGbHpXM45YNgXggIux1HgsCVAvvBoqHPkqLnqMl9QVapfuSTjJHfDYDsx1Xxct6/m7qSMUzanBALhiaGO2fA=="], - "@aws-sdk/middleware-sdk-s3": ["@aws-sdk/middleware-sdk-s3@3.972.4", "", { "dependencies": { 
"@aws-sdk/core": "^3.973.4", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-arn-parser": "^3.972.2", "@smithy/core": "^3.22.0", "@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-config-provider": "^4.2.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-stream": "^4.5.10", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-lradfn72Td7lswhZKi86VKRNkDtmQR7bq9shX1kaPK1itjThxfcx7ogXSvMm/0cuqoYGic8UUXQOaK4kpU933g=="], + "@aws-sdk/middleware-sdk-s3": ["@aws-sdk/middleware-sdk-s3@3.972.7", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-arn-parser": "^3.972.2", "@smithy/core": "^3.22.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-config-provider": "^4.2.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-stream": "^4.5.11", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-VtZ7tMIw18VzjG+I6D6rh2eLkJfTtByiFoCIauGDtTTPBEUMQUiGaJ/zZrPlCY6BsvLLeFKz3+E5mntgiOWmIg=="], "@aws-sdk/middleware-sdk-sqs": ["@aws-sdk/middleware-sdk-sqs@3.946.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@smithy/smithy-client": "^4.9.10", "@smithy/types": "^4.9.0", "@smithy/util-hex-encoding": "^4.2.0", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-+KedlcXUqA1Bdafvw264SWvwyHYvFxn47y831tEKc85fp5VF5LGE9uMlU13hsWySftLmDd/ZFwSQI6RN2zSpAg=="], - "@aws-sdk/middleware-ssec": ["@aws-sdk/middleware-ssec@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-HJ3OmQnlQ1es6esrDWnx3nVPhBAN89WaFCzsDcb6oT7TMjBPUfZ5+1BpI7B0Hnme8cc6kp7qc4cgo2plrlROJA=="], + "@aws-sdk/middleware-ssec": ["@aws-sdk/middleware-ssec@3.972.3", "", { "dependencies": { "@aws-sdk/types": 
"^3.973.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-dU6kDuULN3o3jEHcjm0c4zWJlY1zWVkjG9NPe9qxYLLpcbdj5kRYBS2DdWYD+1B9f910DezRuws7xDEqKkHQIg=="], "@aws-sdk/middleware-user-agent": ["@aws-sdk/middleware-user-agent@3.940.0", "", { "dependencies": { "@aws-sdk/core": "3.940.0", "@aws-sdk/types": "3.936.0", "@aws-sdk/util-endpoints": "3.936.0", "@smithy/core": "^3.18.5", "@smithy/protocol-http": "^5.3.5", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-nJbLrUj6fY+l2W2rIB9P4Qvpiy0tnTdg/dmixRxrU1z3e8wBdspJlyE+AZN4fuVbeL6rrRrO/zxQC1bB3cw5IA=="], @@ -462,9 +466,9 @@ "@aws-sdk/region-config-resolver": ["@aws-sdk/region-config-resolver@3.936.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@smithy/config-resolver": "^4.4.3", "@smithy/node-config-provider": "^4.3.5", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-wOKhzzWsshXGduxO4pqSiNyL9oUtk4BEvjWm9aaq6Hmfdoydq6v6t0rAGHWPjFwy9z2haovGRi3C8IxdMB4muw=="], - "@aws-sdk/s3-request-presigner": ["@aws-sdk/s3-request-presigner@3.978.0", "", { "dependencies": { "@aws-sdk/signature-v4-multi-region": "3.972.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-format-url": "^3.972.2", "@smithy/middleware-endpoint": "^4.4.12", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-P+SZrny/BT/x9w4BBM9IUr17jjDL7Rg/FjXKqh9viV81i/68Eu6gHBtS/JzvNF+rpG5gdZcMnBSANZqbnEbDmA=="], + "@aws-sdk/s3-request-presigner": ["@aws-sdk/s3-request-presigner@3.986.0", "", { "dependencies": { "@aws-sdk/signature-v4-multi-region": "3.986.0", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-format-url": "^3.972.3", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-+yopxtoXwRXZ2Ai9H4GzkN+T2D07sGrURYcm7Eh2OQe3p+Ys/3VrR6UrzILssaJGYtR2vQqVKnGJBHVYqaM1EQ=="], - "@aws-sdk/signature-v4-multi-region": 
["@aws-sdk/signature-v4-multi-region@3.972.0", "", { "dependencies": { "@aws-sdk/middleware-sdk-s3": "3.972.0", "@aws-sdk/types": "3.972.0", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-2udiRijmjpN81Pvajje4TsjbXDZNP6K9bYUanBYH8hXa/tZG5qfGCySD+TyX0sgDxCQmEDMg3LaQdfjNHBDEgQ=="], + "@aws-sdk/signature-v4-multi-region": ["@aws-sdk/signature-v4-multi-region@3.986.0", "", { "dependencies": { "@aws-sdk/middleware-sdk-s3": "^3.972.7", "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-Upw+rw7wCH93E6QWxqpAqJLrUmJYVUAWrk4tCOBnkeuwzGERZvJFL5UQ6TAJFj9T18Ih+vNFaACh8J5aP4oTBw=="], "@aws-sdk/token-providers": ["@aws-sdk/token-providers@3.940.0", "", { "dependencies": { "@aws-sdk/core": "3.940.0", "@aws-sdk/nested-clients": "3.940.0", "@aws-sdk/types": "3.936.0", "@smithy/property-provider": "^4.2.5", "@smithy/shared-ini-file-loader": "^4.4.0", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-k5qbRe/ZFjW9oWEdzLIa2twRVIEx7p/9rutofyrRysrtEnYh3HAWCngAnwbgKMoiwa806UzcTRx0TjyEpnKcCg=="], @@ -476,7 +480,7 @@ "@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.936.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@smithy/types": "^4.9.0", "@smithy/url-parser": "^4.2.5", "@smithy/util-endpoints": "^3.2.5", "tslib": "^2.6.2" } }, "sha512-0Zx3Ntdpu+z9Wlm7JKUBOzS9EunwKAb4KdGUQQxDqh5Lc3ta5uBoub+FgmVuzwnmBu9U1Os8UuwVTH0Lgu+P5w=="], - "@aws-sdk/util-format-url": ["@aws-sdk/util-format-url@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/querystring-builder": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-RCd8eur5wzDLgFBvbBhoFQ1bw1wxHJiN88MQ82IiJBs6OGXTWaf0oFgLbK06qJvnVUqL13t3jEnlYPHPNdgBWw=="], + "@aws-sdk/util-format-url": ["@aws-sdk/util-format-url@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/querystring-builder": 
"^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-n7F2ycckcKFXa01vAsT/SJdjFHfKH9s96QHcs5gn8AaaigASICeME8WdUL9uBp8XV/OVwEt8+6gzn6KFUgQa8g=="], "@aws-sdk/util-locate-window": ["@aws-sdk/util-locate-window@3.965.4", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-H1onv5SkgPBK2P6JR2MjGgbOnttoNzSPIRoeZTNPZYyaplwGg50zS3amXvXqF0/qfXpWEC9rLWU564QTB9bSog=="], @@ -500,7 +504,7 @@ "@azure/core-client": ["@azure/core-client@1.10.1", "", { "dependencies": { "@azure/abort-controller": "^2.1.2", "@azure/core-auth": "^1.10.0", "@azure/core-rest-pipeline": "^1.22.0", "@azure/core-tracing": "^1.3.0", "@azure/core-util": "^1.13.0", "@azure/logger": "^1.3.0", "tslib": "^2.6.2" } }, "sha512-Nh5PhEOeY6PrnxNPsEHRr9eimxLwgLlpmguQaHKBinFYA/RU9+kOYVOQqOrTsCL+KSxrLLl1gD8Dk5BFW/7l/w=="], - "@azure/core-http-compat": ["@azure/core-http-compat@2.3.1", "", { "dependencies": { "@azure/abort-controller": "^2.1.2", "@azure/core-client": "^1.10.0", "@azure/core-rest-pipeline": "^1.22.0" } }, "sha512-az9BkXND3/d5VgdRRQVkiJb2gOmDU8Qcq4GvjtBmDICNiQ9udFmDk4ZpSB5Qq1OmtDJGlQAfBaS4palFsazQ5g=="], + "@azure/core-http-compat": ["@azure/core-http-compat@2.3.2", "", { "dependencies": { "@azure/abort-controller": "^2.1.2" }, "peerDependencies": { "@azure/core-client": "^1.10.0", "@azure/core-rest-pipeline": "^1.22.0" } }, "sha512-Tf6ltdKzOJEgxZeWLCjMxrxbodB/ZeCbzzA1A2qHbhzAjzjHoBVSUeSl/baT/oHAxhc4qdqVaDKnc2+iE932gw=="], "@azure/core-lro": ["@azure/core-lro@2.7.2", "", { "dependencies": { "@azure/abort-controller": "^2.0.0", "@azure/core-util": "^1.2.0", "@azure/logger": "^1.0.0", "tslib": "^2.6.2" } }, "sha512-0YIpccoX8m/k00O7mDDMdJpbr6mf1yWo2dfmxt5A8XVZVVMz2SSKaEbMCeJRvgQ0IaSlqhjT47p4hVIRRy90xw=="], @@ -518,13 +522,13 @@ "@azure/storage-blob": ["@azure/storage-blob@12.27.0", "", { "dependencies": { "@azure/abort-controller": "^2.1.2", "@azure/core-auth": "^1.4.0", "@azure/core-client": "^1.6.2", "@azure/core-http-compat": "^2.0.0", "@azure/core-lro": "^2.2.0", "@azure/core-paging": 
"^1.1.1", "@azure/core-rest-pipeline": "^1.10.1", "@azure/core-tracing": "^1.1.2", "@azure/core-util": "^1.6.1", "@azure/core-xml": "^1.4.3", "@azure/logger": "^1.0.0", "events": "^3.0.0", "tslib": "^2.2.0" } }, "sha512-IQjj9RIzAKatmNca3D6bT0qJ+Pkox1WZGOg2esJF2YLHb45pQKOwGPIAV+w3rfgkj7zV3RMxpn/c6iftzSOZJQ=="], - "@babel/code-frame": ["@babel/code-frame@7.28.6", "", { "dependencies": { "@babel/helper-validator-identifier": "^7.28.5", "js-tokens": "^4.0.0", "picocolors": "^1.1.1" } }, "sha512-JYgintcMjRiCvS8mMECzaEn+m3PfoQiyqukOMCCVQtoJGYJw8j/8LBJEiqkHLkfwCcs74E3pbAUFNg7d9VNJ+Q=="], + "@babel/code-frame": ["@babel/code-frame@7.29.0", "", { "dependencies": { "@babel/helper-validator-identifier": "^7.28.5", "js-tokens": "^4.0.0", "picocolors": "^1.1.1" } }, "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw=="], - "@babel/compat-data": ["@babel/compat-data@7.28.6", "", {}, "sha512-2lfu57JtzctfIrcGMz992hyLlByuzgIk58+hhGCxjKZ3rWI82NnVLjXcaTqkI2NvlcvOskZaiZ5kjUALo3Lpxg=="], + "@babel/compat-data": ["@babel/compat-data@7.29.0", "", {}, "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg=="], - "@babel/core": ["@babel/core@7.28.6", "", { "dependencies": { "@babel/code-frame": "^7.28.6", "@babel/generator": "^7.28.6", "@babel/helper-compilation-targets": "^7.28.6", "@babel/helper-module-transforms": "^7.28.6", "@babel/helpers": "^7.28.6", "@babel/parser": "^7.28.6", "@babel/template": "^7.28.6", "@babel/traverse": "^7.28.6", "@babel/types": "^7.28.6", "@jridgewell/remapping": "^2.3.5", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", "json5": "^2.2.3", "semver": "^6.3.1" } }, "sha512-H3mcG6ZDLTlYfaSNi0iOKkigqMFvkTKlGUYlD8GW7nNOYRrevuA46iTypPyv+06V3fEmvvazfntkBU34L0azAw=="], + "@babel/core": ["@babel/core@7.29.0", "", { "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", "@babel/helper-compilation-targets": "^7.28.6", 
"@babel/helper-module-transforms": "^7.28.6", "@babel/helpers": "^7.28.6", "@babel/parser": "^7.29.0", "@babel/template": "^7.28.6", "@babel/traverse": "^7.29.0", "@babel/types": "^7.29.0", "@jridgewell/remapping": "^2.3.5", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", "json5": "^2.2.3", "semver": "^6.3.1" } }, "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA=="], - "@babel/generator": ["@babel/generator@7.28.6", "", { "dependencies": { "@babel/parser": "^7.28.6", "@babel/types": "^7.28.6", "@jridgewell/gen-mapping": "^0.3.12", "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" } }, "sha512-lOoVRwADj8hjf7al89tvQ2a1lf53Z+7tiXMgpZJL3maQPDxh0DgLMN62B2MKUOFcoodBHLMbDM6WAbKgNy5Suw=="], + "@babel/generator": ["@babel/generator@7.29.1", "", { "dependencies": { "@babel/parser": "^7.29.0", "@babel/types": "^7.29.0", "@jridgewell/gen-mapping": "^0.3.12", "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" } }, "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw=="], "@babel/helper-compilation-targets": ["@babel/helper-compilation-targets@7.28.6", "", { "dependencies": { "@babel/compat-data": "^7.28.6", "@babel/helper-validator-option": "^7.27.1", "browserslist": "^4.24.0", "lru-cache": "^5.1.1", "semver": "^6.3.1" } }, "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA=="], @@ -544,7 +548,7 @@ "@babel/helpers": ["@babel/helpers@7.28.6", "", { "dependencies": { "@babel/template": "^7.28.6", "@babel/types": "^7.28.6" } }, "sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw=="], - "@babel/parser": ["@babel/parser@7.28.6", "", { "dependencies": { "@babel/types": "^7.28.6" }, "bin": "./bin/babel-parser.js" }, "sha512-TeR9zWR18BvbfPmGbLampPMW+uW1NZnJlRuuHso8i87QZNq2JRF9i6RgxRqtEq+wQGsS19NNTWr2duhnE49mfQ=="], + "@babel/parser": ["@babel/parser@7.29.0", "", { 
"dependencies": { "@babel/types": "^7.29.0" }, "bin": "./bin/babel-parser.js" }, "sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww=="], "@babel/plugin-transform-react-jsx-self": ["@babel/plugin-transform-react-jsx-self@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw=="], @@ -554,9 +558,9 @@ "@babel/template": ["@babel/template@7.28.6", "", { "dependencies": { "@babel/code-frame": "^7.28.6", "@babel/parser": "^7.28.6", "@babel/types": "^7.28.6" } }, "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ=="], - "@babel/traverse": ["@babel/traverse@7.28.6", "", { "dependencies": { "@babel/code-frame": "^7.28.6", "@babel/generator": "^7.28.6", "@babel/helper-globals": "^7.28.0", "@babel/parser": "^7.28.6", "@babel/template": "^7.28.6", "@babel/types": "^7.28.6", "debug": "^4.3.1" } }, "sha512-fgWX62k02qtjqdSNTAGxmKYY/7FSL9WAS1o2Hu5+I5m9T0yxZzr4cnrfXQ/MX0rIifthCSs6FKTlzYbJcPtMNg=="], + "@babel/traverse": ["@babel/traverse@7.29.0", "", { "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", "@babel/helper-globals": "^7.28.0", "@babel/parser": "^7.29.0", "@babel/template": "^7.28.6", "@babel/types": "^7.29.0", "debug": "^4.3.1" } }, "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA=="], - "@babel/types": ["@babel/types@7.28.6", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.28.5" } }, "sha512-0ZrskXVEHSWIqZM/sQZ4EV3jZJXRkio/WCxaqKZP1g//CEWEPSfeZFcms4XeKBCHU0ZKnIkdJeU/kF+eRp5lBg=="], + "@babel/types": ["@babel/types@7.29.0", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.28.5" } }, 
"sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A=="], "@bcoe/v8-coverage": ["@bcoe/v8-coverage@1.0.2", "", {}, "sha512-6zABk/ECA/QYSCQ1NGiVwwbQerUCZ+TQbp64Q3AgmfNvurHH0j8TtXa1qbShXA6qqkpAj4V5W8pP6mLe1mcMqA=="], @@ -760,9 +764,9 @@ "@isaacs/balanced-match": ["@isaacs/balanced-match@4.0.1", "", {}, "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ=="], - "@isaacs/brace-expansion": ["@isaacs/brace-expansion@5.0.0", "", { "dependencies": { "@isaacs/balanced-match": "^4.0.1" } }, "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA=="], + "@isaacs/brace-expansion": ["@isaacs/brace-expansion@5.0.1", "", { "dependencies": { "@isaacs/balanced-match": "^4.0.1" } }, "sha512-WMz71T1JS624nWj2n2fnYAuPovhv7EUhk69R6i9dsVyzxt5eM3bjwvgk9L+APE1TRscGysAVMANkB0jh0LQZrQ=="], - "@isaacs/cliui": ["@isaacs/cliui@8.0.2", "", { "dependencies": { "string-width": "^5.1.2", "string-width-cjs": "npm:string-width@^4.2.0", "strip-ansi": "^7.0.1", "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", "wrap-ansi": "^8.1.0", "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" } }, "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA=="], + "@isaacs/cliui": ["@isaacs/cliui@9.0.0", "", {}, "sha512-AokJm4tuBHillT+FpMtxQ60n8ObyXBatq7jD2/JA9dxbDDokKQm8KMht5ibGzLVU9IJDIKK4TPKgMHEYMn3lMg=="], "@isaacs/fs-minipass": ["@isaacs/fs-minipass@4.0.1", "", { "dependencies": { "minipass": "^7.0.4" } }, "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w=="], @@ -800,29 +804,29 @@ "@mongodb-js/saslprep": ["@mongodb-js/saslprep@1.4.5", "", { "dependencies": { "sparse-bitfield": "^3.0.3" } }, "sha512-k64Lbyb7ycCSXHSLzxVdb2xsKGPMvYZfCICXvDsI8Z65CeWQzTEKS4YmGbnqw+U9RBvLPTsB6UCmwkgsDTGWIw=="], - "@napi-rs/canvas": ["@napi-rs/canvas@0.1.89", "", { "optionalDependencies": { "@napi-rs/canvas-android-arm64": "0.1.89", 
"@napi-rs/canvas-darwin-arm64": "0.1.89", "@napi-rs/canvas-darwin-x64": "0.1.89", "@napi-rs/canvas-linux-arm-gnueabihf": "0.1.89", "@napi-rs/canvas-linux-arm64-gnu": "0.1.89", "@napi-rs/canvas-linux-arm64-musl": "0.1.89", "@napi-rs/canvas-linux-riscv64-gnu": "0.1.89", "@napi-rs/canvas-linux-x64-gnu": "0.1.89", "@napi-rs/canvas-linux-x64-musl": "0.1.89", "@napi-rs/canvas-win32-arm64-msvc": "0.1.89", "@napi-rs/canvas-win32-x64-msvc": "0.1.89" } }, "sha512-7GjmkMirJHejeALCqUnZY3QwID7bbumOiLrqq2LKgxrdjdmxWQBTc6rcASa2u8wuWrH7qo4/4n/VNrOwCoKlKg=="], + "@napi-rs/canvas": ["@napi-rs/canvas@0.1.91", "", { "optionalDependencies": { "@napi-rs/canvas-android-arm64": "0.1.91", "@napi-rs/canvas-darwin-arm64": "0.1.91", "@napi-rs/canvas-darwin-x64": "0.1.91", "@napi-rs/canvas-linux-arm-gnueabihf": "0.1.91", "@napi-rs/canvas-linux-arm64-gnu": "0.1.91", "@napi-rs/canvas-linux-arm64-musl": "0.1.91", "@napi-rs/canvas-linux-riscv64-gnu": "0.1.91", "@napi-rs/canvas-linux-x64-gnu": "0.1.91", "@napi-rs/canvas-linux-x64-musl": "0.1.91", "@napi-rs/canvas-win32-arm64-msvc": "0.1.91", "@napi-rs/canvas-win32-x64-msvc": "0.1.91" } }, "sha512-eeIe1GoB74P1B0Nkw6pV8BCQ3hfCfvyYr4BntzlCsnFXzVJiPMDnLeIx3gVB0xQMblHYnjK/0nCLvirEhOjr5g=="], - "@napi-rs/canvas-android-arm64": ["@napi-rs/canvas-android-arm64@0.1.89", "", { "os": "android", "cpu": "arm64" }, "sha512-CXxQTXsjtQqKGENS8Ejv9pZOFJhOPIl2goenS+aU8dY4DygvkyagDhy/I07D1YLqrDtPvLEX5zZHt8qUdnuIpQ=="], + "@napi-rs/canvas-android-arm64": ["@napi-rs/canvas-android-arm64@0.1.91", "", { "os": "android", "cpu": "arm64" }, "sha512-SLLzXXgSnfct4zy/BVAfweZQkYkPJsNsJ2e5DOE8DFEHC6PufyUrwb12yqeu2So2IOIDpWJJaDAxKY/xpy6MYQ=="], - "@napi-rs/canvas-darwin-arm64": ["@napi-rs/canvas-darwin-arm64@0.1.89", "", { "os": "darwin", "cpu": "arm64" }, "sha512-k29cR/Zl20WLYM7M8YePevRu2VQRaKcRedYr1V/8FFHkyIQ8kShEV+MPoPGi+znvmd17Eqjy2Pk2F2kpM2umVg=="], + "@napi-rs/canvas-darwin-arm64": ["@napi-rs/canvas-darwin-arm64@0.1.91", "", { "os": "darwin", "cpu": "arm64" }, 
"sha512-bzdbCjIjw3iRuVFL+uxdSoMra/l09ydGNX9gsBxO/zg+5nlppscIpj6gg+nL6VNG85zwUarDleIrUJ+FWHvmuA=="], - "@napi-rs/canvas-darwin-x64": ["@napi-rs/canvas-darwin-x64@0.1.89", "", { "os": "darwin", "cpu": "x64" }, "sha512-iUragqhBrA5FqU13pkhYBDbUD1WEAIlT8R2+fj6xHICY2nemzwMUI8OENDhRh7zuL06YDcRwENbjAVxOmaX9jg=="], + "@napi-rs/canvas-darwin-x64": ["@napi-rs/canvas-darwin-x64@0.1.91", "", { "os": "darwin", "cpu": "x64" }, "sha512-q3qpkpw0IsG9fAS/dmcGIhCVoNxj8ojbexZKWwz3HwxlEWsLncEQRl4arnxrwbpLc2nTNTyj4WwDn7QR5NDAaA=="], - "@napi-rs/canvas-linux-arm-gnueabihf": ["@napi-rs/canvas-linux-arm-gnueabihf@0.1.89", "", { "os": "linux", "cpu": "arm" }, "sha512-y3SM9sfDWasY58ftoaI09YBFm35Ig8tosZqgahLJ2WGqawCusGNPV9P0/4PsrLOCZqGg629WxexQMY25n7zcvA=="], + "@napi-rs/canvas-linux-arm-gnueabihf": ["@napi-rs/canvas-linux-arm-gnueabihf@0.1.91", "", { "os": "linux", "cpu": "arm" }, "sha512-Io3g8wJZVhK8G+Fpg1363BE90pIPqg+ZbeehYNxPWDSzbgwU3xV0l8r/JBzODwC7XHi1RpFEk+xyUTMa2POj6w=="], - "@napi-rs/canvas-linux-arm64-gnu": ["@napi-rs/canvas-linux-arm64-gnu@0.1.89", "", { "os": "linux", "cpu": "arm64" }, "sha512-NEoF9y8xq5fX8HG8aZunBom1ILdTwt7ayBzSBIwrmitk7snj4W6Fz/yN/ZOmlM1iyzHDNX5Xn0n+VgWCF8BEdA=="], + "@napi-rs/canvas-linux-arm64-gnu": ["@napi-rs/canvas-linux-arm64-gnu@0.1.91", "", { "os": "linux", "cpu": "arm64" }, "sha512-HBnto+0rxx1bQSl8bCWA9PyBKtlk2z/AI32r3cu4kcNO+M/5SD4b0v1MWBWZyqMQyxFjWgy3ECyDjDKMC6tY1A=="], - "@napi-rs/canvas-linux-arm64-musl": ["@napi-rs/canvas-linux-arm64-musl@0.1.89", "", { "os": "linux", "cpu": "arm64" }, "sha512-UQQkIEzV12/l60j1ziMjZ+mtodICNUbrd205uAhbyTw0t60CrC/EsKb5/aJWGq1wM0agvcgZV72JJCKfLS6+4w=="], + "@napi-rs/canvas-linux-arm64-musl": ["@napi-rs/canvas-linux-arm64-musl@0.1.91", "", { "os": "linux", "cpu": "arm64" }, "sha512-/eJtVe2Xw9A86I4kwXpxxoNagdGclu12/NSMsfoL8q05QmeRCbfjhg1PJS7ENAuAvaiUiALGrbVfeY1KU1gztQ=="], - "@napi-rs/canvas-linux-riscv64-gnu": ["@napi-rs/canvas-linux-riscv64-gnu@0.1.89", "", { "os": "linux", "cpu": "none" }, 
"sha512-1/VmEoFaIO6ONeeEMGoWF17wOYZOl5hxDC1ios2Bkz/oQjbJJ8DY/X22vWTmvuUKWWhBVlo63pxLGZbjJU/heA=="], + "@napi-rs/canvas-linux-riscv64-gnu": ["@napi-rs/canvas-linux-riscv64-gnu@0.1.91", "", { "os": "linux", "cpu": "none" }, "sha512-floNK9wQuRWevUhhXRcuis7h0zirdytVxPgkonWO+kQlbvxV7gEUHGUFQyq4n55UHYFwgck1SAfJ1HuXv/+ppQ=="], - "@napi-rs/canvas-linux-x64-gnu": ["@napi-rs/canvas-linux-x64-gnu@0.1.89", "", { "os": "linux", "cpu": "x64" }, "sha512-ebLuqkCuaPIkKgKH9q4+pqWi1tkPOfiTk5PM1LKR1tB9iO9sFNVSIgwEp+SJreTSbA2DK5rW8lQXiN78SjtcvA=="], + "@napi-rs/canvas-linux-x64-gnu": ["@napi-rs/canvas-linux-x64-gnu@0.1.91", "", { "os": "linux", "cpu": "x64" }, "sha512-c3YDqBdf7KETuZy2AxsHFMsBBX1dWT43yFfWUq+j1IELdgesWtxf/6N7csi3VPf6VA3PmnT9EhMyb+M1wfGtqw=="], - "@napi-rs/canvas-linux-x64-musl": ["@napi-rs/canvas-linux-x64-musl@0.1.89", "", { "os": "linux", "cpu": "x64" }, "sha512-w+5qxHzplvA4BkHhCaizNMLLXiI+CfP84YhpHm/PqMub4u8J0uOAv+aaGv40rYEYra5hHRWr9LUd6cfW32o9/A=="], + "@napi-rs/canvas-linux-x64-musl": ["@napi-rs/canvas-linux-x64-musl@0.1.91", "", { "os": "linux", "cpu": "x64" }, "sha512-RpZ3RPIwgEcNBHSHSX98adm+4VP8SMT5FN6250s5jQbWpX/XNUX5aLMfAVJS/YnDjS1QlsCgQxFOPU0aCCWgag=="], - "@napi-rs/canvas-win32-arm64-msvc": ["@napi-rs/canvas-win32-arm64-msvc@0.1.89", "", { "os": "win32", "cpu": "arm64" }, "sha512-DmyXa5lJHcjOsDC78BM3bnEECqbK3xASVMrKfvtT/7S7Z8NGQOugvu+L7b41V6cexCd34mBWgMOsjoEBceeB1Q=="], + "@napi-rs/canvas-win32-arm64-msvc": ["@napi-rs/canvas-win32-arm64-msvc@0.1.91", "", { "os": "win32", "cpu": "arm64" }, "sha512-gF8MBp4X134AgVurxqlCdDA2qO0WaDdi9o6Sd5rWRVXRhWhYQ6wkdEzXNLIrmmros0Tsp2J0hQzx4ej/9O8trQ=="], - "@napi-rs/canvas-win32-x64-msvc": ["@napi-rs/canvas-win32-x64-msvc@0.1.89", "", { "os": "win32", "cpu": "x64" }, "sha512-WMej0LZrIqIncQcx0JHaMXlnAG7sncwJh7obs/GBgp0xF9qABjwoRwIooMWCZkSansapKGNUHhamY6qEnFN7gA=="], + "@napi-rs/canvas-win32-x64-msvc": ["@napi-rs/canvas-win32-x64-msvc@0.1.91", "", { "os": "win32", "cpu": "x64" }, 
"sha512-++gtW9EV/neKI8TshD8WFxzBYALSPag2kFRahIJV+LYsyt5kBn21b1dBhEUDHf7O+wiZmuFCeUa7QKGHnYRZBA=="], "@next/env": ["@next/env@16.1.0-canary.21", "", {}, "sha512-J5inWwxC8EpAr/a2GApmQK1KkftG7K2nM6SuzNvciNaPt9Z0AHFeazvFuQxbvXn024p+akBHRlo8P7ZJRoU7kA=="], @@ -1164,75 +1168,75 @@ "@rolldown/pluginutils": ["@rolldown/pluginutils@1.0.0-beta.27", "", {}, "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA=="], - "@rollup/rollup-android-arm-eabi": ["@rollup/rollup-android-arm-eabi@4.57.0", "", { "os": "android", "cpu": "arm" }, "sha512-tPgXB6cDTndIe1ah7u6amCI1T0SsnlOuKgg10Xh3uizJk4e5M1JGaUMk7J4ciuAUcFpbOiNhm2XIjP9ON0dUqA=="], + "@rollup/rollup-android-arm-eabi": ["@rollup/rollup-android-arm-eabi@4.57.1", "", { "os": "android", "cpu": "arm" }, "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg=="], - "@rollup/rollup-android-arm64": ["@rollup/rollup-android-arm64@4.57.0", "", { "os": "android", "cpu": "arm64" }, "sha512-sa4LyseLLXr1onr97StkU1Nb7fWcg6niokTwEVNOO7awaKaoRObQ54+V/hrF/BP1noMEaaAW6Fg2d/CfLiq3Mg=="], + "@rollup/rollup-android-arm64": ["@rollup/rollup-android-arm64@4.57.1", "", { "os": "android", "cpu": "arm64" }, "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w=="], - "@rollup/rollup-darwin-arm64": ["@rollup/rollup-darwin-arm64@4.57.0", "", { "os": "darwin", "cpu": "arm64" }, "sha512-/NNIj9A7yLjKdmkx5dC2XQ9DmjIECpGpwHoGmA5E1AhU0fuICSqSWScPhN1yLCkEdkCwJIDu2xIeLPs60MNIVg=="], + "@rollup/rollup-darwin-arm64": ["@rollup/rollup-darwin-arm64@4.57.1", "", { "os": "darwin", "cpu": "arm64" }, "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg=="], - "@rollup/rollup-darwin-x64": ["@rollup/rollup-darwin-x64@4.57.0", "", { "os": "darwin", "cpu": "x64" }, "sha512-xoh8abqgPrPYPr7pTYipqnUi1V3em56JzE/HgDgitTqZBZ3yKCWI+7KUkceM6tNweyUKYru1UMi7FC060RyKwA=="], + "@rollup/rollup-darwin-x64": 
["@rollup/rollup-darwin-x64@4.57.1", "", { "os": "darwin", "cpu": "x64" }, "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w=="], - "@rollup/rollup-freebsd-arm64": ["@rollup/rollup-freebsd-arm64@4.57.0", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-PCkMh7fNahWSbA0OTUQ2OpYHpjZZr0hPr8lId8twD7a7SeWrvT3xJVyza+dQwXSSq4yEQTMoXgNOfMCsn8584g=="], + "@rollup/rollup-freebsd-arm64": ["@rollup/rollup-freebsd-arm64@4.57.1", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug=="], - "@rollup/rollup-freebsd-x64": ["@rollup/rollup-freebsd-x64@4.57.0", "", { "os": "freebsd", "cpu": "x64" }, "sha512-1j3stGx+qbhXql4OCDZhnK7b01s6rBKNybfsX+TNrEe9JNq4DLi1yGiR1xW+nL+FNVvI4D02PUnl6gJ/2y6WJA=="], + "@rollup/rollup-freebsd-x64": ["@rollup/rollup-freebsd-x64@4.57.1", "", { "os": "freebsd", "cpu": "x64" }, "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q=="], - "@rollup/rollup-linux-arm-gnueabihf": ["@rollup/rollup-linux-arm-gnueabihf@4.57.0", "", { "os": "linux", "cpu": "arm" }, "sha512-eyrr5W08Ms9uM0mLcKfM/Uzx7hjhz2bcjv8P2uynfj0yU8GGPdz8iYrBPhiLOZqahoAMB8ZiolRZPbbU2MAi6Q=="], + "@rollup/rollup-linux-arm-gnueabihf": ["@rollup/rollup-linux-arm-gnueabihf@4.57.1", "", { "os": "linux", "cpu": "arm" }, "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw=="], - "@rollup/rollup-linux-arm-musleabihf": ["@rollup/rollup-linux-arm-musleabihf@4.57.0", "", { "os": "linux", "cpu": "arm" }, "sha512-Xds90ITXJCNyX9pDhqf85MKWUI4lqjiPAipJ8OLp8xqI2Ehk+TCVhF9rvOoN8xTbcafow3QOThkNnrM33uCFQA=="], + "@rollup/rollup-linux-arm-musleabihf": ["@rollup/rollup-linux-arm-musleabihf@4.57.1", "", { "os": "linux", "cpu": "arm" }, "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw=="], - "@rollup/rollup-linux-arm64-gnu": ["@rollup/rollup-linux-arm64-gnu@4.57.0", 
"", { "os": "linux", "cpu": "arm64" }, "sha512-Xws2KA4CLvZmXjy46SQaXSejuKPhwVdaNinldoYfqruZBaJHqVo6hnRa8SDo9z7PBW5x84SH64+izmldCgbezw=="], + "@rollup/rollup-linux-arm64-gnu": ["@rollup/rollup-linux-arm64-gnu@4.57.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g=="], - "@rollup/rollup-linux-arm64-musl": ["@rollup/rollup-linux-arm64-musl@4.57.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-hrKXKbX5FdaRJj7lTMusmvKbhMJSGWJ+w++4KmjiDhpTgNlhYobMvKfDoIWecy4O60K6yA4SnztGuNTQF+Lplw=="], + "@rollup/rollup-linux-arm64-musl": ["@rollup/rollup-linux-arm64-musl@4.57.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q=="], - "@rollup/rollup-linux-loong64-gnu": ["@rollup/rollup-linux-loong64-gnu@4.57.0", "", { "os": "linux", "cpu": "none" }, "sha512-6A+nccfSDGKsPm00d3xKcrsBcbqzCTAukjwWK6rbuAnB2bHaL3r9720HBVZ/no7+FhZLz/U3GwwZZEh6tOSI8Q=="], + "@rollup/rollup-linux-loong64-gnu": ["@rollup/rollup-linux-loong64-gnu@4.57.1", "", { "os": "linux", "cpu": "none" }, "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA=="], - "@rollup/rollup-linux-loong64-musl": ["@rollup/rollup-linux-loong64-musl@4.57.0", "", { "os": "linux", "cpu": "none" }, "sha512-4P1VyYUe6XAJtQH1Hh99THxr0GKMMwIXsRNOceLrJnaHTDgk1FTcTimDgneRJPvB3LqDQxUmroBclQ1S0cIJwQ=="], + "@rollup/rollup-linux-loong64-musl": ["@rollup/rollup-linux-loong64-musl@4.57.1", "", { "os": "linux", "cpu": "none" }, "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw=="], - "@rollup/rollup-linux-ppc64-gnu": ["@rollup/rollup-linux-ppc64-gnu@4.57.0", "", { "os": "linux", "cpu": "ppc64" }, "sha512-8Vv6pLuIZCMcgXre6c3nOPhE0gjz1+nZP6T+hwWjr7sVH8k0jRkH+XnfjjOTglyMBdSKBPPz54/y1gToSKwrSQ=="], + "@rollup/rollup-linux-ppc64-gnu": ["@rollup/rollup-linux-ppc64-gnu@4.57.1", "", { "os": "linux", 
"cpu": "ppc64" }, "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w=="], - "@rollup/rollup-linux-ppc64-musl": ["@rollup/rollup-linux-ppc64-musl@4.57.0", "", { "os": "linux", "cpu": "ppc64" }, "sha512-r1te1M0Sm2TBVD/RxBPC6RZVwNqUTwJTA7w+C/IW5v9Ssu6xmxWEi+iJQlpBhtUiT1raJ5b48pI8tBvEjEFnFA=="], + "@rollup/rollup-linux-ppc64-musl": ["@rollup/rollup-linux-ppc64-musl@4.57.1", "", { "os": "linux", "cpu": "ppc64" }, "sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw=="], - "@rollup/rollup-linux-riscv64-gnu": ["@rollup/rollup-linux-riscv64-gnu@4.57.0", "", { "os": "linux", "cpu": "none" }, "sha512-say0uMU/RaPm3CDQLxUUTF2oNWL8ysvHkAjcCzV2znxBr23kFfaxocS9qJm+NdkRhF8wtdEEAJuYcLPhSPbjuQ=="], + "@rollup/rollup-linux-riscv64-gnu": ["@rollup/rollup-linux-riscv64-gnu@4.57.1", "", { "os": "linux", "cpu": "none" }, "sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A=="], - "@rollup/rollup-linux-riscv64-musl": ["@rollup/rollup-linux-riscv64-musl@4.57.0", "", { "os": "linux", "cpu": "none" }, "sha512-/MU7/HizQGsnBREtRpcSbSV1zfkoxSTR7wLsRmBPQ8FwUj5sykrP1MyJTvsxP5KBq9SyE6kH8UQQQwa0ASeoQQ=="], + "@rollup/rollup-linux-riscv64-musl": ["@rollup/rollup-linux-riscv64-musl@4.57.1", "", { "os": "linux", "cpu": "none" }, "sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw=="], - "@rollup/rollup-linux-s390x-gnu": ["@rollup/rollup-linux-s390x-gnu@4.57.0", "", { "os": "linux", "cpu": "s390x" }, "sha512-Q9eh+gUGILIHEaJf66aF6a414jQbDnn29zeu0eX3dHMuysnhTvsUvZTCAyZ6tJhUjnvzBKE4FtuaYxutxRZpOg=="], + "@rollup/rollup-linux-s390x-gnu": ["@rollup/rollup-linux-s390x-gnu@4.57.1", "", { "os": "linux", "cpu": "s390x" }, "sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg=="], - "@rollup/rollup-linux-x64-gnu": ["@rollup/rollup-linux-x64-gnu@4.57.0", "", { "os": "linux", "cpu": "x64" }, 
"sha512-OR5p5yG5OKSxHReWmwvM0P+VTPMwoBS45PXTMYaskKQqybkS3Kmugq1W+YbNWArF8/s7jQScgzXUhArzEQ7x0A=="], + "@rollup/rollup-linux-x64-gnu": ["@rollup/rollup-linux-x64-gnu@4.57.1", "", { "os": "linux", "cpu": "x64" }, "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg=="], - "@rollup/rollup-linux-x64-musl": ["@rollup/rollup-linux-x64-musl@4.57.0", "", { "os": "linux", "cpu": "x64" }, "sha512-XeatKzo4lHDsVEbm1XDHZlhYZZSQYym6dg2X/Ko0kSFgio+KXLsxwJQprnR48GvdIKDOpqWqssC3iBCjoMcMpw=="], + "@rollup/rollup-linux-x64-musl": ["@rollup/rollup-linux-x64-musl@4.57.1", "", { "os": "linux", "cpu": "x64" }, "sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw=="], - "@rollup/rollup-openbsd-x64": ["@rollup/rollup-openbsd-x64@4.57.0", "", { "os": "openbsd", "cpu": "x64" }, "sha512-Lu71y78F5qOfYmubYLHPcJm74GZLU6UJ4THkf/a1K7Tz2ycwC2VUbsqbJAXaR6Bx70SRdlVrt2+n5l7F0agTUw=="], + "@rollup/rollup-openbsd-x64": ["@rollup/rollup-openbsd-x64@4.57.1", "", { "os": "openbsd", "cpu": "x64" }, "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw=="], - "@rollup/rollup-openharmony-arm64": ["@rollup/rollup-openharmony-arm64@4.57.0", "", { "os": "none", "cpu": "arm64" }, "sha512-v5xwKDWcu7qhAEcsUubiav7r+48Uk/ENWdr82MBZZRIm7zThSxCIVDfb3ZeRRq9yqk+oIzMdDo6fCcA5DHfMyA=="], + "@rollup/rollup-openharmony-arm64": ["@rollup/rollup-openharmony-arm64@4.57.1", "", { "os": "none", "cpu": "arm64" }, "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ=="], - "@rollup/rollup-win32-arm64-msvc": ["@rollup/rollup-win32-arm64-msvc@4.57.0", "", { "os": "win32", "cpu": "arm64" }, "sha512-XnaaaSMGSI6Wk8F4KK3QP7GfuuhjGchElsVerCplUuxRIzdvZ7hRBpLR0omCmw+kI2RFJB80nenhOoGXlJ5TfQ=="], + "@rollup/rollup-win32-arm64-msvc": ["@rollup/rollup-win32-arm64-msvc@4.57.1", "", { "os": "win32", "cpu": "arm64" }, 
"sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ=="], - "@rollup/rollup-win32-ia32-msvc": ["@rollup/rollup-win32-ia32-msvc@4.57.0", "", { "os": "win32", "cpu": "ia32" }, "sha512-3K1lP+3BXY4t4VihLw5MEg6IZD3ojSYzqzBG571W3kNQe4G4CcFpSUQVgurYgib5d+YaCjeFow8QivWp8vuSvA=="], + "@rollup/rollup-win32-ia32-msvc": ["@rollup/rollup-win32-ia32-msvc@4.57.1", "", { "os": "win32", "cpu": "ia32" }, "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew=="], - "@rollup/rollup-win32-x64-gnu": ["@rollup/rollup-win32-x64-gnu@4.57.0", "", { "os": "win32", "cpu": "x64" }, "sha512-MDk610P/vJGc5L5ImE4k5s+GZT3en0KoK1MKPXCRgzmksAMk79j4h3k1IerxTNqwDLxsGxStEZVBqG0gIqZqoA=="], + "@rollup/rollup-win32-x64-gnu": ["@rollup/rollup-win32-x64-gnu@4.57.1", "", { "os": "win32", "cpu": "x64" }, "sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ=="], - "@rollup/rollup-win32-x64-msvc": ["@rollup/rollup-win32-x64-msvc@4.57.0", "", { "os": "win32", "cpu": "x64" }, "sha512-Zv7v6q6aV+VslnpwzqKAmrk5JdVkLUzok2208ZXGipjb+msxBr/fJPZyeEXiFgH7k62Ak0SLIfxQRZQvTuf7rQ=="], + "@rollup/rollup-win32-x64-msvc": ["@rollup/rollup-win32-x64-msvc@4.57.1", "", { "os": "win32", "cpu": "x64" }, "sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA=="], "@s2-dev/streamstore": ["@s2-dev/streamstore@0.17.3", "", { "dependencies": { "@protobuf-ts/runtime": "^2.11.1" }, "peerDependencies": { "typescript": "^5.9.3" } }, "sha512-UeXL5+MgZQfNkbhCgEDVm7PrV5B3bxh6Zp4C5pUzQQwaoA+iGh2QiiIptRZynWgayzRv4vh0PYfnKpTzJEXegQ=="], "@selderee/plugin-htmlparser2": ["@selderee/plugin-htmlparser2@0.11.0", "", { "dependencies": { "domhandler": "^5.0.3", "selderee": "^0.11.0" } }, "sha512-P33hHGdldxGabLFjPPpaTxVolMrzrcegejx+0GxjrIb9Zv48D8yAIA/QTDR2dFl7Uz7urX8aX6+5bCZslr+gWQ=="], - "@shikijs/core": ["@shikijs/core@3.21.0", "", { "dependencies": { "@shikijs/types": "3.21.0", 
"@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4", "hast-util-to-html": "^9.0.5" } }, "sha512-AXSQu/2n1UIQekY8euBJlvFYZIw0PHY63jUzGbrOma4wPxzznJXTXkri+QcHeBNaFxiiOljKxxJkVSoB3PjbyA=="], + "@shikijs/core": ["@shikijs/core@3.22.0", "", { "dependencies": { "@shikijs/types": "3.22.0", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4", "hast-util-to-html": "^9.0.5" } }, "sha512-iAlTtSDDbJiRpvgL5ugKEATDtHdUVkqgHDm/gbD2ZS9c88mx7G1zSYjjOxp5Qa0eaW0MAQosFRmJSk354PRoQA=="], - "@shikijs/engine-javascript": ["@shikijs/engine-javascript@3.21.0", "", { "dependencies": { "@shikijs/types": "3.21.0", "@shikijs/vscode-textmate": "^10.0.2", "oniguruma-to-es": "^4.3.4" } }, "sha512-ATwv86xlbmfD9n9gKRiwuPpWgPENAWCLwYCGz9ugTJlsO2kOzhOkvoyV/UD+tJ0uT7YRyD530x6ugNSffmvIiQ=="], + "@shikijs/engine-javascript": ["@shikijs/engine-javascript@3.22.0", "", { "dependencies": { "@shikijs/types": "3.22.0", "@shikijs/vscode-textmate": "^10.0.2", "oniguruma-to-es": "^4.3.4" } }, "sha512-jdKhfgW9CRtj3Tor0L7+yPwdG3CgP7W+ZEqSsojrMzCjD1e0IxIbwUMDDpYlVBlC08TACg4puwFGkZfLS+56Tw=="], - "@shikijs/engine-oniguruma": ["@shikijs/engine-oniguruma@3.21.0", "", { "dependencies": { "@shikijs/types": "3.21.0", "@shikijs/vscode-textmate": "^10.0.2" } }, "sha512-OYknTCct6qiwpQDqDdf3iedRdzj6hFlOPv5hMvI+hkWfCKs5mlJ4TXziBG9nyabLwGulrUjHiCq3xCspSzErYQ=="], + "@shikijs/engine-oniguruma": ["@shikijs/engine-oniguruma@3.22.0", "", { "dependencies": { "@shikijs/types": "3.22.0", "@shikijs/vscode-textmate": "^10.0.2" } }, "sha512-DyXsOG0vGtNtl7ygvabHd7Mt5EY8gCNqR9Y7Lpbbd/PbJvgWrqaKzH1JW6H6qFkuUa8aCxoiYVv8/YfFljiQxA=="], - "@shikijs/langs": ["@shikijs/langs@3.21.0", "", { "dependencies": { "@shikijs/types": "3.21.0" } }, "sha512-g6mn5m+Y6GBJ4wxmBYqalK9Sp0CFkUqfNzUy2pJglUginz6ZpWbaWjDB4fbQ/8SHzFjYbtU6Ddlp1pc+PPNDVA=="], + "@shikijs/langs": ["@shikijs/langs@3.22.0", "", { "dependencies": { "@shikijs/types": "3.22.0" } }, 
"sha512-x/42TfhWmp6H00T6uwVrdTJGKgNdFbrEdhaDwSR5fd5zhQ1Q46bHq9EO61SCEWJR0HY7z2HNDMaBZp8JRmKiIA=="], - "@shikijs/rehype": ["@shikijs/rehype@3.21.0", "", { "dependencies": { "@shikijs/types": "3.21.0", "@types/hast": "^3.0.4", "hast-util-to-string": "^3.0.1", "shiki": "3.21.0", "unified": "^11.0.5", "unist-util-visit": "^5.0.0" } }, "sha512-fTQvwsZL67QdosMFdTgQ5SNjW3nxaPplRy//312hqOctRbIwviTV0nAbhv3NfnztHXvFli2zLYNKsTz/f9tbpQ=="], + "@shikijs/rehype": ["@shikijs/rehype@3.22.0", "", { "dependencies": { "@shikijs/types": "3.22.0", "@types/hast": "^3.0.4", "hast-util-to-string": "^3.0.1", "shiki": "3.22.0", "unified": "^11.0.5", "unist-util-visit": "^5.1.0" } }, "sha512-69b2VPc6XBy/VmAJlpBU5By+bJSBdE2nvgRCZXav7zujbrjXuT0F60DIrjKuutjPqNufuizE+E8tIZr2Yn8Z+g=="], - "@shikijs/themes": ["@shikijs/themes@3.21.0", "", { "dependencies": { "@shikijs/types": "3.21.0" } }, "sha512-BAE4cr9EDiZyYzwIHEk7JTBJ9CzlPuM4PchfcA5ao1dWXb25nv6hYsoDiBq2aZK9E3dlt3WB78uI96UESD+8Mw=="], + "@shikijs/themes": ["@shikijs/themes@3.22.0", "", { "dependencies": { "@shikijs/types": "3.22.0" } }, "sha512-o+tlOKqsr6FE4+mYJG08tfCFDS+3CG20HbldXeVoyP+cYSUxDhrFf3GPjE60U55iOkkjbpY2uC3It/eeja35/g=="], - "@shikijs/transformers": ["@shikijs/transformers@3.21.0", "", { "dependencies": { "@shikijs/core": "3.21.0", "@shikijs/types": "3.21.0" } }, "sha512-CZwvCWWIiRRiFk9/JKzdEooakAP8mQDtBOQ1TKiCaS2E1bYtyBCOkUzS8akO34/7ufICQ29oeSfkb3tT5KtrhA=="], + "@shikijs/transformers": ["@shikijs/transformers@3.22.0", "", { "dependencies": { "@shikijs/core": "3.22.0", "@shikijs/types": "3.22.0" } }, "sha512-E7eRV7mwDBjueLF6852n2oYeJYxBq3NSsDk+uyruYAXONv4U8holGmIrT+mPRJQ1J1SNOH6L8G19KRzmBawrFw=="], - "@shikijs/types": ["@shikijs/types@3.21.0", "", { "dependencies": { "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-zGrWOxZ0/+0ovPY7PvBU2gIS9tmhSUUt30jAcNV0Bq0gb2S98gwfjIs1vxlmH5zM7/4YxLamT6ChlqqAJmPPjA=="], + "@shikijs/types": ["@shikijs/types@3.22.0", "", { "dependencies": { "@shikijs/vscode-textmate": 
"^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-491iAekgKDBFE67z70Ok5a8KBMsQ2IJwOWw3us/7ffQkIBCyOQfm/aNwVMBUriP02QshIfgHCBSIYAl3u2eWjg=="], "@shikijs/vscode-textmate": ["@shikijs/vscode-textmate@10.0.2", "", {}, "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg=="], @@ -1258,7 +1262,7 @@ "@smithy/config-resolver": ["@smithy/config-resolver@4.4.6", "", { "dependencies": { "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "@smithy/util-config-provider": "^4.2.0", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "tslib": "^2.6.2" } }, "sha512-qJpzYC64kaj3S0fueiu3kXm8xPrR3PcXDPEgnaNMRn0EjNSZFoFjvbUp0YUDsRhN1CB90EnHJtbxWKevnH99UQ=="], - "@smithy/core": ["@smithy/core@3.22.0", "", { "dependencies": { "@smithy/middleware-serde": "^4.2.9", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-stream": "^4.5.10", "@smithy/util-utf8": "^4.2.0", "@smithy/uuid": "^1.1.0", "tslib": "^2.6.2" } }, "sha512-6vjCHD6vaY8KubeNw2Fg3EK0KLGQYdldG4fYgQmA0xSW0dJ8G2xFhSOdrlUakWVoP5JuWHtFODg3PNd/DN3FDA=="], + "@smithy/core": ["@smithy/core@3.22.1", "", { "dependencies": { "@smithy/middleware-serde": "^4.2.9", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-stream": "^4.5.11", "@smithy/util-utf8": "^4.2.0", "@smithy/uuid": "^1.1.0", "tslib": "^2.6.2" } }, "sha512-x3ie6Crr58MWrm4viHqqy2Du2rHYZjwu8BekasrQx4ca+Y24dzVAwq3yErdqIbc2G3I0kLQA13PQ+/rde+u65g=="], "@smithy/credential-provider-imds": ["@smithy/credential-provider-imds@4.2.8", "", { "dependencies": { "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "tslib": "^2.6.2" } }, 
"sha512-FNT0xHS1c/CPN8upqbMFP83+ul5YgdisfCfkZ86Jh2NSmnqw/AJ6x5pEogVCTVvSm7j9MopRU89bmDelxuDMYw=="], @@ -1288,9 +1292,9 @@ "@smithy/middleware-content-length": ["@smithy/middleware-content-length@4.2.8", "", { "dependencies": { "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-RO0jeoaYAB1qBRhfVyq0pMgBoUK34YEJxVxyjOWYZiOKOq2yMZ4MnVXMZCUDenpozHue207+9P5ilTV1zeda0A=="], - "@smithy/middleware-endpoint": ["@smithy/middleware-endpoint@4.4.12", "", { "dependencies": { "@smithy/core": "^3.22.0", "@smithy/middleware-serde": "^4.2.9", "@smithy/node-config-provider": "^4.3.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-middleware": "^4.2.8", "tslib": "^2.6.2" } }, "sha512-9JMKHVJtW9RysTNjcBZQHDwB0p3iTP6B1IfQV4m+uCevkVd/VuLgwfqk5cnI4RHcp4cPwoIvxQqN4B1sxeHo8Q=="], + "@smithy/middleware-endpoint": ["@smithy/middleware-endpoint@4.4.13", "", { "dependencies": { "@smithy/core": "^3.22.1", "@smithy/middleware-serde": "^4.2.9", "@smithy/node-config-provider": "^4.3.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-middleware": "^4.2.8", "tslib": "^2.6.2" } }, "sha512-x6vn0PjYmGdNuKh/juUJJewZh7MoQ46jYaJ2mvekF4EesMuFfrl4LaW/k97Zjf8PTCPQmPgMvwewg7eNoH9n5w=="], - "@smithy/middleware-retry": ["@smithy/middleware-retry@4.4.29", "", { "dependencies": { "@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/service-error-classification": "^4.2.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/uuid": "^1.1.0", "tslib": "^2.6.2" } }, "sha512-bmTn75a4tmKRkC5w61yYQLb3DmxNzB8qSVu9SbTYqW6GAL0WXO2bDZuMAn/GJSbOdHEdjZvWxe+9Kk015bw6Cg=="], + "@smithy/middleware-retry": ["@smithy/middleware-retry@4.4.30", "", { "dependencies": { "@smithy/node-config-provider": "^4.3.8", 
"@smithy/protocol-http": "^5.3.8", "@smithy/service-error-classification": "^4.2.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/uuid": "^1.1.0", "tslib": "^2.6.2" } }, "sha512-CBGyFvN0f8hlnqKH/jckRDz78Snrp345+PVk8Ux7pnkUCW97Iinse59lY78hBt04h1GZ6hjBN94BRwZy1xC8Bg=="], "@smithy/middleware-serde": ["@smithy/middleware-serde@4.2.9", "", { "dependencies": { "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-eMNiej0u/snzDvlqRGSN3Vl0ESn3838+nKyVfF2FKNXFbi4SERYT6PR392D39iczngbqqGG0Jl1DlCnp7tBbXQ=="], @@ -1298,7 +1302,7 @@ "@smithy/node-config-provider": ["@smithy/node-config-provider@4.3.8", "", { "dependencies": { "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-aFP1ai4lrbVlWjfpAfRSL8KFcnJQYfTl5QxLJXY32vghJrDuFyPZ6LtUL+JEGYiFRG1PfPLHLoxj107ulncLIg=="], - "@smithy/node-http-handler": ["@smithy/node-http-handler@4.4.8", "", { "dependencies": { "@smithy/abort-controller": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/querystring-builder": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-q9u+MSbJVIJ1QmJ4+1u+cERXkrhuILCBDsJUBAW1MPE6sFonbCNaegFuwW9ll8kh5UdyY3jOkoOGlc7BesoLpg=="], + "@smithy/node-http-handler": ["@smithy/node-http-handler@4.4.9", "", { "dependencies": { "@smithy/abort-controller": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/querystring-builder": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-KX5Wml5mF+luxm1szW4QDz32e3NObgJ4Fyw+irhph4I/2geXwUy4jkIMUs5ZPGflRBeR6BUkC2wqIab4Llgm3w=="], "@smithy/property-provider": ["@smithy/property-provider@4.2.8", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-EtCTbyIveCKeOXDSWSdze3k612yCPq1YbXsbqX3UHhkOSW8zKsM9NOJG5gTIya0vbY2DIaieG8pKo1rITHYL0w=="], @@ -1314,7 +1318,7 @@ "@smithy/signature-v4": 
["@smithy/signature-v4@5.3.8", "", { "dependencies": { "@smithy/is-array-buffer": "^4.2.0", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-hex-encoding": "^4.2.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-uri-escape": "^4.2.0", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-6A4vdGj7qKNRF16UIcO8HhHjKW27thsxYci+5r/uVRkdcBEkOEiY8OMPuydLX4QHSrJqGHPJzPRwwVTqbLZJhg=="], - "@smithy/smithy-client": ["@smithy/smithy-client@4.11.1", "", { "dependencies": { "@smithy/core": "^3.22.0", "@smithy/middleware-endpoint": "^4.4.12", "@smithy/middleware-stack": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-stream": "^4.5.10", "tslib": "^2.6.2" } }, "sha512-SERgNg5Z1U+jfR6/2xPYjSEHY1t3pyTHC/Ma3YQl6qWtmiL42bvNId3W/oMUWIwu7ekL2FMPdqAmwbQegM7HeQ=="], + "@smithy/smithy-client": ["@smithy/smithy-client@4.11.2", "", { "dependencies": { "@smithy/core": "^3.22.1", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-stack": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "@smithy/util-stream": "^4.5.11", "tslib": "^2.6.2" } }, "sha512-SCkGmFak/xC1n7hKRsUr6wOnBTJ3L22Qd4e8H1fQIuKTAjntwgU8lrdMe7uHdiT2mJAOWA/60qaW9tiMu69n1A=="], "@smithy/types": ["@smithy/types@4.12.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-9YcuJVTOBDjg9LWo23Qp0lTQ3D7fQsQtwle0jVfpbUHy9qBwCEgKuVH4FqFB3VYu0nwdHKiEMA+oXz7oV8X1kw=="], @@ -1330,9 +1334,9 @@ "@smithy/util-config-provider": ["@smithy/util-config-provider@4.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-YEjpl6XJ36FTKmD+kRJJWYvrHeUvm5ykaUS5xK+6oXffQPHeEM4/nXlZPe+Wu0lsgRUcNZiliYNh/y7q9c2y6Q=="], - "@smithy/util-defaults-mode-browser": ["@smithy/util-defaults-mode-browser@4.3.28", "", { "dependencies": { "@smithy/property-provider": "^4.2.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, 
"sha512-/9zcatsCao9h6g18p/9vH9NIi5PSqhCkxQ/tb7pMgRFnqYp9XUOyOlGPDMHzr8n5ih6yYgwJEY2MLEobUgi47w=="], + "@smithy/util-defaults-mode-browser": ["@smithy/util-defaults-mode-browser@4.3.29", "", { "dependencies": { "@smithy/property-provider": "^4.2.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-nIGy3DNRmOjaYaaKcQDzmWsro9uxlaqUOhZDHQed9MW/GmkBZPtnU70Pu1+GT9IBmUXwRdDuiyaeiy9Xtpn3+Q=="], - "@smithy/util-defaults-mode-node": ["@smithy/util-defaults-mode-node@4.2.31", "", { "dependencies": { "@smithy/config-resolver": "^4.4.6", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-JTvoApUXA5kbpceI2vuqQzRjeTbLpx1eoa5R/YEZbTgtxvIB7AQZxFJ0SEyfCpgPCyVV9IT7we+ytSeIB3CyWA=="], + "@smithy/util-defaults-mode-node": ["@smithy/util-defaults-mode-node@4.2.32", "", { "dependencies": { "@smithy/config-resolver": "^4.4.6", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-7dtFff6pu5fsjqrVve0YMhrnzJtccCWDacNKOkiZjJ++fmjGExmmSu341x+WU6Oc1IccL7lDuaUj7SfrHpWc5Q=="], "@smithy/util-endpoints": ["@smithy/util-endpoints@3.2.8", "", { "dependencies": { "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-8JaVTn3pBDkhZgHQ8R0epwWt+BqPSLCjdjXXusK1onwJlRuN69fbvSK66aIKKO7SwVFM6x2J2ox5X8pOaWcUEw=="], @@ -1342,7 +1346,7 @@ "@smithy/util-retry": ["@smithy/util-retry@4.2.8", "", { "dependencies": { "@smithy/service-error-classification": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-CfJqwvoRY0kTGe5AkQokpURNCT1u/MkRzMTASWMPPo2hNSnKtF1D45dQl3DE2LKLr4m+PW9mCeBMJr5mCAVThg=="], - "@smithy/util-stream": ["@smithy/util-stream@4.5.10", "", { "dependencies": { 
"@smithy/fetch-http-handler": "^5.3.9", "@smithy/node-http-handler": "^4.4.8", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-buffer-from": "^4.2.0", "@smithy/util-hex-encoding": "^4.2.0", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-jbqemy51UFSZSp2y0ZmRfckmrzuKww95zT9BYMmuJ8v3altGcqjwoV1tzpOwuHaKrwQrCjIzOib499ymr2f98g=="], + "@smithy/util-stream": ["@smithy/util-stream@4.5.11", "", { "dependencies": { "@smithy/fetch-http-handler": "^5.3.9", "@smithy/node-http-handler": "^4.4.9", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-buffer-from": "^4.2.0", "@smithy/util-hex-encoding": "^4.2.0", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-lKmZ0S/3Qj2OF5H1+VzvDLb6kRxGzZHq6f3rAsoSu5cTLGsn3v3VQBA8czkNNXlLjoFEtVu3OQT2jEeOtOE2CA=="], "@smithy/util-uri-escape": ["@smithy/util-uri-escape@4.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-igZpCKV9+E/Mzrpq6YacdTQ0qTiLm85gD6N/IrmyDvQFA4UnU3d5g3m8tMT/6zG/vVkWSU+VxeUyGonL62DuxA=="], @@ -1530,7 +1534,7 @@ "@types/ms": ["@types/ms@2.1.0", "", {}, "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA=="], - "@types/node": ["@types/node@22.19.7", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-MciR4AKGHWl7xwxkBa6xUGxQJ4VBOmPTF7sL+iGzuahOFaO0jHCsuEfS80pan1ef4gWId1oWOweIhrDEYLuaOw=="], + "@types/node": ["@types/node@22.19.10", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-tF5VOugLS/EuDlTBijk0MqABfP8UxgYazTLo3uIn3b4yJgg26QRbVYJYsDtHrjdDUIRfP70+VfhTTc+CE1yskw=="], "@types/node-fetch": ["@types/node-fetch@2.6.13", "", { "dependencies": { "@types/node": "*", "form-data": "^4.0.4" } }, "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw=="], @@ -1540,7 +1544,7 @@ "@types/prismjs": ["@types/prismjs@1.26.5", "", {}, "sha512-AUZTa7hQ2KY5L7AmtSiqxlhWxb4ina0yd8hNbl4TWuqnv/pFP0nDMb3YrfSBf4hJVGLh2YEIBfKaBW/9UEl6IQ=="], - "@types/react": 
["@types/react@19.2.10", "", { "dependencies": { "csstype": "^3.2.2" } }, "sha512-WPigyYuGhgZ/cTPRXB2EwUw+XvsRA3GqHlsP4qteqrnnjDrApbS7MxcGr/hke5iUoeB7E/gQtrs9I37zAJ0Vjw=="], + "@types/react": ["@types/react@19.2.13", "", { "dependencies": { "csstype": "^3.2.2" } }, "sha512-KkiJeU6VbYbUOp5ITMIc7kBfqlYkKA5KhEHVrGMmUUMt7NeaZg65ojdPk+FtNrBAOXNVM5QM72jnADjM+XVRAQ=="], "@types/react-dom": ["@types/react-dom@19.2.3", "", { "peerDependencies": { "@types/react": "^19.2.0" } }, "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ=="], @@ -1574,7 +1578,7 @@ "@types/yauzl": ["@types/yauzl@2.10.3", "", { "dependencies": { "@types/node": "*" } }, "sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q=="], - "@typespec/ts-http-runtime": ["@typespec/ts-http-runtime@0.3.2", "", { "dependencies": { "http-proxy-agent": "^7.0.0", "https-proxy-agent": "^7.0.0", "tslib": "^2.6.2" } }, "sha512-IlqQ/Gv22xUC1r/WQm4StLkYQmaaTsXAhUVsNE0+xiyf0yRFiH5++q78U3bw6bLKDCTmh0uqKB9eG9+Bt75Dkg=="], + "@typespec/ts-http-runtime": ["@typespec/ts-http-runtime@0.3.3", "", { "dependencies": { "http-proxy-agent": "^7.0.0", "https-proxy-agent": "^7.0.0", "tslib": "^2.6.2" } }, "sha512-91fp6CAAJSRtH5ja95T1FHSKa8aPW9/Zw6cta81jlZTUw/+Vq8jM/AfF/14h2b71wwR84JUTW/3Y8QPhDAawFA=="], "@ungap/structured-clone": ["@ungap/structured-clone@1.3.0", "", {}, "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g=="], @@ -1624,7 +1628,7 @@ "agentkeepalive": ["agentkeepalive@4.6.0", "", { "dependencies": { "humanize-ms": "^1.2.1" } }, "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ=="], - "ai": ["ai@5.0.123", "", { "dependencies": { "@ai-sdk/gateway": "2.0.29", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, 
"sha512-V3Imb0tg0pHCa6a/VsoW/FZpT07mwUw/4Hj6nexJC1Nvf1eyKQJyaYVkl+YTLnA8cKQSUkoarKhXWbFy4CSgjw=="], + "ai": ["ai@5.0.129", "", { "dependencies": { "@ai-sdk/gateway": "2.0.35", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-IARdFetNTedDfqpByNMm9p0oHj7JS+SpOrbgLdQdyCiDe70Xk07wnKP4Lub1ckCrxkhAxY3yxOHllGEjbpXgpQ=="], "ajv": ["ajv@6.12.6", "", { "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g=="], @@ -1662,7 +1666,7 @@ "ast-types": ["ast-types@0.13.4", "", { "dependencies": { "tslib": "^2.0.1" } }, "sha512-x1FCFnFifvYDDzTaLII71vG5uvDwgtmDTEVWAxrgeiR8VjMONcCXJx7E+USjDtHlwFmt9MysbqgF9b9Vjr6w+w=="], - "ast-v8-to-istanbul": ["ast-v8-to-istanbul@0.3.10", "", { "dependencies": { "@jridgewell/trace-mapping": "^0.3.31", "estree-walker": "^3.0.3", "js-tokens": "^9.0.1" } }, "sha512-p4K7vMz2ZSk3wN8l5o3y2bJAoZXT3VuJI5OLTATY/01CYWumWvwkUw0SqDBnNq6IiTO3qDa1eSQDibAV8g7XOQ=="], + "ast-v8-to-istanbul": ["ast-v8-to-istanbul@0.3.11", "", { "dependencies": { "@jridgewell/trace-mapping": "^0.3.31", "estree-walker": "^3.0.3", "js-tokens": "^10.0.0" } }, "sha512-Qya9fkoofMjCBNVdWINMjB5KZvkYfaO9/anwkWnjxibpWUxo5iHl2sOdP7/uAqaRuUYuoo8rDwnbaaKVFxoUvw=="], "astring": ["astring@1.9.0", "", { "bin": { "astring": "bin/astring" } }, "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg=="], @@ -1676,7 +1680,7 @@ "aws-ssl-profiles": ["aws-ssl-profiles@1.1.2", "", {}, "sha512-NZKeq9AfyQvEeNlN0zSYAaWrmBffJh3IELMZfRpJVWgrpEbtEpnjvzqBPf+mxoI287JohRDoa+/nsfqqiZmF6g=="], - "axios": ["axios@1.13.4", "", { "dependencies": { "follow-redirects": "^1.15.6", "form-data": "^4.0.4", "proxy-from-env": "^1.1.0" } }, 
"sha512-1wVkUaAO6WyaYtCkcYCOx12ZgpGf9Zif+qXa4n+oYzK558YryKqiL6UWwd5DqiH3VRW0GYhTZQ/vlgJrCoNQlg=="], + "axios": ["axios@1.13.5", "", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^1.1.0" } }, "sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q=="], "b4a": ["b4a@1.7.3", "", { "peerDependencies": { "react-native-b4a": "*" }, "optionalPeers": ["react-native-b4a"] }, "sha512-5Q2mfq2WfGuFp3uS//0s6baOJLMoVduPYVeNmDYxu5OUA1/cBfvr2RIS7vi62LdNj/urk1hfmj867I3qt6uZ7Q=="], @@ -1728,7 +1732,7 @@ "boolbase": ["boolbase@1.0.0", "", {}, "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww=="], - "bowser": ["bowser@2.13.1", "", {}, "sha512-OHawaAbjwx6rqICCKgSG0SAnT05bzd7ppyKLVUITZpANBaaMFBAsaNkto3LoQ31tyFP5kNujE8Cdx85G9VzOkw=="], + "bowser": ["bowser@2.14.1", "", {}, "sha512-tzPjzCxygAKWFOJP011oxFHs57HzIhOEracIgAePE4pqB3LikALKnSzUyU4MGs9/iCEUuHlAJTjTc5M+u7YEGg=="], "brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], @@ -1770,7 +1774,7 @@ "camelize": ["camelize@1.0.1", "", {}, "sha512-dU+Tx2fsypxTgtLoE36npi3UqcjSSMNYfkqgmoEhtZrraP5VWq0K7FkWVTYa8eMPtnU/G2txVsfdCJTn9uzpuQ=="], - "caniuse-lite": ["caniuse-lite@1.0.30001766", "", {}, "sha512-4C0lfJ0/YPjJQHagaE9x2Elb69CIqEPZeG0anQt9SIvIoOH4a4uaRl73IavyO+0qZh6MDLH//DrXThEYKHkmYA=="], + "caniuse-lite": ["caniuse-lite@1.0.30001769", "", {}, "sha512-BCfFL1sHijQlBGWBMuJyhZUhzo7wer5sVj9hqekB/7xn0Ypy+pER/edCYQm4exbXj4WiySGp40P8UuTh6w1srg=="], "caseless": ["caseless@0.12.0", "", {}, "sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw=="], @@ -2024,7 +2028,7 @@ "dotenv-expand": ["dotenv-expand@10.0.0", "", {}, "sha512-GopVGCpVS1UKH75VKHGuQFqS1Gusej0z4FyQkPdwjil2gNIv+LNsqBlboOzpJFZKVT95GkCyWJbBSdFEFUWI2A=="], - "drizzle-kit": 
["drizzle-kit@0.31.8", "", { "dependencies": { "@drizzle-team/brocli": "^0.10.2", "@esbuild-kit/esm-loader": "^2.5.5", "esbuild": "^0.25.4", "esbuild-register": "^3.5.0" }, "bin": { "drizzle-kit": "bin.cjs" } }, "sha512-O9EC/miwdnRDY10qRxM8P3Pg8hXe3LyU4ZipReKOgTwn4OqANmftj8XJz1UPUAS6NMHf0E2htjsbQujUTkncCg=="], + "drizzle-kit": ["drizzle-kit@0.31.9", "", { "dependencies": { "@drizzle-team/brocli": "^0.10.2", "@esbuild-kit/esm-loader": "^2.5.5", "esbuild": "^0.25.4", "esbuild-register": "^3.5.0" }, "bin": { "drizzle-kit": "bin.cjs" } }, "sha512-GViD3IgsXn7trFyBUUHyTFBpH/FsHTxYJ66qdbVggxef4UBPHRYxQaRzYLTuekYnk9i5FIEL9pbBIwMqX/Uwrg=="], "drizzle-orm": ["drizzle-orm@0.44.7", "", { "peerDependencies": { "@aws-sdk/client-rds-data": ">=3", "@cloudflare/workers-types": ">=4", "@electric-sql/pglite": ">=0.2.0", "@libsql/client": ">=0.10.0", "@libsql/client-wasm": ">=0.10.0", "@neondatabase/serverless": ">=0.10.0", "@op-engineering/op-sqlite": ">=2", "@opentelemetry/api": "^1.4.1", "@planetscale/database": ">=1.13", "@prisma/client": "*", "@tidbcloud/serverless": "*", "@types/better-sqlite3": "*", "@types/pg": "*", "@types/sql.js": "*", "@upstash/redis": ">=1.34.7", "@vercel/postgres": ">=0.8.0", "@xata.io/client": "*", "better-sqlite3": ">=7", "bun-types": "*", "expo-sqlite": ">=14.0.0", "gel": ">=2", "knex": "*", "kysely": "*", "mysql2": ">=2", "pg": ">=8", "postgres": ">=3", "sql.js": ">=1", "sqlite3": ">=5" }, "optionalPeers": ["@aws-sdk/client-rds-data", "@cloudflare/workers-types", "@electric-sql/pglite", "@libsql/client", "@libsql/client-wasm", "@neondatabase/serverless", "@op-engineering/op-sqlite", "@opentelemetry/api", "@planetscale/database", "@prisma/client", "@tidbcloud/serverless", "@types/better-sqlite3", "@types/pg", "@types/sql.js", "@upstash/redis", "@vercel/postgres", "@xata.io/client", "better-sqlite3", "bun-types", "expo-sqlite", "gel", "knex", "kysely", "mysql2", "pg", "postgres", "sql.js", "sqlite3"] }, 
"sha512-quIpnYznjU9lHshEOAYLoZ9s3jweleHlZIAWR/jX9gAWNg/JhQ1wj0KGRf7/Zm+obRrYd9GjPVJg790QY9N5AQ=="], @@ -2032,7 +2036,7 @@ "dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="], - "e2b": ["e2b@2.12.0", "", { "dependencies": { "@bufbuild/protobuf": "^2.6.2", "@connectrpc/connect": "2.0.0-rc.3", "@connectrpc/connect-web": "2.0.0-rc.3", "chalk": "^5.3.0", "compare-versions": "^6.1.0", "dockerfile-ast": "^0.7.1", "glob": "^11.1.0", "openapi-fetch": "^0.14.1", "platform": "^1.3.6", "tar": "^7.5.4" } }, "sha512-uzMEg11JQ6o90ODBUgPaQXKJ3tQNiQMAYi5yU5jK60Y0l+CSs7U8qoQcgTiSCemkIEyrmIDFub/ega8dv5vMCw=="], + "e2b": ["e2b@2.12.1", "", { "dependencies": { "@bufbuild/protobuf": "^2.6.2", "@connectrpc/connect": "2.0.0-rc.3", "@connectrpc/connect-web": "2.0.0-rc.3", "chalk": "^5.3.0", "compare-versions": "^6.1.0", "dockerfile-ast": "^0.7.1", "glob": "^11.1.0", "openapi-fetch": "^0.14.1", "platform": "^1.3.6", "tar": "^7.5.4" } }, "sha512-qKYwS0VSZqvtWAT4OrCtOwRhhMlcd359zyFRGAZZ1wpYHHjr9zR872UCoDb/d5jFVUsREcUgktURc47XxfznPg=="], "eastasianwidth": ["eastasianwidth@0.2.0", "", {}, "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="], @@ -2042,7 +2046,7 @@ "effect": ["effect@3.18.4", "", { "dependencies": { "@standard-schema/spec": "^1.0.0", "fast-check": "^3.23.1" } }, "sha512-b1LXQJLe9D11wfnOKAk3PKxuqYshQ0Heez+y5pnkd3jLj1yx9QhM72zZ9uUrOQyNvrs2GZZd/3maL0ZV18YuDA=="], - "electron-to-chromium": ["electron-to-chromium@1.5.282", "", {}, "sha512-FCPkJtpst28UmFzd903iU7PdeVTfY0KAeJy+Lk0GLZRwgwYHn/irRcaCbQQOmr5Vytc/7rcavsYLvTM8RiHYhQ=="], + "electron-to-chromium": ["electron-to-chromium@1.5.286", "", {}, "sha512-9tfDXhJ4RKFNerfjdCcZfufu49vg620741MNs26a9+bhLThdB+plgMeou98CAaHu/WATj2iHOOHTp1hWtABj2A=="], "emoji-regex": ["emoji-regex@10.6.0", "", {}, 
"sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A=="], @@ -2064,7 +2068,7 @@ "engine.io-parser": ["engine.io-parser@5.2.3", "", {}, "sha512-HqD3yTBfnBxIrbnM1DoD6Pcq8NECnh8d4As1Qgh0z5Gg3jRRIqijury0CL3ghu/edArpUYiYqQiDUQBIs4np3Q=="], - "enhanced-resolve": ["enhanced-resolve@5.18.4", "", { "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" } }, "sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q=="], + "enhanced-resolve": ["enhanced-resolve@5.19.0", "", { "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.3.0" } }, "sha512-phv3E1Xl4tQOShqSte26C7Fl84EwUdZsyOuSSk9qtAGyyQs2s3jJzComh+Abf4g187lUUAvH+H26omrqia2aGg=="], "entities": ["entities@6.0.1", "", {}, "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g=="], @@ -2172,7 +2176,7 @@ "fast-safe-stringify": ["fast-safe-stringify@2.1.1", "", {}, "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA=="], - "fast-xml-parser": ["fast-xml-parser@5.3.3", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-2O3dkPAAC6JavuMm8+4+pgTk+5hoAs+CjZ+sWcQLkX9+/tHRuTkQh/Oaifr8qDmZ8iEHb771Ea6G8CdwkrgvYA=="], + "fast-xml-parser": ["fast-xml-parser@5.3.5", "", { "dependencies": { "strnum": "^2.1.2" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-JeaA2Vm9ffQKp9VjvfzObuMCjUYAp5WDYhRYL5LrBPY/jUDlUtOvDfot0vKSkB9tuX885BDHjtw4fZadD95wnA=="], "fastq": ["fastq@1.20.1", "", { "dependencies": { "reusify": "^1.0.4" } }, "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw=="], @@ -2216,7 +2220,7 @@ "fraction.js": ["fraction.js@4.3.7", "", {}, "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew=="], - "framer-motion": ["framer-motion@12.29.2", "", { "dependencies": { "motion-dom": "^12.29.2", "motion-utils": "^12.29.2", "tslib": 
"^2.4.0" }, "peerDependencies": { "@emotion/is-prop-valid": "*", "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" }, "optionalPeers": ["@emotion/is-prop-valid", "react", "react-dom"] }, "sha512-lSNRzBJk4wuIy0emYQ/nfZ7eWhqud2umPKw2QAQki6uKhZPKm2hRQHeQoHTG9MIvfobb+A/LbEWPJU794ZUKrg=="], + "framer-motion": ["framer-motion@12.34.0", "", { "dependencies": { "motion-dom": "^12.34.0", "motion-utils": "^12.29.2", "tslib": "^2.4.0" }, "peerDependencies": { "@emotion/is-prop-valid": "*", "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" }, "optionalPeers": ["@emotion/is-prop-valid", "react", "react-dom"] }, "sha512-+/H49owhzkzQyxtn7nZeF4kdH++I2FWrESQ184Zbcw5cEqNHYkE5yxWxcTLSj5lNx3NWdbIRy5FHqUvetD8FWg=="], "fresh": ["fresh@2.0.0", "", {}, "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A=="], @@ -2252,7 +2256,7 @@ "get-stream": ["get-stream@8.0.1", "", {}, "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA=="], - "get-tsconfig": ["get-tsconfig@4.13.0", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ=="], + "get-tsconfig": ["get-tsconfig@4.13.6", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw=="], "get-uri": ["get-uri@6.0.5", "", { "dependencies": { "basic-ftp": "^5.0.2", "data-uri-to-buffer": "^6.0.2", "debug": "^4.3.4" } }, "sha512-b1O07XYq8eRuVzBNgJLstU6FYc1tS6wnMtF1I1D9lE8LxZSOGZ7LhxN54yPP6mGw5f2CkXY2BQUL9Fx41qvcIg=="], @@ -2436,7 +2440,7 @@ "istanbul-reports": ["istanbul-reports@3.2.0", "", { "dependencies": { "html-escaper": "^2.0.0", "istanbul-lib-report": "^3.0.0" } }, "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA=="], - "jackspeak": ["jackspeak@4.1.1", "", { "dependencies": { "@isaacs/cliui": 
"^8.0.2" } }, "sha512-zptv57P3GpL+O0I7VdMJNBZCu+BPHVQUk55Ft8/QCJjTVxrnJHuVuX/0Bl2A6/+2oyR/ZMEuFKwmzqqZ/U5nPQ=="], + "jackspeak": ["jackspeak@4.2.3", "", { "dependencies": { "@isaacs/cliui": "^9.0.0" } }, "sha512-ykkVRwrYvFm1nb2AJfKKYPr0emF6IiXDYUaFx4Zn9ZuIH7MrzEZ3sD5RlqGXNRpHtvUHJyOnCEFxOlNDtGo7wg=="], "jaeger-client": ["jaeger-client@3.19.0", "", { "dependencies": { "node-int64": "^0.4.0", "opentracing": "^0.14.4", "thriftrw": "^3.5.0", "uuid": "^8.3.2", "xorshift": "^1.1.1" } }, "sha512-M0c7cKHmdyEUtjemnJyx/y9uX16XHocL46yQvyqDlPdvAcwPDbHrIbKjQdBqtiE4apQ/9dmr+ZLJYYPGnurgpw=="], @@ -2448,7 +2452,7 @@ "js-tiktoken": ["js-tiktoken@1.0.21", "", { "dependencies": { "base64-js": "^1.5.1" } }, "sha512-biOj/6M5qdgx5TKjDnFT1ymSpM5tbd3ylwDtrQvFQSu0Z7bBYko2dF+W/aUkXUPuk6IVpRxk/3Q2sHOzGlS36g=="], - "js-tokens": ["js-tokens@9.0.1", "", {}, "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ=="], + "js-tokens": ["js-tokens@10.0.0", "", {}, "sha512-lM/UBzQmfJRo9ABXbPWemivdCW8V2G8FHaHdypQaIy523snUjog0W71ayWXTjiR+ixeMyVHN2XcpnTd/liPg/Q=="], "js-yaml": ["js-yaml@4.1.0", "", { "dependencies": { "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA=="], @@ -2480,7 +2484,7 @@ "kleur": ["kleur@3.0.3", "", {}, "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w=="], - "kysely": ["kysely@0.28.10", "", {}, "sha512-ksNxfzIW77OcZ+QWSAPC7yDqUSaIVwkTWnTPNiIy//vifNbwsSgQ57OkkncHxxpcBHM3LRfLAZVEh7kjq5twVA=="], + "kysely": ["kysely@0.28.11", "", {}, "sha512-zpGIFg0HuoC893rIjYX1BETkVWdDnzTzF5e0kWXJFg5lE0k1/LfNWBejrcnOFu8Q2Rfq/hTDTU7XLUM8QOrpzg=="], "langsmith": ["langsmith@0.3.87", "", { "dependencies": { "@types/uuid": "^10.0.0", "chalk": "^4.1.2", "console-table-printer": "^2.12.1", "p-queue": "^6.6.2", "semver": "^7.6.3", "uuid": "^10.0.0" }, "peerDependencies": { "@opentelemetry/api": "*", 
"@opentelemetry/exporter-trace-otlp-proto": "*", "@opentelemetry/sdk-trace-base": "*", "openai": "*" }, "optionalPeers": ["@opentelemetry/api", "@opentelemetry/exporter-trace-otlp-proto", "@opentelemetry/sdk-trace-base", "openai"] }, "sha512-XXR1+9INH8YX96FKWc5tie0QixWz6tOqAsAKfcJyPkE0xPep+NDz0IQLR32q4bn10QK3LqD2HN6T3n6z1YLW7Q=="], @@ -2568,7 +2572,7 @@ "lru-cache": ["lru-cache@11.2.5", "", {}, "sha512-vFrFJkWtJvJnD5hg+hJvVE8Lh/TcMzKnTgCWmtBipwI5yLX/iX+5UB2tfuyODF5E7k9xEzMdYgGqaSb1c0c5Yw=="], - "lru.min": ["lru.min@1.1.3", "", {}, "sha512-Lkk/vx6ak3rYkRR0Nhu4lFUT2VDnQSxBe8Hbl7f36358p6ow8Bnvr8lrLt98H8J1aGxfhbX4Fs5tYg2+FTwr5Q=="], + "lru.min": ["lru.min@1.1.4", "", {}, "sha512-DqC6n3QQ77zdFpCMASA1a3Jlb64Hv2N2DciFGkO/4L9+q/IpIAuRlKOvCXabtRW6cQf8usbmM6BE/TOPysCdIA=="], "lucide-react": ["lucide-react@0.511.0", "", { "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-VK5a2ydJ7xm8GvBeKLS9mu1pVK6ucef9780JVUjw6bAjJL/QXnd4Y0p7SPeOUMC27YhzNCZvm5d/QX0Tp3rc0w=="], @@ -2728,7 +2732,7 @@ "minimal-polyfills": ["minimal-polyfills@2.2.3", "", {}, "sha512-oxdmJ9cL+xV72h0xYxp4tP2d5/fTBpP45H8DIOn9pASuF8a3IYTf+25fMGDYGiWW+MFsuog6KD6nfmhZJQ+uUw=="], - "minimatch": ["minimatch@10.1.1", "", { "dependencies": { "@isaacs/brace-expansion": "^5.0.0" } }, "sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ=="], + "minimatch": ["minimatch@10.1.2", "", { "dependencies": { "@isaacs/brace-expansion": "^5.0.1" } }, "sha512-fu656aJ0n2kcXwsnwnv9g24tkU5uSmOlTjd6WyyaKm2Z+h1qmY6bAjrcaIxF/BslFqbZ8UBtbJi7KgQOZD2PTw=="], "minimist": ["minimist@1.2.8", "", {}, "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA=="], @@ -2750,7 +2754,7 @@ "mongodb-connection-string-url": ["mongodb-connection-string-url@3.0.2", "", { "dependencies": { "@types/whatwg-url": "^11.0.2", "whatwg-url": "^14.1.0 || ^13.0.0" } }, 
"sha512-rMO7CGo/9BFwyZABcKAWL8UJwH/Kc2x0g72uhDWzG48URRax5TCIcJ7Rc3RZqffZzO/Gwff/jyKwCU9TN8gehA=="], - "motion-dom": ["motion-dom@12.29.2", "", { "dependencies": { "motion-utils": "^12.29.2" } }, "sha512-/k+NuycVV8pykxyiTCoFzIVLA95Nb1BFIVvfSu9L50/6K6qNeAYtkxXILy/LRutt7AzaYDc2myj0wkCVVYAPPA=="], + "motion-dom": ["motion-dom@12.34.0", "", { "dependencies": { "motion-utils": "^12.29.2" } }, "sha512-Lql3NuEcScRDxTAO6GgUsRHBZOWI/3fnMlkMcH5NftzcN37zJta+bpbMAV9px4Nj057TuvRooMK7QrzMCgtz6Q=="], "motion-utils": ["motion-utils@12.29.2", "", {}, "sha512-G3kc34H2cX2gI63RqU+cZq+zWRRPSsNIOjpdl9TN4AQwC4sgwYPl/Q/Obf/d53nOm569T0fYK+tcoSV50BWx8A=="], @@ -2810,6 +2814,8 @@ "node-int64": ["node-int64@0.4.0", "", {}, "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw=="], + "node-readable-to-web-readable-stream": ["node-readable-to-web-readable-stream@0.4.2", "", {}, "sha512-/cMZNI34v//jUTrI+UIo4ieHAB5EZRY/+7OmXZgBxaWBMcW2tGdceIw06RFxWxrKZ5Jp3sI2i5TsRo+CBhtVLQ=="], + "node-releases": ["node-releases@2.0.27", "", {}, "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA=="], "node-rsa": ["node-rsa@1.1.1", "", { "dependencies": { "asn1": "^0.2.4" } }, "sha512-Jd4cvbJMryN21r5HgxQOpMEqv+ooke/korixNNK3mGqfGJmy0M77WDDzo/05969+OkMy3XW1UuZsSmW9KQm7Fw=="], @@ -2926,7 +2932,7 @@ "pdf-lib": ["pdf-lib@1.17.1", "", { "dependencies": { "@pdf-lib/standard-fonts": "^1.0.0", "@pdf-lib/upng": "^1.0.1", "pako": "^1.0.11", "tslib": "^1.11.1" } }, "sha512-V/mpyJAoTsN4cnP31vc0wfNA1+p20evqqnap0KLoRUN0Yk/p3wN52DOEsL4oBFcLdb76hlpKPtzJIgo67j/XLw=="], - "pdfjs-dist": ["pdfjs-dist@5.4.530", "", { "optionalDependencies": { "@napi-rs/canvas": "^0.1.84" } }, "sha512-r1hWsSIGGmyYUAHR26zSXkxYWLXLMd6AwqcaFYG9YUZ0GBf5GvcjJSeo512tabM4GYFhxhl5pMCmPr7Q72Rq2Q=="], + "pdfjs-dist": ["pdfjs-dist@5.4.624", "", { "optionalDependencies": { "@napi-rs/canvas": "^0.1.88", "node-readable-to-web-readable-stream": "^0.4.2" } }, 
"sha512-sm6TxKTtWv1Oh6n3C6J6a8odejb5uO4A4zo/2dgkHuC0iu8ZMAXOezEODkVaoVp8nX1Xzr+0WxFJJmUr45hQzg=="], "peberminta": ["peberminta@0.9.0", "", {}, "sha512-XIxfHpEuSJbITd1H3EeQwpcZbTLHc+VVr8ANI9t5sit565tsI4/xK3KWTUFE2e6QiangUkh3B0jihzmGnNrRsQ=="], @@ -2960,9 +2966,9 @@ "platform": ["platform@1.3.6", "", {}, "sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg=="], - "playwright": ["playwright@1.58.0", "", { "dependencies": { "playwright-core": "1.58.0" }, "optionalDependencies": { "fsevents": "2.3.2" }, "bin": { "playwright": "cli.js" } }, "sha512-2SVA0sbPktiIY/MCOPX8e86ehA/e+tDNq+e5Y8qjKYti2Z/JG7xnronT/TXTIkKbYGWlCbuucZ6dziEgkoEjQQ=="], + "playwright": ["playwright@1.58.2", "", { "dependencies": { "playwright-core": "1.58.2" }, "optionalDependencies": { "fsevents": "2.3.2" }, "bin": { "playwright": "cli.js" } }, "sha512-vA30H8Nvkq/cPBnNw4Q8TWz1EJyqgpuinBcHET0YVJVFldr8JDNiU9LaWAE1KqSkRYazuaBhTpB5ZzShOezQ6A=="], - "playwright-core": ["playwright-core@1.58.0", "", { "bin": { "playwright-core": "cli.js" } }, "sha512-aaoB1RWrdNi3//rOeKuMiS65UCcgOVljU46At6eFcOFPFHWtd2weHRRow6z/n+Lec0Lvu0k9ZPKJSjPugikirw=="], + "playwright-core": ["playwright-core@1.58.2", "", { "bin": { "playwright-core": "cli.js" } }, "sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg=="], "postcss": ["postcss@8.5.6", "", { "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" } }, "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg=="], @@ -2986,7 +2992,7 @@ "posthog-node": ["posthog-node@5.9.2", "", { "dependencies": { "@posthog/core": "1.2.2" } }, "sha512-oU7FbFcH5cn40nhP04cBeT67zE76EiGWjKKzDvm6IOm5P83sqM0Ij0wMJQSHp+QI6ZN7MLzb+4xfMPUEZ4q6CA=="], - "preact": ["preact@10.28.2", "", {}, "sha512-lbteaWGzGHdlIuiJ0l2Jq454m6kcpI1zNje6d8MlGAFlYvP2GO4ibnat7P74Esfz4sPTdM6UxtTwh/d3pwM9JA=="], + "preact": ["preact@10.28.3", "", {}, 
"sha512-tCmoRkPQLpBeWzpmbhryairGnhW9tKV6c6gr/w+RhoRoKEJwsjzipwp//1oCpGPOchvSLaAPlpcJi9MwMmoPyA=="], "prebuild-install": ["prebuild-install@7.1.3", "", { "dependencies": { "detect-libc": "^2.0.0", "expand-template": "^2.0.3", "github-from-package": "0.0.0", "minimist": "^1.2.3", "mkdirp-classic": "^0.5.3", "napi-build-utils": "^2.0.0", "node-abi": "^3.3.0", "pump": "^3.0.0", "rc": "^1.2.7", "simple-get": "^4.0.0", "tar-fs": "^2.0.0", "tunnel-agent": "^0.6.0" }, "bin": { "prebuild-install": "bin.js" } }, "sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug=="], @@ -3146,7 +3152,7 @@ "rimraf": ["rimraf@5.0.10", "", { "dependencies": { "glob": "^10.3.7" }, "bin": { "rimraf": "dist/esm/bin.mjs" } }, "sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ=="], - "rollup": ["rollup@4.57.0", "", { "dependencies": { "@types/estree": "1.0.8" }, "optionalDependencies": { "@rollup/rollup-android-arm-eabi": "4.57.0", "@rollup/rollup-android-arm64": "4.57.0", "@rollup/rollup-darwin-arm64": "4.57.0", "@rollup/rollup-darwin-x64": "4.57.0", "@rollup/rollup-freebsd-arm64": "4.57.0", "@rollup/rollup-freebsd-x64": "4.57.0", "@rollup/rollup-linux-arm-gnueabihf": "4.57.0", "@rollup/rollup-linux-arm-musleabihf": "4.57.0", "@rollup/rollup-linux-arm64-gnu": "4.57.0", "@rollup/rollup-linux-arm64-musl": "4.57.0", "@rollup/rollup-linux-loong64-gnu": "4.57.0", "@rollup/rollup-linux-loong64-musl": "4.57.0", "@rollup/rollup-linux-ppc64-gnu": "4.57.0", "@rollup/rollup-linux-ppc64-musl": "4.57.0", "@rollup/rollup-linux-riscv64-gnu": "4.57.0", "@rollup/rollup-linux-riscv64-musl": "4.57.0", "@rollup/rollup-linux-s390x-gnu": "4.57.0", "@rollup/rollup-linux-x64-gnu": "4.57.0", "@rollup/rollup-linux-x64-musl": "4.57.0", "@rollup/rollup-openbsd-x64": "4.57.0", "@rollup/rollup-openharmony-arm64": "4.57.0", "@rollup/rollup-win32-arm64-msvc": "4.57.0", "@rollup/rollup-win32-ia32-msvc": "4.57.0", "@rollup/rollup-win32-x64-gnu": 
"4.57.0", "@rollup/rollup-win32-x64-msvc": "4.57.0", "fsevents": "~2.3.2" }, "bin": { "rollup": "dist/bin/rollup" } }, "sha512-e5lPJi/aui4TO1LpAXIRLySmwXSE8k3b9zoGfd42p67wzxog4WHjiZF3M2uheQih4DGyc25QEV4yRBbpueNiUA=="], + "rollup": ["rollup@4.57.1", "", { "dependencies": { "@types/estree": "1.0.8" }, "optionalDependencies": { "@rollup/rollup-android-arm-eabi": "4.57.1", "@rollup/rollup-android-arm64": "4.57.1", "@rollup/rollup-darwin-arm64": "4.57.1", "@rollup/rollup-darwin-x64": "4.57.1", "@rollup/rollup-freebsd-arm64": "4.57.1", "@rollup/rollup-freebsd-x64": "4.57.1", "@rollup/rollup-linux-arm-gnueabihf": "4.57.1", "@rollup/rollup-linux-arm-musleabihf": "4.57.1", "@rollup/rollup-linux-arm64-gnu": "4.57.1", "@rollup/rollup-linux-arm64-musl": "4.57.1", "@rollup/rollup-linux-loong64-gnu": "4.57.1", "@rollup/rollup-linux-loong64-musl": "4.57.1", "@rollup/rollup-linux-ppc64-gnu": "4.57.1", "@rollup/rollup-linux-ppc64-musl": "4.57.1", "@rollup/rollup-linux-riscv64-gnu": "4.57.1", "@rollup/rollup-linux-riscv64-musl": "4.57.1", "@rollup/rollup-linux-s390x-gnu": "4.57.1", "@rollup/rollup-linux-x64-gnu": "4.57.1", "@rollup/rollup-linux-x64-musl": "4.57.1", "@rollup/rollup-openbsd-x64": "4.57.1", "@rollup/rollup-openharmony-arm64": "4.57.1", "@rollup/rollup-win32-arm64-msvc": "4.57.1", "@rollup/rollup-win32-ia32-msvc": "4.57.1", "@rollup/rollup-win32-x64-gnu": "4.57.1", "@rollup/rollup-win32-x64-msvc": "4.57.1", "fsevents": "~2.3.2" }, "bin": { "rollup": "dist/bin/rollup" } }, "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A=="], "rou3": ["rou3@0.5.1", "", {}, "sha512-OXMmJ3zRk2xeXFGfA3K+EOPHC5u7RDFG7lIOx0X1pdnhUkI8MdVrbV+sNsD80ElpUZ+MRHdyxPnFthq9VHs8uQ=="], @@ -3190,7 +3196,7 @@ "selderee": ["selderee@0.11.0", "", { "dependencies": { "parseley": "^0.12.0" } }, "sha512-5TF+l7p4+OsnP8BCCvSyZiSPc4x4//p5uPwK8TCnVPJYRmU2aYKMpOXvw8zM5a5JvuuCGN1jmsMwuU2W02ukfA=="], - "semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } 
}, "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], + "semver": ["semver@7.7.4", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA=="], "send": ["send@1.2.1", "", { "dependencies": { "debug": "^4.4.3", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "fresh": "^2.0.0", "http-errors": "^2.0.1", "mime-types": "^3.0.2", "ms": "^2.1.3", "on-finished": "^2.4.1", "range-parser": "^1.2.1", "statuses": "^2.0.2" } }, "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ=="], @@ -3212,7 +3218,7 @@ "shell-quote": ["shell-quote@1.8.3", "", {}, "sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw=="], - "shiki": ["shiki@3.21.0", "", { "dependencies": { "@shikijs/core": "3.21.0", "@shikijs/engine-javascript": "3.21.0", "@shikijs/engine-oniguruma": "3.21.0", "@shikijs/langs": "3.21.0", "@shikijs/themes": "3.21.0", "@shikijs/types": "3.21.0", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-N65B/3bqL/TI2crrXr+4UivctrAGEjmsib5rPMMPpFp1xAx/w03v8WZ9RDDFYteXoEgY7qZ4HGgl5KBIu1153w=="], + "shiki": ["shiki@3.22.0", "", { "dependencies": { "@shikijs/core": "3.22.0", "@shikijs/engine-javascript": "3.22.0", "@shikijs/engine-oniguruma": "3.22.0", "@shikijs/langs": "3.22.0", "@shikijs/themes": "3.22.0", "@shikijs/types": "3.22.0", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-LBnhsoYEe0Eou4e1VgJACes+O6S6QC0w71fCSp5Oya79inkwkm15gQ1UF6VtQ8j/taMDh79hAB49WUk8ALQW3g=="], "shimmer": ["shimmer@1.2.1", "", {}, "sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw=="], @@ -3468,7 +3474,7 @@ "underscore": ["underscore@1.13.7", "", {}, "sha512-GMXzWtsc57XAtguZgaQViUOzs0KTkk8ojr3/xAxXLITqf/3EMwxC0inyETfDFjH/Krbhuep0HNbbjI9i/q3F3g=="], - "undici": ["undici@7.19.2", "", 
{}, "sha512-4VQSpGEGsWzk0VYxyB/wVX/Q7qf9t5znLRgs0dzszr9w9Fej/8RVNQ+S20vdXSAyra/bJ7ZQfGv6ZMj7UEbzSg=="], + "undici": ["undici@7.21.0", "", {}, "sha512-Hn2tCQpoDt1wv23a68Ctc8Cr/BHpUSfaPYrkajTXOS9IKpxVRx/X5m1K2YkbK2ipgZgxXSgsUinl3x+2YdSSfg=="], "undici-types": ["undici-types@6.21.0", "", {}, "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ=="], @@ -3636,53 +3642,51 @@ "@aws-crypto/sha256-browser/@smithy/util-utf8": ["@smithy/util-utf8@2.3.0", "", { "dependencies": { "@smithy/util-buffer-from": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A=="], - "@aws-crypto/util/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], - "@aws-crypto/util/@smithy/util-utf8": ["@smithy/util-utf8@2.3.0", "", { "dependencies": { "@smithy/util-buffer-from": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A=="], - "@aws-sdk/client-s3/@aws-sdk/core": ["@aws-sdk/core@3.973.4", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.2", "@smithy/core": "^3.22.0", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-8Rk+kPP74YiR47x54bxYlKZswsaSh0a4XvvRUMLvyS/koNawhsGu/+qSZxREqUeTO+GkKpFvSQIsAZR+deUP+g=="], + "@aws-sdk/client-s3/@aws-sdk/core": ["@aws-sdk/core@3.973.7", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.4", "@smithy/core": "^3.22.1", "@smithy/node-config-provider": 
"^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-wNZZQQNlJ+hzD49cKdo+PY6rsTDElO8yDImnrI69p2PLBa7QomeUKAJWYp9xnaR38nlHqWhMHZuYLCQ3oSX+xg=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node": ["@aws-sdk/credential-provider-node@3.972.3", "", { "dependencies": { "@aws-sdk/credential-provider-env": "^3.972.2", "@aws-sdk/credential-provider-http": "^3.972.4", "@aws-sdk/credential-provider-ini": "^3.972.2", "@aws-sdk/credential-provider-process": "^3.972.2", "@aws-sdk/credential-provider-sso": "^3.972.2", "@aws-sdk/credential-provider-web-identity": "^3.972.2", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-iu+JwWHM7tHowKqE+8wNmI3sM6mPEiI9Egscz2BEV7adyKmV95oR9tBO4VIOl72FGDi7X9mXg19VtqIpSkEEsA=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node": ["@aws-sdk/credential-provider-node@3.972.6", "", { "dependencies": { "@aws-sdk/credential-provider-env": "^3.972.5", "@aws-sdk/credential-provider-http": "^3.972.7", "@aws-sdk/credential-provider-ini": "^3.972.5", "@aws-sdk/credential-provider-process": "^3.972.5", "@aws-sdk/credential-provider-sso": "^3.972.5", "@aws-sdk/credential-provider-web-identity": "^3.972.5", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DZ3CnAAtSVtVz+G+ogqecaErMLgzph4JH5nYbHoBMgBkwTUV+SUcjsjOJwdBJTHu3Dm6l5LBYekZoU2nDqQk2A=="], - "@aws-sdk/client-s3/@aws-sdk/middleware-host-header": ["@aws-sdk/middleware-host-header@3.972.2", "", { 
"dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-42hZ8jEXT2uR6YybCzNq9OomqHPw43YIfRfz17biZjMQA4jKSQUaHIl6VvqO2Ddl5904pXg2Yd/ku78S0Ikgog=="], + "@aws-sdk/client-s3/@aws-sdk/middleware-host-header": ["@aws-sdk/middleware-host-header@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-aknPTb2M+G3s+0qLCx4Li/qGZH8IIYjugHMv15JTYMe6mgZO8VBpYgeGYsNMGCqCZOcWzuf900jFBG5bopfzmA=="], - "@aws-sdk/client-s3/@aws-sdk/middleware-logger": ["@aws-sdk/middleware-logger@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-iUzdXKOgi4JVDDEG/VvoNw50FryRCEm0qAudw12DcZoiNJWl0rN6SYVLcL1xwugMfQncCXieK5UBlG6mhH7iYA=="], + "@aws-sdk/client-s3/@aws-sdk/middleware-logger": ["@aws-sdk/middleware-logger@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-Ftg09xNNRqaz9QNzlfdQWfpqMCJbsQdnZVJP55jfhbKi1+FTWxGuvfPoBhDHIovqWKjqbuiew3HuhxbJ0+OjgA=="], - "@aws-sdk/client-s3/@aws-sdk/middleware-recursion-detection": ["@aws-sdk/middleware-recursion-detection@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws/lambda-invoke-store": "^0.2.2", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-/mzlyzJDtngNFd/rAYvqx29a2d0VuiYKN84Y/Mu9mGw7cfMOCyRK+896tb9wV6MoPRHUX7IXuKCIL8nzz2Pz5A=="], + "@aws-sdk/client-s3/@aws-sdk/middleware-recursion-detection": ["@aws-sdk/middleware-recursion-detection@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws/lambda-invoke-store": "^0.2.2", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-PY57QhzNuXHnwbJgbWYTrqIDHYSeOlhfYERTAuc16LKZpTZRJUjzBFokp9hF7u1fuGeE3D70ERXzdbMBOqQz7Q=="], - 
"@aws-sdk/client-s3/@aws-sdk/middleware-user-agent": ["@aws-sdk/middleware-user-agent@3.972.4", "", { "dependencies": { "@aws-sdk/core": "^3.973.4", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.972.0", "@smithy/core": "^3.22.0", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-6sU8jrSJvY/lqSnU6IYsa8SrCKwOZ4Enl6O4xVJo8RCq9Bdr5Giuw2eUaJAk9GPcpr4OFcmSFv3JOLhpKGeRZA=="], + "@aws-sdk/client-s3/@aws-sdk/middleware-user-agent": ["@aws-sdk/middleware-user-agent@3.972.7", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@smithy/core": "^3.22.1", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-HUD+geASjXSCyL/DHPQc/Ua7JhldTcIglVAoCV8kiVm99IaFSlAbTvEnyhZwdE6bdFyTL+uIaWLaCFSRsglZBQ=="], - "@aws-sdk/client-s3/@aws-sdk/region-config-resolver": ["@aws-sdk/region-config-resolver@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/config-resolver": "^4.4.6", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-/7vRBsfmiOlg2X67EdKrzzQGw5/SbkXb7ALHQmlQLkZh8qNgvS2G2dDC6NtF3hzFlpP3j2k+KIEtql/6VrI6JA=="], + "@aws-sdk/client-s3/@aws-sdk/region-config-resolver": ["@aws-sdk/region-config-resolver@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/config-resolver": "^4.4.6", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-v4J8qYAWfOMcZ4MJUyatntOicTzEMaU7j3OpkRCGGFSL2NgXQ5VbxauIyORA+pxdKZ0qQG2tCQjQjZDlXEC3Ow=="], "@aws-sdk/client-s3/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], - "@aws-sdk/client-s3/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.972.0", "", { "dependencies": { "@aws-sdk/types": 
"3.972.0", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-6JHsl1V/a1ZW8D8AFfd4R52fwZPnZ5H4U6DS8m/bWT8qad72NvbOFAC7U2cDtFs2TShqUO3TEiX/EJibtY3ijg=="], + "@aws-sdk/client-s3/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.986.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-Mqi79L38qi1gCG3adlVdbNrSxvcm1IPDLiJPA3OBypY5ewxUyWbaA3DD4goG+EwET6LSFgZJcRSIh6KBNpP5pA=="], - "@aws-sdk/client-s3/@aws-sdk/util-user-agent-browser": ["@aws-sdk/util-user-agent-browser@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "bowser": "^2.11.0", "tslib": "^2.6.2" } }, "sha512-gz76bUyebPZRxIsBHJUd/v+yiyFzm9adHbr8NykP2nm+z/rFyvQneOHajrUejtmnc5tTBeaDPL4X25TnagRk4A=="], + "@aws-sdk/client-s3/@aws-sdk/util-user-agent-browser": ["@aws-sdk/util-user-agent-browser@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "bowser": "^2.11.0", "tslib": "^2.6.2" } }, "sha512-JurOwkRUcXD/5MTDBcqdyQ9eVedtAsZgw5rBwktsPTN7QtPiS2Ld1jkJepNgYoCufz1Wcut9iup7GJDoIHp8Fw=="], - "@aws-sdk/client-s3/@aws-sdk/util-user-agent-node": ["@aws-sdk/util-user-agent-node@3.972.2", "", { "dependencies": { "@aws-sdk/middleware-user-agent": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" }, "peerDependencies": { "aws-crt": ">=1.0.0" }, "optionalPeers": ["aws-crt"] }, "sha512-vnxOc4C6AR7hVbwyFo1YuH0GB6dgJlWt8nIOOJpnzJAWJPkUMPJ9Zv2lnKsSU7TTZbhP2hEO8OZ4PYH59XFv8Q=="], + "@aws-sdk/client-s3/@aws-sdk/util-user-agent-node": ["@aws-sdk/util-user-agent-node@3.972.5", "", { "dependencies": { "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/types": "^3.973.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" }, 
"peerDependencies": { "aws-crt": ">=1.0.0" }, "optionalPeers": ["aws-crt"] }, "sha512-GsUDF+rXyxDZkkJxUsDxnA67FG+kc5W1dnloCFLl6fWzceevsCYzJpASBzT+BPjwUgREE6FngfJYYYMQUY5fZQ=="], - "@aws-sdk/client-sesv2/@aws-sdk/core": ["@aws-sdk/core@3.973.4", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.2", "@smithy/core": "^3.22.0", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-8Rk+kPP74YiR47x54bxYlKZswsaSh0a4XvvRUMLvyS/koNawhsGu/+qSZxREqUeTO+GkKpFvSQIsAZR+deUP+g=="], + "@aws-sdk/client-sesv2/@aws-sdk/core": ["@aws-sdk/core@3.973.7", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.4", "@smithy/core": "^3.22.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-wNZZQQNlJ+hzD49cKdo+PY6rsTDElO8yDImnrI69p2PLBa7QomeUKAJWYp9xnaR38nlHqWhMHZuYLCQ3oSX+xg=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node": ["@aws-sdk/credential-provider-node@3.972.3", "", { "dependencies": { "@aws-sdk/credential-provider-env": "^3.972.2", "@aws-sdk/credential-provider-http": "^3.972.4", "@aws-sdk/credential-provider-ini": "^3.972.2", "@aws-sdk/credential-provider-process": "^3.972.2", "@aws-sdk/credential-provider-sso": "^3.972.2", "@aws-sdk/credential-provider-web-identity": "^3.972.2", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/property-provider": "^4.2.8", 
"@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-iu+JwWHM7tHowKqE+8wNmI3sM6mPEiI9Egscz2BEV7adyKmV95oR9tBO4VIOl72FGDi7X9mXg19VtqIpSkEEsA=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node": ["@aws-sdk/credential-provider-node@3.972.6", "", { "dependencies": { "@aws-sdk/credential-provider-env": "^3.972.5", "@aws-sdk/credential-provider-http": "^3.972.7", "@aws-sdk/credential-provider-ini": "^3.972.5", "@aws-sdk/credential-provider-process": "^3.972.5", "@aws-sdk/credential-provider-sso": "^3.972.5", "@aws-sdk/credential-provider-web-identity": "^3.972.5", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DZ3CnAAtSVtVz+G+ogqecaErMLgzph4JH5nYbHoBMgBkwTUV+SUcjsjOJwdBJTHu3Dm6l5LBYekZoU2nDqQk2A=="], - "@aws-sdk/client-sesv2/@aws-sdk/middleware-host-header": ["@aws-sdk/middleware-host-header@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-42hZ8jEXT2uR6YybCzNq9OomqHPw43YIfRfz17biZjMQA4jKSQUaHIl6VvqO2Ddl5904pXg2Yd/ku78S0Ikgog=="], + "@aws-sdk/client-sesv2/@aws-sdk/middleware-host-header": ["@aws-sdk/middleware-host-header@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-aknPTb2M+G3s+0qLCx4Li/qGZH8IIYjugHMv15JTYMe6mgZO8VBpYgeGYsNMGCqCZOcWzuf900jFBG5bopfzmA=="], - "@aws-sdk/client-sesv2/@aws-sdk/middleware-logger": ["@aws-sdk/middleware-logger@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-iUzdXKOgi4JVDDEG/VvoNw50FryRCEm0qAudw12DcZoiNJWl0rN6SYVLcL1xwugMfQncCXieK5UBlG6mhH7iYA=="], + "@aws-sdk/client-sesv2/@aws-sdk/middleware-logger": 
["@aws-sdk/middleware-logger@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-Ftg09xNNRqaz9QNzlfdQWfpqMCJbsQdnZVJP55jfhbKi1+FTWxGuvfPoBhDHIovqWKjqbuiew3HuhxbJ0+OjgA=="], - "@aws-sdk/client-sesv2/@aws-sdk/middleware-recursion-detection": ["@aws-sdk/middleware-recursion-detection@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws/lambda-invoke-store": "^0.2.2", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-/mzlyzJDtngNFd/rAYvqx29a2d0VuiYKN84Y/Mu9mGw7cfMOCyRK+896tb9wV6MoPRHUX7IXuKCIL8nzz2Pz5A=="], + "@aws-sdk/client-sesv2/@aws-sdk/middleware-recursion-detection": ["@aws-sdk/middleware-recursion-detection@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws/lambda-invoke-store": "^0.2.2", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-PY57QhzNuXHnwbJgbWYTrqIDHYSeOlhfYERTAuc16LKZpTZRJUjzBFokp9hF7u1fuGeE3D70ERXzdbMBOqQz7Q=="], - "@aws-sdk/client-sesv2/@aws-sdk/middleware-user-agent": ["@aws-sdk/middleware-user-agent@3.972.4", "", { "dependencies": { "@aws-sdk/core": "^3.973.4", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.972.0", "@smithy/core": "^3.22.0", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-6sU8jrSJvY/lqSnU6IYsa8SrCKwOZ4Enl6O4xVJo8RCq9Bdr5Giuw2eUaJAk9GPcpr4OFcmSFv3JOLhpKGeRZA=="], + "@aws-sdk/client-sesv2/@aws-sdk/middleware-user-agent": ["@aws-sdk/middleware-user-agent@3.972.7", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@smithy/core": "^3.22.1", "@smithy/protocol-http": "^5.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-HUD+geASjXSCyL/DHPQc/Ua7JhldTcIglVAoCV8kiVm99IaFSlAbTvEnyhZwdE6bdFyTL+uIaWLaCFSRsglZBQ=="], - "@aws-sdk/client-sesv2/@aws-sdk/region-config-resolver": 
["@aws-sdk/region-config-resolver@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/config-resolver": "^4.4.6", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-/7vRBsfmiOlg2X67EdKrzzQGw5/SbkXb7ALHQmlQLkZh8qNgvS2G2dDC6NtF3hzFlpP3j2k+KIEtql/6VrI6JA=="], + "@aws-sdk/client-sesv2/@aws-sdk/region-config-resolver": ["@aws-sdk/region-config-resolver@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/config-resolver": "^4.4.6", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-v4J8qYAWfOMcZ4MJUyatntOicTzEMaU7j3OpkRCGGFSL2NgXQ5VbxauIyORA+pxdKZ0qQG2tCQjQjZDlXEC3Ow=="], "@aws-sdk/client-sesv2/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], - "@aws-sdk/client-sesv2/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.972.0", "", { "dependencies": { "@aws-sdk/types": "3.972.0", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-6JHsl1V/a1ZW8D8AFfd4R52fwZPnZ5H4U6DS8m/bWT8qad72NvbOFAC7U2cDtFs2TShqUO3TEiX/EJibtY3ijg=="], + "@aws-sdk/client-sesv2/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.986.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-Mqi79L38qi1gCG3adlVdbNrSxvcm1IPDLiJPA3OBypY5ewxUyWbaA3DD4goG+EwET6LSFgZJcRSIh6KBNpP5pA=="], - "@aws-sdk/client-sesv2/@aws-sdk/util-user-agent-browser": ["@aws-sdk/util-user-agent-browser@3.972.2", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "bowser": "^2.11.0", "tslib": "^2.6.2" } }, 
"sha512-gz76bUyebPZRxIsBHJUd/v+yiyFzm9adHbr8NykP2nm+z/rFyvQneOHajrUejtmnc5tTBeaDPL4X25TnagRk4A=="], + "@aws-sdk/client-sesv2/@aws-sdk/util-user-agent-browser": ["@aws-sdk/util-user-agent-browser@3.972.3", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "bowser": "^2.11.0", "tslib": "^2.6.2" } }, "sha512-JurOwkRUcXD/5MTDBcqdyQ9eVedtAsZgw5rBwktsPTN7QtPiS2Ld1jkJepNgYoCufz1Wcut9iup7GJDoIHp8Fw=="], - "@aws-sdk/client-sesv2/@aws-sdk/util-user-agent-node": ["@aws-sdk/util-user-agent-node@3.972.2", "", { "dependencies": { "@aws-sdk/middleware-user-agent": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" }, "peerDependencies": { "aws-crt": ">=1.0.0" }, "optionalPeers": ["aws-crt"] }, "sha512-vnxOc4C6AR7hVbwyFo1YuH0GB6dgJlWt8nIOOJpnzJAWJPkUMPJ9Zv2lnKsSU7TTZbhP2hEO8OZ4PYH59XFv8Q=="], + "@aws-sdk/client-sesv2/@aws-sdk/util-user-agent-node": ["@aws-sdk/util-user-agent-node@3.972.5", "", { "dependencies": { "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/types": "^3.973.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" }, "peerDependencies": { "aws-crt": ">=1.0.0" }, "optionalPeers": ["aws-crt"] }, "sha512-GsUDF+rXyxDZkkJxUsDxnA67FG+kc5W1dnloCFLl6fWzceevsCYzJpASBzT+BPjwUgREE6FngfJYYYMQUY5fZQ=="], "@aws-sdk/client-sqs/@aws-sdk/core": ["@aws-sdk/core@3.947.0", "", { "dependencies": { "@aws-sdk/types": "3.936.0", "@aws-sdk/xml-builder": "3.930.0", "@smithy/core": "^3.18.7", "@smithy/node-config-provider": "^4.3.5", "@smithy/property-provider": "^4.2.5", "@smithy/protocol-http": "^5.3.5", "@smithy/signature-v4": "^5.3.5", "@smithy/smithy-client": "^4.9.10", "@smithy/types": "^4.9.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.5", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-Khq4zHhuAkvCFuFbgcy3GrZTzfSX7ZIjIcW1zRDxXRLZKRtuhnZdonqTUfaWi5K42/4OmxkYNpsO7X7trQOeHw=="], @@ -3696,13 
+3700,13 @@ "@aws-sdk/middleware-expect-continue/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], - "@aws-sdk/middleware-flexible-checksums/@aws-sdk/core": ["@aws-sdk/core@3.973.4", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.2", "@smithy/core": "^3.22.0", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-8Rk+kPP74YiR47x54bxYlKZswsaSh0a4XvvRUMLvyS/koNawhsGu/+qSZxREqUeTO+GkKpFvSQIsAZR+deUP+g=="], + "@aws-sdk/middleware-flexible-checksums/@aws-sdk/core": ["@aws-sdk/core@3.973.7", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.4", "@smithy/core": "^3.22.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-wNZZQQNlJ+hzD49cKdo+PY6rsTDElO8yDImnrI69p2PLBa7QomeUKAJWYp9xnaR38nlHqWhMHZuYLCQ3oSX+xg=="], "@aws-sdk/middleware-flexible-checksums/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], "@aws-sdk/middleware-location-constraint/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, 
"sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], - "@aws-sdk/middleware-sdk-s3/@aws-sdk/core": ["@aws-sdk/core@3.973.4", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.2", "@smithy/core": "^3.22.0", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-8Rk+kPP74YiR47x54bxYlKZswsaSh0a4XvvRUMLvyS/koNawhsGu/+qSZxREqUeTO+GkKpFvSQIsAZR+deUP+g=="], + "@aws-sdk/middleware-sdk-s3/@aws-sdk/core": ["@aws-sdk/core@3.973.7", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@aws-sdk/xml-builder": "^3.972.4", "@smithy/core": "^3.22.1", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-wNZZQQNlJ+hzD49cKdo+PY6rsTDElO8yDImnrI69p2PLBa7QomeUKAJWYp9xnaR38nlHqWhMHZuYLCQ3oSX+xg=="], "@aws-sdk/middleware-sdk-s3/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], @@ -3712,9 +3716,7 @@ "@aws-sdk/s3-request-presigner/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], - "@aws-sdk/signature-v4-multi-region/@aws-sdk/middleware-sdk-s3": ["@aws-sdk/middleware-sdk-s3@3.972.0", "", { 
"dependencies": { "@aws-sdk/core": "3.972.0", "@aws-sdk/types": "3.972.0", "@aws-sdk/util-arn-parser": "3.972.0", "@smithy/core": "^3.20.6", "@smithy/node-config-provider": "^4.3.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.10.8", "@smithy/types": "^4.12.0", "@smithy/util-config-provider": "^4.2.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-stream": "^4.5.10", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-0bcKFXWx+NZ7tIlOo7KjQ+O2rydiHdIQahrq+fN6k9Osky29v17guy68urUKfhTobR6iY6KvxkroFWaFtTgS5w=="], - - "@aws-sdk/signature-v4-multi-region/@aws-sdk/types": ["@aws-sdk/types@3.972.0", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-U7xBIbLSetONxb2bNzHyDgND3oKGoIfmknrEVnoEU4GUSs+0augUOIn9DIWGUO2ETcRFdsRUnmx9KhPT9Ojbug=="], + "@aws-sdk/signature-v4-multi-region/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], "@aws-sdk/util-format-url/@aws-sdk/types": ["@aws-sdk/types@3.973.1", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg=="], @@ -3752,12 +3754,6 @@ "@inquirer/external-editor/iconv-lite": ["iconv-lite@0.7.1", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-2Tth85cXwGFHfvRgZWszZSvdo+0Xsqmw8k8ZwxScfcBneNUraK+dxRxRm24nszx80Y0TVio8kKLt5sLE7ZCLlw=="], - "@isaacs/cliui/string-width": ["string-width@5.1.2", "", { "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", "strip-ansi": "^7.0.1" } }, "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA=="], - - "@isaacs/cliui/strip-ansi": ["strip-ansi@7.1.2", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, 
"sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA=="], - - "@isaacs/cliui/wrap-ansi": ["wrap-ansi@8.1.0", "", { "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", "strip-ansi": "^7.0.1" } }, "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ=="], - "@langchain/core/ansi-styles": ["ansi-styles@5.2.0", "", {}, "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA=="], "@langchain/core/uuid": ["uuid@10.0.0", "", { "bin": { "uuid": "dist/bin/uuid" } }, "sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ=="], @@ -3968,7 +3964,7 @@ "c12/chokidar": ["chokidar@4.0.3", "", { "dependencies": { "readdirp": "^4.0.1" } }, "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA=="], - "c12/confbox": ["confbox@0.2.2", "", {}, "sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ=="], + "c12/confbox": ["confbox@0.2.4", "", {}, "sha512-ysOGlgTFbN2/Y6Cg3Iye8YKulHw+R2fNXHrgSmXISQdMnomY6eNDprVdW9R5xBguEqI954+S6709UyiO7B+6OQ=="], "c12/pkg-types": ["pkg-types@2.3.0", "", { "dependencies": { "confbox": "^0.2.2", "exsolve": "^1.0.7", "pathe": "^2.0.3" } }, "sha512-SIqCzDRg0s9npO5XQ3tNZioRY1uK06lA41ynBC1YmFTmnY6FjUjVt6s4LoADmwoig1qqD0oK8h1p/8mlMx8Oig=="], @@ -4022,7 +4018,7 @@ "form-data/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="], - "fumadocs-mdx/esbuild": ["esbuild@0.27.2", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.2", "@esbuild/android-arm": "0.27.2", "@esbuild/android-arm64": "0.27.2", "@esbuild/android-x64": "0.27.2", "@esbuild/darwin-arm64": "0.27.2", "@esbuild/darwin-x64": "0.27.2", "@esbuild/freebsd-arm64": "0.27.2", "@esbuild/freebsd-x64": "0.27.2", 
"@esbuild/linux-arm": "0.27.2", "@esbuild/linux-arm64": "0.27.2", "@esbuild/linux-ia32": "0.27.2", "@esbuild/linux-loong64": "0.27.2", "@esbuild/linux-mips64el": "0.27.2", "@esbuild/linux-ppc64": "0.27.2", "@esbuild/linux-riscv64": "0.27.2", "@esbuild/linux-s390x": "0.27.2", "@esbuild/linux-x64": "0.27.2", "@esbuild/netbsd-arm64": "0.27.2", "@esbuild/netbsd-x64": "0.27.2", "@esbuild/openbsd-arm64": "0.27.2", "@esbuild/openbsd-x64": "0.27.2", "@esbuild/openharmony-arm64": "0.27.2", "@esbuild/sunos-x64": "0.27.2", "@esbuild/win32-arm64": "0.27.2", "@esbuild/win32-ia32": "0.27.2", "@esbuild/win32-x64": "0.27.2" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw=="], + "fumadocs-mdx/esbuild": ["esbuild@0.27.3", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.3", "@esbuild/android-arm": "0.27.3", "@esbuild/android-arm64": "0.27.3", "@esbuild/android-x64": "0.27.3", "@esbuild/darwin-arm64": "0.27.3", "@esbuild/darwin-x64": "0.27.3", "@esbuild/freebsd-arm64": "0.27.3", "@esbuild/freebsd-x64": "0.27.3", "@esbuild/linux-arm": "0.27.3", "@esbuild/linux-arm64": "0.27.3", "@esbuild/linux-ia32": "0.27.3", "@esbuild/linux-loong64": "0.27.3", "@esbuild/linux-mips64el": "0.27.3", "@esbuild/linux-ppc64": "0.27.3", "@esbuild/linux-riscv64": "0.27.3", "@esbuild/linux-s390x": "0.27.3", "@esbuild/linux-x64": "0.27.3", "@esbuild/netbsd-arm64": "0.27.3", "@esbuild/netbsd-x64": "0.27.3", "@esbuild/openbsd-arm64": "0.27.3", "@esbuild/openbsd-x64": "0.27.3", "@esbuild/openharmony-arm64": "0.27.3", "@esbuild/sunos-x64": "0.27.3", "@esbuild/win32-arm64": "0.27.3", "@esbuild/win32-ia32": "0.27.3", "@esbuild/win32-x64": "0.27.3" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg=="], "fumadocs-mdx/js-yaml": ["js-yaml@4.1.1", "", { "dependencies": { "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, 
"sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA=="], @@ -4186,15 +4182,15 @@ "sim/nanoid": ["nanoid@3.3.11", "", { "bin": { "nanoid": "bin/nanoid.cjs" } }, "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w=="], - "sim/tailwind-merge": ["tailwind-merge@2.6.0", "", {}, "sha512-P+Vu1qXfzediirmHOC3xKGAYeZtPcV9g76X+xg2FD4tYgR71ewMA35Y3sCz3zhiN/dwefRpJX0yBcgwi1fXNQA=="], + "sim/tailwind-merge": ["tailwind-merge@2.6.1", "", {}, "sha512-Oo6tHdpZsGpkKG88HJ8RR1rg/RdnEkQEfMoEk2x1XRI3F1AxeU+ijRXpiVUF4UbLfcxxRGw6TbUINKYdWVsQTQ=="], "sim/tailwindcss": ["tailwindcss@3.4.19", "", { "dependencies": { "@alloc/quick-lru": "^5.2.0", "arg": "^5.0.2", "chokidar": "^3.6.0", "didyoumean": "^1.2.2", "dlv": "^1.1.3", "fast-glob": "^3.3.2", "glob-parent": "^6.0.2", "is-glob": "^4.0.3", "jiti": "^1.21.7", "lilconfig": "^3.1.3", "micromatch": "^4.0.8", "normalize-path": "^3.0.0", "object-hash": "^3.0.0", "picocolors": "^1.1.1", "postcss": "^8.4.47", "postcss-import": "^15.1.0", "postcss-js": "^4.0.1", "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", "postcss-nested": "^6.2.0", "postcss-selector-parser": "^6.1.2", "resolve": "^1.22.8", "sucrase": "^3.35.0" }, "bin": { "tailwind": "lib/cli.js", "tailwindcss": "lib/cli.js" } }, "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ=="], - "simstudio/@types/node": ["@types/node@20.19.30", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-WJtwWJu7UdlvzEAUm484QNg5eAoq5QR08KDNx7g45Usrs2NtOPiX8ugDqmKdXkyL03rBqU5dYNYVQetEpBHq2g=="], + "simstudio/@types/node": ["@types/node@20.19.33", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-Rs1bVAIdBs5gbTIKza/tgpMuG1k3U/UMJLWecIMxNdJFDMzcM5LOiLVRYh3PilWEYDIeUDv7bpiHPLPsbydGcw=="], "simstudio/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, 
"sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - "simstudio-ts-sdk/@types/node": ["@types/node@20.19.30", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-WJtwWJu7UdlvzEAUm484QNg5eAoq5QR08KDNx7g45Usrs2NtOPiX8ugDqmKdXkyL03rBqU5dYNYVQetEpBHq2g=="], + "simstudio-ts-sdk/@types/node": ["@types/node@20.19.33", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-Rs1bVAIdBs5gbTIKza/tgpMuG1k3U/UMJLWecIMxNdJFDMzcM5LOiLVRYh3PilWEYDIeUDv7bpiHPLPsbydGcw=="], "slice-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], @@ -4212,6 +4208,8 @@ "string_decoder/safe-buffer": ["safe-buffer@5.1.2", "", {}, "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="], + "strip-literal/js-tokens": ["js-tokens@9.0.1", "", {}, "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ=="], + "sucrase/commander": ["commander@4.1.1", "", {}, "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA=="], "tar-fs/chownr": ["chownr@1.1.4", "", {}, "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg=="], @@ -4240,7 +4238,7 @@ "unist-util-remove/unist-util-visit-parents": ["unist-util-visit-parents@5.1.3", "", { "dependencies": { "@types/unist": "^2.0.0", "unist-util-is": "^5.0.0" } }, "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg=="], - "vite/esbuild": ["esbuild@0.27.2", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.2", "@esbuild/android-arm": "0.27.2", "@esbuild/android-arm64": "0.27.2", "@esbuild/android-x64": "0.27.2", "@esbuild/darwin-arm64": "0.27.2", "@esbuild/darwin-x64": "0.27.2", "@esbuild/freebsd-arm64": "0.27.2", "@esbuild/freebsd-x64": "0.27.2", "@esbuild/linux-arm": "0.27.2", "@esbuild/linux-arm64": 
"0.27.2", "@esbuild/linux-ia32": "0.27.2", "@esbuild/linux-loong64": "0.27.2", "@esbuild/linux-mips64el": "0.27.2", "@esbuild/linux-ppc64": "0.27.2", "@esbuild/linux-riscv64": "0.27.2", "@esbuild/linux-s390x": "0.27.2", "@esbuild/linux-x64": "0.27.2", "@esbuild/netbsd-arm64": "0.27.2", "@esbuild/netbsd-x64": "0.27.2", "@esbuild/openbsd-arm64": "0.27.2", "@esbuild/openbsd-x64": "0.27.2", "@esbuild/openharmony-arm64": "0.27.2", "@esbuild/sunos-x64": "0.27.2", "@esbuild/win32-arm64": "0.27.2", "@esbuild/win32-ia32": "0.27.2", "@esbuild/win32-x64": "0.27.2" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw=="], + "vite/esbuild": ["esbuild@0.27.3", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.3", "@esbuild/android-arm": "0.27.3", "@esbuild/android-arm64": "0.27.3", "@esbuild/android-x64": "0.27.3", "@esbuild/darwin-arm64": "0.27.3", "@esbuild/darwin-x64": "0.27.3", "@esbuild/freebsd-arm64": "0.27.3", "@esbuild/freebsd-x64": "0.27.3", "@esbuild/linux-arm": "0.27.3", "@esbuild/linux-arm64": "0.27.3", "@esbuild/linux-ia32": "0.27.3", "@esbuild/linux-loong64": "0.27.3", "@esbuild/linux-mips64el": "0.27.3", "@esbuild/linux-ppc64": "0.27.3", "@esbuild/linux-riscv64": "0.27.3", "@esbuild/linux-s390x": "0.27.3", "@esbuild/linux-x64": "0.27.3", "@esbuild/netbsd-arm64": "0.27.3", "@esbuild/netbsd-x64": "0.27.3", "@esbuild/openbsd-arm64": "0.27.3", "@esbuild/openbsd-x64": "0.27.3", "@esbuild/openharmony-arm64": "0.27.3", "@esbuild/sunos-x64": "0.27.3", "@esbuild/win32-arm64": "0.27.3", "@esbuild/win32-ia32": "0.27.3", "@esbuild/win32-x64": "0.27.3" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg=="], "xml-crypto/xpath": ["xpath@0.0.33", "", {}, "sha512-NNXnzrkDrAzalLhIUc01jO2mOzXGXh1JwPgkihcLLzw98c0WgYDmmjSh1Kl3wzaxSVWMuA+fe0WTWOBDWCBmNA=="], @@ -4252,37 +4250,37 @@ 
"@aws-crypto/util/@smithy/util-utf8/@smithy/util-buffer-from": ["@smithy/util-buffer-from@2.2.0", "", { "dependencies": { "@smithy/is-array-buffer": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA=="], - "@aws-sdk/client-s3/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.2", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.2.5", "tslib": "^2.6.2" } }, "sha512-jGOOV/bV1DhkkUhHiZ3/1GZ67cZyOXaDb7d1rYD6ZiXf5V9tBNOcgqXwRRPvrCbYaFRa1pPMFb3ZjqjWpR3YfA=="], + "@aws-sdk/client-s3/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.4", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.3.4", "tslib": "^2.6.2" } }, "sha512-0zJ05ANfYqI6+rGqj8samZBFod0dPPousBjLEqg8WdxSgbMAkRgLyn81lP215Do0rFJ/17LIXwr7q0yK24mP6Q=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-wzH1EdrZsytG1xN9UHaK12J9+kfrnd2+c8y0LVoS4O4laEjPoie1qVK3k8/rZe7KOtvULzyMnO3FT4Krr9Z0Dg=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-LxJ9PEO4gKPXzkufvIESUysykPIdrV7+Ocb9yAhbhJLE4TiAYqbCVUE+VuKP1leGR1bBfjWjYgSV5MxprlX3mQ=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-http": ["@aws-sdk/credential-provider-http@3.972.4", "", { "dependencies": { "@aws-sdk/core": "^3.973.4", "@aws-sdk/types": "^3.973.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/node-http-handler": "^4.4.8", 
"@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-stream": "^4.5.10", "tslib": "^2.6.2" } }, "sha512-OC7F3ipXV12QfDEWybQGHLzoeHBlAdx/nLzPfHP0Wsabu3JBffu5nlzSaJNf7to9HGtOW8Bpu8NX0ugmDrCbtw=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-http": ["@aws-sdk/credential-provider-http@3.972.7", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/node-http-handler": "^4.4.9", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-stream": "^4.5.11", "tslib": "^2.6.2" } }, "sha512-L2uOGtvp2x3bTcxFTpSM+GkwFIPd8pHfGWO1764icMbo7e5xJh0nfhx1UwkXLnwvocTNEf8A7jISZLYjUSNaTg=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini": ["@aws-sdk/credential-provider-ini@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/credential-provider-env": "^3.972.2", "@aws-sdk/credential-provider-http": "^3.972.3", "@aws-sdk/credential-provider-login": "^3.972.2", "@aws-sdk/credential-provider-process": "^3.972.2", "@aws-sdk/credential-provider-sso": "^3.972.2", "@aws-sdk/credential-provider-web-identity": "^3.972.2", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-Jrb8sLm6k8+L7520irBrvCtdLxNtrG7arIxe9TCeMJt/HxqMGJdbIjw8wILzkEHLMIi4MecF2FbXCln7OT1Tag=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini": ["@aws-sdk/credential-provider-ini@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/credential-provider-env": "^3.972.5", 
"@aws-sdk/credential-provider-http": "^3.972.7", "@aws-sdk/credential-provider-login": "^3.972.5", "@aws-sdk/credential-provider-process": "^3.972.5", "@aws-sdk/credential-provider-sso": "^3.972.5", "@aws-sdk/credential-provider-web-identity": "^3.972.5", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-SdDTYE6jkARzOeL7+kudMIM4DaFnP5dZVeatzw849k4bSXDdErDS188bgeNzc/RA2WGrlEpsqHUKP6G7sVXhZg=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-process": ["@aws-sdk/credential-provider-process@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-NLKLTT7jnUe9GpQAVkPTJO+cs2FjlQDt5fArIYS7h/Iw/CvamzgGYGFRVD2SE05nOHCMwafUSi42If8esGFV+g=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-process": ["@aws-sdk/credential-provider-process@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-HDKF3mVbLnuqGg6dMnzBf1VUOywE12/N286msI9YaK9mEIzdsGCtLTvrDhe3Up0R9/hGFbB+9l21/TwF5L1C6g=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso": ["@aws-sdk/credential-provider-sso@3.972.2", "", { "dependencies": { "@aws-sdk/client-sso": "3.975.0", "@aws-sdk/core": "^3.973.2", "@aws-sdk/token-providers": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, 
"sha512-YpwDn8g3gCGUl61cCV0sRxP2pFIwg+ZsMfWQ/GalSyjXtRkctCMFA+u0yPb/Q4uTfNEiya1Y4nm0C5rIHyPW5Q=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso": ["@aws-sdk/credential-provider-sso@3.972.5", "", { "dependencies": { "@aws-sdk/client-sso": "3.985.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/token-providers": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-8urj3AoeNeQisjMmMBhFeiY2gxt6/7wQQbEGun0YV/OaOOiXrIudTIEYF8ZfD+NQI6X1FY5AkRsx6O/CaGiybA=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-x9DAiN9Qz+NjJ99ltDiVQ8d511M/tuF/9MFbe2jUgo7HZhD6+x4S3iT1YcP07ndwDUjmzKGmeOEgE24k4qvfdg=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-OK3cULuJl6c+RcDZfPpaK5o3deTOnKZbxm7pzhFNGA3fI2hF9yDih17fGRazJzGGWaDVlR9ejZrpDef4DJCEsw=="], - "@aws-sdk/client-s3/@aws-sdk/util-endpoints/@aws-sdk/types": ["@aws-sdk/types@3.972.0", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-U7xBIbLSetONxb2bNzHyDgND3oKGoIfmknrEVnoEU4GUSs+0augUOIn9DIWGUO2ETcRFdsRUnmx9KhPT9Ojbug=="], + "@aws-sdk/client-s3/@aws-sdk/middleware-user-agent/@aws-sdk/util-endpoints": 
["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], - "@aws-sdk/client-sesv2/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.2", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.2.5", "tslib": "^2.6.2" } }, "sha512-jGOOV/bV1DhkkUhHiZ3/1GZ67cZyOXaDb7d1rYD6ZiXf5V9tBNOcgqXwRRPvrCbYaFRa1pPMFb3ZjqjWpR3YfA=="], + "@aws-sdk/client-sesv2/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.4", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.3.4", "tslib": "^2.6.2" } }, "sha512-0zJ05ANfYqI6+rGqj8samZBFod0dPPousBjLEqg8WdxSgbMAkRgLyn81lP215Do0rFJ/17LIXwr7q0yK24mP6Q=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-wzH1EdrZsytG1xN9UHaK12J9+kfrnd2+c8y0LVoS4O4laEjPoie1qVK3k8/rZe7KOtvULzyMnO3FT4Krr9Z0Dg=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-LxJ9PEO4gKPXzkufvIESUysykPIdrV7+Ocb9yAhbhJLE4TiAYqbCVUE+VuKP1leGR1bBfjWjYgSV5MxprlX3mQ=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-http": ["@aws-sdk/credential-provider-http@3.972.4", "", { "dependencies": { "@aws-sdk/core": "^3.973.4", "@aws-sdk/types": "^3.973.1", "@smithy/fetch-http-handler": "^5.3.9", 
"@smithy/node-http-handler": "^4.4.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.1", "@smithy/types": "^4.12.0", "@smithy/util-stream": "^4.5.10", "tslib": "^2.6.2" } }, "sha512-OC7F3ipXV12QfDEWybQGHLzoeHBlAdx/nLzPfHP0Wsabu3JBffu5nlzSaJNf7to9HGtOW8Bpu8NX0ugmDrCbtw=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-http": ["@aws-sdk/credential-provider-http@3.972.7", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/node-http-handler": "^4.4.9", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/util-stream": "^4.5.11", "tslib": "^2.6.2" } }, "sha512-L2uOGtvp2x3bTcxFTpSM+GkwFIPd8pHfGWO1764icMbo7e5xJh0nfhx1UwkXLnwvocTNEf8A7jISZLYjUSNaTg=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini": ["@aws-sdk/credential-provider-ini@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/credential-provider-env": "^3.972.2", "@aws-sdk/credential-provider-http": "^3.972.3", "@aws-sdk/credential-provider-login": "^3.972.2", "@aws-sdk/credential-provider-process": "^3.972.2", "@aws-sdk/credential-provider-sso": "^3.972.2", "@aws-sdk/credential-provider-web-identity": "^3.972.2", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-Jrb8sLm6k8+L7520irBrvCtdLxNtrG7arIxe9TCeMJt/HxqMGJdbIjw8wILzkEHLMIi4MecF2FbXCln7OT1Tag=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini": ["@aws-sdk/credential-provider-ini@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", 
"@aws-sdk/credential-provider-env": "^3.972.5", "@aws-sdk/credential-provider-http": "^3.972.7", "@aws-sdk/credential-provider-login": "^3.972.5", "@aws-sdk/credential-provider-process": "^3.972.5", "@aws-sdk/credential-provider-sso": "^3.972.5", "@aws-sdk/credential-provider-web-identity": "^3.972.5", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/credential-provider-imds": "^4.2.8", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-SdDTYE6jkARzOeL7+kudMIM4DaFnP5dZVeatzw849k4bSXDdErDS188bgeNzc/RA2WGrlEpsqHUKP6G7sVXhZg=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-process": ["@aws-sdk/credential-provider-process@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-NLKLTT7jnUe9GpQAVkPTJO+cs2FjlQDt5fArIYS7h/Iw/CvamzgGYGFRVD2SE05nOHCMwafUSi42If8esGFV+g=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-process": ["@aws-sdk/credential-provider-process@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-HDKF3mVbLnuqGg6dMnzBf1VUOywE12/N286msI9YaK9mEIzdsGCtLTvrDhe3Up0R9/hGFbB+9l21/TwF5L1C6g=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso": ["@aws-sdk/credential-provider-sso@3.972.2", "", { "dependencies": { "@aws-sdk/client-sso": "3.975.0", "@aws-sdk/core": "^3.973.2", "@aws-sdk/token-providers": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": 
"^2.6.2" } }, "sha512-YpwDn8g3gCGUl61cCV0sRxP2pFIwg+ZsMfWQ/GalSyjXtRkctCMFA+u0yPb/Q4uTfNEiya1Y4nm0C5rIHyPW5Q=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso": ["@aws-sdk/credential-provider-sso@3.972.5", "", { "dependencies": { "@aws-sdk/client-sso": "3.985.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/token-providers": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-8urj3AoeNeQisjMmMBhFeiY2gxt6/7wQQbEGun0YV/OaOOiXrIudTIEYF8ZfD+NQI6X1FY5AkRsx6O/CaGiybA=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-x9DAiN9Qz+NjJ99ltDiVQ8d511M/tuF/9MFbe2jUgo7HZhD6+x4S3iT1YcP07ndwDUjmzKGmeOEgE24k4qvfdg=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-OK3cULuJl6c+RcDZfPpaK5o3deTOnKZbxm7pzhFNGA3fI2hF9yDih17fGRazJzGGWaDVlR9ejZrpDef4DJCEsw=="], - "@aws-sdk/client-sesv2/@aws-sdk/util-endpoints/@aws-sdk/types": ["@aws-sdk/types@3.972.0", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-U7xBIbLSetONxb2bNzHyDgND3oKGoIfmknrEVnoEU4GUSs+0augUOIn9DIWGUO2ETcRFdsRUnmx9KhPT9Ojbug=="], + 
"@aws-sdk/client-sesv2/@aws-sdk/middleware-user-agent/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], "@aws-sdk/client-sqs/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.947.0", "", { "dependencies": { "@aws-sdk/core": "3.947.0", "@aws-sdk/types": "3.936.0", "@smithy/property-provider": "^4.2.5", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-VR2V6dRELmzwAsCpK4GqxUi6UW5WNhAXS9F9AzWi5jvijwJo3nH92YNJUP4quMpgFZxJHEWyXLWgPjh9u0zYOA=="], @@ -4296,13 +4294,9 @@ "@aws-sdk/client-sqs/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.947.0", "", { "dependencies": { "@aws-sdk/core": "3.947.0", "@aws-sdk/nested-clients": "3.947.0", "@aws-sdk/types": "3.936.0", "@smithy/property-provider": "^4.2.5", "@smithy/shared-ini-file-loader": "^4.4.0", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-gokm/e/YHiHLrZgLq4j8tNAn8RJDPbIcglFRKgy08q8DmAqHQ8MXAKW3eS0QjAuRXU9mcMmUo1NrX6FRNBCCPw=="], - "@aws-sdk/middleware-flexible-checksums/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.2", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.2.5", "tslib": "^2.6.2" } }, "sha512-jGOOV/bV1DhkkUhHiZ3/1GZ67cZyOXaDb7d1rYD6ZiXf5V9tBNOcgqXwRRPvrCbYaFRa1pPMFb3ZjqjWpR3YfA=="], + "@aws-sdk/middleware-flexible-checksums/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.4", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.3.4", "tslib": "^2.6.2" } }, "sha512-0zJ05ANfYqI6+rGqj8samZBFod0dPPousBjLEqg8WdxSgbMAkRgLyn81lP215Do0rFJ/17LIXwr7q0yK24mP6Q=="], - 
"@aws-sdk/middleware-sdk-s3/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.2", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.2.5", "tslib": "^2.6.2" } }, "sha512-jGOOV/bV1DhkkUhHiZ3/1GZ67cZyOXaDb7d1rYD6ZiXf5V9tBNOcgqXwRRPvrCbYaFRa1pPMFb3ZjqjWpR3YfA=="], - - "@aws-sdk/signature-v4-multi-region/@aws-sdk/middleware-sdk-s3/@aws-sdk/core": ["@aws-sdk/core@3.972.0", "", { "dependencies": { "@aws-sdk/types": "3.972.0", "@aws-sdk/xml-builder": "3.972.0", "@smithy/core": "^3.20.6", "@smithy/node-config-provider": "^4.3.8", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/signature-v4": "^5.3.8", "@smithy/smithy-client": "^4.10.8", "@smithy/types": "^4.12.0", "@smithy/util-base64": "^4.3.0", "@smithy/util-middleware": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-nEeUW2M9F+xdIaD98F5MBcQ4ITtykj3yKbgFZ6J0JtL3bq+Z90szQ6Yy8H/BLPYXTs3V4n9ifnBo8cprRDiE6A=="], - - "@aws-sdk/signature-v4-multi-region/@aws-sdk/middleware-sdk-s3/@aws-sdk/util-arn-parser": ["@aws-sdk/util-arn-parser@3.972.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-RM5Mmo/KJ593iMSrALlHEOcc9YOIyOsDmS5x2NLOMdEmzv1o00fcpAkCQ02IGu1eFneBFT7uX0Mpag0HI+Cz2g=="], + "@aws-sdk/middleware-sdk-s3/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.4", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.3.4", "tslib": "^2.6.2" } }, "sha512-0zJ05ANfYqI6+rGqj8samZBFod0dPPousBjLEqg8WdxSgbMAkRgLyn81lP215Do0rFJ/17LIXwr7q0yK24mP6Q=="], "@babel/helper-compilation-targets/lru-cache/yallist": ["yallist@3.1.1", "", {}, "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="], @@ -4362,12 +4356,6 @@ "@esbuild-kit/core-utils/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.18.20", "", { "os": "win32", "cpu": "x64" }, "sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ=="], - 
"@isaacs/cliui/string-width/emoji-regex": ["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], - - "@isaacs/cliui/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], - - "@isaacs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], - "@octokit/plugin-paginate-rest/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@24.2.0", "", {}, "sha512-9sIH3nSUttelJSXUrmGzl7QUBFul0/mB8HRYl3fOlgHbIWG+WnYDXU3v/2zMtAvuzZ/ed00Ei6on975FhBfzrg=="], "@octokit/plugin-rest-endpoint-methods/@octokit/types/@octokit/openapi-types": ["@octokit/openapi-types@24.2.0", "", {}, "sha512-9sIH3nSUttelJSXUrmGzl7QUBFul0/mB8HRYl3fOlgHbIWG+WnYDXU3v/2zMtAvuzZ/ed00Ei6on975FhBfzrg=="], @@ -4452,63 +4440,63 @@ "engine.io/@types/node/undici-types": ["undici-types@7.10.0", "", {}, "sha512-t5Fy/nfn+14LuOc2KNYg75vZqClpAiqscVvMygNnlsHBFpSXdJaYtXMcdNLpl/Qvc3P2cB3s6lOV51nqsFq4ag=="], - "fetch-cookie/tough-cookie/tldts": ["tldts@7.0.19", "", { "dependencies": { "tldts-core": "^7.0.19" }, "bin": { "tldts": "bin/cli.js" } }, "sha512-8PWx8tvC4jDB39BQw1m4x8y5MH1BcQ5xHeL2n7UVFulMPH/3Q0uiamahFJ3lXA0zO2SUyRXuVVbWSDmstlt9YA=="], + "fetch-cookie/tough-cookie/tldts": ["tldts@7.0.23", "", { "dependencies": { "tldts-core": "^7.0.23" }, "bin": { "tldts": "bin/cli.js" } }, "sha512-ASdhgQIBSay0R/eXggAkQ53G4nTJqTXqC2kbaBbdDwM7SkjyZyO0OaaN1/FH7U/yCeqOHDwFO5j8+Os/IS1dXw=="], "ffmpeg-static/https-proxy-agent/agent-base": ["agent-base@6.0.2", "", { "dependencies": { "debug": "4" } }, "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ=="], "form-data/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="], - 
"fumadocs-mdx/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.2", "", { "os": "aix", "cpu": "ppc64" }, "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw=="], + "fumadocs-mdx/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.3", "", { "os": "aix", "cpu": "ppc64" }, "sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg=="], - "fumadocs-mdx/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.27.2", "", { "os": "android", "cpu": "arm" }, "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA=="], + "fumadocs-mdx/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.27.3", "", { "os": "android", "cpu": "arm" }, "sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA=="], - "fumadocs-mdx/esbuild/@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.2", "", { "os": "android", "cpu": "arm64" }, "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA=="], + "fumadocs-mdx/esbuild/@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.3", "", { "os": "android", "cpu": "arm64" }, "sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg=="], - "fumadocs-mdx/esbuild/@esbuild/android-x64": ["@esbuild/android-x64@0.27.2", "", { "os": "android", "cpu": "x64" }, "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A=="], + "fumadocs-mdx/esbuild/@esbuild/android-x64": ["@esbuild/android-x64@0.27.3", "", { "os": "android", "cpu": "x64" }, "sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ=="], - "fumadocs-mdx/esbuild/@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.2", "", { "os": "darwin", "cpu": "arm64" }, "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg=="], + 
"fumadocs-mdx/esbuild/@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.3", "", { "os": "darwin", "cpu": "arm64" }, "sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg=="], - "fumadocs-mdx/esbuild/@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.2", "", { "os": "darwin", "cpu": "x64" }, "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA=="], + "fumadocs-mdx/esbuild/@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.3", "", { "os": "darwin", "cpu": "x64" }, "sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg=="], - "fumadocs-mdx/esbuild/@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.2", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g=="], + "fumadocs-mdx/esbuild/@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.3", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w=="], - "fumadocs-mdx/esbuild/@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.2", "", { "os": "freebsd", "cpu": "x64" }, "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA=="], + "fumadocs-mdx/esbuild/@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.3", "", { "os": "freebsd", "cpu": "x64" }, "sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA=="], - "fumadocs-mdx/esbuild/@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.2", "", { "os": "linux", "cpu": "arm" }, "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw=="], + "fumadocs-mdx/esbuild/@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.3", "", { "os": "linux", "cpu": "arm" }, "sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw=="], - 
"fumadocs-mdx/esbuild/@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.2", "", { "os": "linux", "cpu": "arm64" }, "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw=="], + "fumadocs-mdx/esbuild/@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.3", "", { "os": "linux", "cpu": "arm64" }, "sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg=="], - "fumadocs-mdx/esbuild/@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.2", "", { "os": "linux", "cpu": "ia32" }, "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w=="], + "fumadocs-mdx/esbuild/@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.3", "", { "os": "linux", "cpu": "ia32" }, "sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg=="], - "fumadocs-mdx/esbuild/@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg=="], + "fumadocs-mdx/esbuild/@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA=="], - "fumadocs-mdx/esbuild/@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw=="], + "fumadocs-mdx/esbuild/@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw=="], - "fumadocs-mdx/esbuild/@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.2", "", { "os": "linux", "cpu": "ppc64" }, "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ=="], + 
"fumadocs-mdx/esbuild/@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.3", "", { "os": "linux", "cpu": "ppc64" }, "sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA=="], - "fumadocs-mdx/esbuild/@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA=="], + "fumadocs-mdx/esbuild/@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ=="], - "fumadocs-mdx/esbuild/@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.2", "", { "os": "linux", "cpu": "s390x" }, "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w=="], + "fumadocs-mdx/esbuild/@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.3", "", { "os": "linux", "cpu": "s390x" }, "sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw=="], - "fumadocs-mdx/esbuild/@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.2", "", { "os": "linux", "cpu": "x64" }, "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA=="], + "fumadocs-mdx/esbuild/@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.3", "", { "os": "linux", "cpu": "x64" }, "sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA=="], - "fumadocs-mdx/esbuild/@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.2", "", { "os": "none", "cpu": "arm64" }, "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw=="], + "fumadocs-mdx/esbuild/@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.3", "", { "os": "none", "cpu": "arm64" }, "sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA=="], - 
"fumadocs-mdx/esbuild/@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.2", "", { "os": "none", "cpu": "x64" }, "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA=="], + "fumadocs-mdx/esbuild/@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.3", "", { "os": "none", "cpu": "x64" }, "sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA=="], - "fumadocs-mdx/esbuild/@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.2", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA=="], + "fumadocs-mdx/esbuild/@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.3", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw=="], - "fumadocs-mdx/esbuild/@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.2", "", { "os": "openbsd", "cpu": "x64" }, "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg=="], + "fumadocs-mdx/esbuild/@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.3", "", { "os": "openbsd", "cpu": "x64" }, "sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ=="], - "fumadocs-mdx/esbuild/@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.2", "", { "os": "none", "cpu": "arm64" }, "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag=="], + "fumadocs-mdx/esbuild/@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.3", "", { "os": "none", "cpu": "arm64" }, "sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g=="], - "fumadocs-mdx/esbuild/@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.2", "", { "os": "sunos", "cpu": "x64" }, "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg=="], + 
"fumadocs-mdx/esbuild/@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.3", "", { "os": "sunos", "cpu": "x64" }, "sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA=="], - "fumadocs-mdx/esbuild/@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.2", "", { "os": "win32", "cpu": "arm64" }, "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg=="], + "fumadocs-mdx/esbuild/@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.3", "", { "os": "win32", "cpu": "arm64" }, "sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA=="], - "fumadocs-mdx/esbuild/@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.2", "", { "os": "win32", "cpu": "ia32" }, "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ=="], + "fumadocs-mdx/esbuild/@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.3", "", { "os": "win32", "cpu": "ia32" }, "sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q=="], - "fumadocs-mdx/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.2", "", { "os": "win32", "cpu": "x64" }, "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ=="], + "fumadocs-mdx/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.3", "", { "os": "win32", "cpu": "x64" }, "sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA=="], "gray-matter/js-yaml/argparse": ["argparse@1.0.10", "", { "dependencies": { "sprintf-js": "~1.0.2" } }, "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg=="], @@ -4598,7 +4586,7 @@ "next/sharp/@img/sharp-win32-x64": ["@img/sharp-win32-x64@0.34.5", "", { "os": "win32", "cpu": "x64" }, "sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw=="], - "nypm/pkg-types/confbox": ["confbox@0.2.2", "", {}, 
"sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ=="], + "nypm/pkg-types/confbox": ["confbox@0.2.4", "", {}, "sha512-ysOGlgTFbN2/Y6Cg3Iye8YKulHw+R2fNXHrgSmXISQdMnomY6eNDprVdW9R5xBguEqI954+S6709UyiO7B+6OQ=="], "oauth2-mock-server/express/body-parser": ["body-parser@1.20.4", "", { "dependencies": { "bytes": "~3.1.2", "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", "destroy": "~1.2.0", "http-errors": "~2.0.1", "iconv-lite": "~0.4.24", "on-finished": "~2.4.1", "qs": "~6.14.0", "raw-body": "~2.5.3", "type-is": "~1.6.18", "unpipe": "~1.0.0" } }, "sha512-ZTgYYLMOXY9qKU/57FAo8F+HA2dGX7bqGc71txDRC1rS4frdFI5R7NhluHxH6M0YItAP0sHB4uqAOcYKxO6uGA=="], @@ -4674,57 +4662,57 @@ "twilio/https-proxy-agent/agent-base": ["agent-base@6.0.2", "", { "dependencies": { "debug": "4" } }, "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ=="], - "vite/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.2", "", { "os": "aix", "cpu": "ppc64" }, "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw=="], + "vite/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.3", "", { "os": "aix", "cpu": "ppc64" }, "sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg=="], - "vite/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.27.2", "", { "os": "android", "cpu": "arm" }, "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA=="], + "vite/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.27.3", "", { "os": "android", "cpu": "arm" }, "sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA=="], - "vite/esbuild/@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.2", "", { "os": "android", "cpu": "arm64" }, "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA=="], + 
"vite/esbuild/@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.3", "", { "os": "android", "cpu": "arm64" }, "sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg=="], - "vite/esbuild/@esbuild/android-x64": ["@esbuild/android-x64@0.27.2", "", { "os": "android", "cpu": "x64" }, "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A=="], + "vite/esbuild/@esbuild/android-x64": ["@esbuild/android-x64@0.27.3", "", { "os": "android", "cpu": "x64" }, "sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ=="], - "vite/esbuild/@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.2", "", { "os": "darwin", "cpu": "arm64" }, "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg=="], + "vite/esbuild/@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.3", "", { "os": "darwin", "cpu": "arm64" }, "sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg=="], - "vite/esbuild/@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.2", "", { "os": "darwin", "cpu": "x64" }, "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA=="], + "vite/esbuild/@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.3", "", { "os": "darwin", "cpu": "x64" }, "sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg=="], - "vite/esbuild/@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.2", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g=="], + "vite/esbuild/@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.3", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w=="], - "vite/esbuild/@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.2", "", { "os": "freebsd", 
"cpu": "x64" }, "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA=="], + "vite/esbuild/@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.3", "", { "os": "freebsd", "cpu": "x64" }, "sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA=="], - "vite/esbuild/@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.2", "", { "os": "linux", "cpu": "arm" }, "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw=="], + "vite/esbuild/@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.3", "", { "os": "linux", "cpu": "arm" }, "sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw=="], - "vite/esbuild/@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.2", "", { "os": "linux", "cpu": "arm64" }, "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw=="], + "vite/esbuild/@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.3", "", { "os": "linux", "cpu": "arm64" }, "sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg=="], - "vite/esbuild/@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.2", "", { "os": "linux", "cpu": "ia32" }, "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w=="], + "vite/esbuild/@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.3", "", { "os": "linux", "cpu": "ia32" }, "sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg=="], - "vite/esbuild/@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg=="], + "vite/esbuild/@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA=="], - 
"vite/esbuild/@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw=="], + "vite/esbuild/@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw=="], - "vite/esbuild/@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.2", "", { "os": "linux", "cpu": "ppc64" }, "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ=="], + "vite/esbuild/@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.3", "", { "os": "linux", "cpu": "ppc64" }, "sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA=="], - "vite/esbuild/@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA=="], + "vite/esbuild/@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ=="], - "vite/esbuild/@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.2", "", { "os": "linux", "cpu": "s390x" }, "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w=="], + "vite/esbuild/@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.3", "", { "os": "linux", "cpu": "s390x" }, "sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw=="], - "vite/esbuild/@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.2", "", { "os": "linux", "cpu": "x64" }, "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA=="], + "vite/esbuild/@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.3", "", { "os": "linux", "cpu": "x64" 
}, "sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA=="], - "vite/esbuild/@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.2", "", { "os": "none", "cpu": "arm64" }, "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw=="], + "vite/esbuild/@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.3", "", { "os": "none", "cpu": "arm64" }, "sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA=="], - "vite/esbuild/@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.2", "", { "os": "none", "cpu": "x64" }, "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA=="], + "vite/esbuild/@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.3", "", { "os": "none", "cpu": "x64" }, "sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA=="], - "vite/esbuild/@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.2", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA=="], + "vite/esbuild/@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.3", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw=="], - "vite/esbuild/@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.2", "", { "os": "openbsd", "cpu": "x64" }, "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg=="], + "vite/esbuild/@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.3", "", { "os": "openbsd", "cpu": "x64" }, "sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ=="], - "vite/esbuild/@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.2", "", { "os": "none", "cpu": "arm64" }, 
"sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag=="], + "vite/esbuild/@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.3", "", { "os": "none", "cpu": "arm64" }, "sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g=="], - "vite/esbuild/@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.2", "", { "os": "sunos", "cpu": "x64" }, "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg=="], + "vite/esbuild/@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.3", "", { "os": "sunos", "cpu": "x64" }, "sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA=="], - "vite/esbuild/@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.2", "", { "os": "win32", "cpu": "arm64" }, "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg=="], + "vite/esbuild/@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.3", "", { "os": "win32", "cpu": "arm64" }, "sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA=="], - "vite/esbuild/@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.2", "", { "os": "win32", "cpu": "ia32" }, "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ=="], + "vite/esbuild/@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.3", "", { "os": "win32", "cpu": "ia32" }, "sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q=="], - "vite/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.2", "", { "os": "win32", "cpu": "x64" }, "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ=="], + "vite/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.3", "", { "os": "win32", "cpu": "x64" }, "sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA=="], 
"@aws-crypto/sha1-browser/@smithy/util-utf8/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA=="], @@ -4732,29 +4720,29 @@ "@aws-crypto/util/@smithy/util-utf8/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA=="], - "@aws-sdk/client-s3/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.2.5", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-pfX9uG9Ki0yekDHx2SiuRIyFdyAr1kMIMitPvb0YBo8SUfKvia7w7FIyd/l6av85pFYRhZscS75MwMnbvY+hcQ=="], + "@aws-sdk/client-s3/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.3.4", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-EFd6afGmXlCx8H8WTZHhAoDaWaGyuIBoZJ2mknrNxug+aZKjkp0a0dlars9Izl+jF+7Gu1/5f/2h68cQpe0IiA=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/credential-provider-login": ["@aws-sdk/credential-provider-login@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-mlaw2aiI3DrimW85ZMn3g7qrtHueidS58IGytZ+mbFpsYLK5wMjCAKZQtt7VatLMtSBG/dn/EY4njbnYXIDKeQ=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/credential-provider-login": ["@aws-sdk/credential-provider-login@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": 
"^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-uYq1ILyTSI6ZDCMY5+vUsRM0SOCVI7kaW4wBrehVVkhAxC6y+e9rvGtnoZqCOWL1gKjTMouvsf4Ilhc5NCg1Aw=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.975.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": "^3.972.1", "@aws-sdk/middleware-recursion-detection": "^3.972.1", "@aws-sdk/middleware-user-agent": "^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", "@aws-sdk/util-user-agent-node": "^3.972.1", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, 
"sha512-OkeFHPlQj2c/Y5bQGkX14pxhDWUGUFt3LRHhjcDKsSCw6lrxKcxN3WFZN0qbJwKNydP+knL5nxvfgKiCLpTLRA=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-TsWwKzb/2WHafAY0CE7uXgLj0FmnkBTgfioG9HO+7z/zCPcl1+YU+i7dW4o0y+aFxFgxTMG+ExBQpqT/k2ao8g=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/client-sso": ["@aws-sdk/client-sso@3.975.0", "", { 
"dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": "^3.972.1", "@aws-sdk/middleware-recursion-detection": "^3.972.1", "@aws-sdk/middleware-user-agent": "^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", "@aws-sdk/util-user-agent-node": "^3.972.1", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-HpgJuleH7P6uILxzJKQOmlHdwaCY+xYC6VgRDzlwVEqU/HXjo4m2gOAyjUbpXlBOCWfGgMUzfBlNJ9z3MboqEQ=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/client-sso": ["@aws-sdk/client-sso@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", 
"@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-81J8iE8MuXhdbMfIz4sWFj64Pe41bFi/uqqmqOC5SlGv+kwoyLsyKS/rH2tW2t5buih4vTUxskRjxlqikTD4oQ=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers": ["@aws-sdk/token-providers@3.975.0", "", { "dependencies": { "@aws-sdk/core": "^3.973.1", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.0", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-AWQt64hkVbDQ+CmM09wnvSk2mVyH4iRROkmYkr3/lmUtFNbE2L/fnw26sckZnUcFCsHPqbkQrcsZAnTcBLbH4w=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers": ["@aws-sdk/token-providers@3.985.0", "", { 
"dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-+hwpHZyEq8k+9JL2PkE60V93v2kNhUIv7STFt+EAez1UJsJOQDhc5LpzEX66pNjclI5OTwBROs/DhJjC/BtMjQ=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.975.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": "^3.972.1", "@aws-sdk/middleware-recursion-detection": "^3.972.1", "@aws-sdk/middleware-user-agent": "^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", "@aws-sdk/util-user-agent-node": "^3.972.1", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } 
}, "sha512-OkeFHPlQj2c/Y5bQGkX14pxhDWUGUFt3LRHhjcDKsSCw6lrxKcxN3WFZN0qbJwKNydP+knL5nxvfgKiCLpTLRA=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-TsWwKzb/2WHafAY0CE7uXgLj0FmnkBTgfioG9HO+7z/zCPcl1+YU+i7dW4o0y+aFxFgxTMG+ExBQpqT/k2ao8g=="], - "@aws-sdk/client-sesv2/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.2.5", "", { "dependencies": { "strnum": 
"^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-pfX9uG9Ki0yekDHx2SiuRIyFdyAr1kMIMitPvb0YBo8SUfKvia7w7FIyd/l6av85pFYRhZscS75MwMnbvY+hcQ=="], + "@aws-sdk/client-sesv2/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.3.4", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-EFd6afGmXlCx8H8WTZHhAoDaWaGyuIBoZJ2mknrNxug+aZKjkp0a0dlars9Izl+jF+7Gu1/5f/2h68cQpe0IiA=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/credential-provider-login": ["@aws-sdk/credential-provider-login@3.972.2", "", { "dependencies": { "@aws-sdk/core": "^3.973.2", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-mlaw2aiI3DrimW85ZMn3g7qrtHueidS58IGytZ+mbFpsYLK5wMjCAKZQtt7VatLMtSBG/dn/EY4njbnYXIDKeQ=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/credential-provider-login": ["@aws-sdk/credential-provider-login@3.972.5", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/protocol-http": "^5.3.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-uYq1ILyTSI6ZDCMY5+vUsRM0SOCVI7kaW4wBrehVVkhAxC6y+e9rvGtnoZqCOWL1gKjTMouvsf4Ilhc5NCg1Aw=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.975.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": "^3.972.1", "@aws-sdk/middleware-recursion-detection": 
"^3.972.1", "@aws-sdk/middleware-user-agent": "^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", "@aws-sdk/util-user-agent-node": "^3.972.1", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-OkeFHPlQj2c/Y5bQGkX14pxhDWUGUFt3LRHhjcDKsSCw6lrxKcxN3WFZN0qbJwKNydP+knL5nxvfgKiCLpTLRA=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": 
"^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-TsWwKzb/2WHafAY0CE7uXgLj0FmnkBTgfioG9HO+7z/zCPcl1+YU+i7dW4o0y+aFxFgxTMG+ExBQpqT/k2ao8g=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/client-sso": ["@aws-sdk/client-sso@3.975.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": "^3.972.1", "@aws-sdk/middleware-recursion-detection": "^3.972.1", "@aws-sdk/middleware-user-agent": "^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", "@aws-sdk/util-user-agent-node": "^3.972.1", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", 
"@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-HpgJuleH7P6uILxzJKQOmlHdwaCY+xYC6VgRDzlwVEqU/HXjo4m2gOAyjUbpXlBOCWfGgMUzfBlNJ9z3MboqEQ=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/client-sso": ["@aws-sdk/client-sso@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", 
"@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-81J8iE8MuXhdbMfIz4sWFj64Pe41bFi/uqqmqOC5SlGv+kwoyLsyKS/rH2tW2t5buih4vTUxskRjxlqikTD4oQ=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers": ["@aws-sdk/token-providers@3.975.0", "", { "dependencies": { "@aws-sdk/core": "^3.973.1", "@aws-sdk/nested-clients": "3.975.0", "@aws-sdk/types": "^3.973.0", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-AWQt64hkVbDQ+CmM09wnvSk2mVyH4iRROkmYkr3/lmUtFNbE2L/fnw26sckZnUcFCsHPqbkQrcsZAnTcBLbH4w=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers": ["@aws-sdk/token-providers@3.985.0", "", { "dependencies": { "@aws-sdk/core": "^3.973.7", "@aws-sdk/nested-clients": "3.985.0", "@aws-sdk/types": "^3.973.1", "@smithy/property-provider": "^4.2.8", "@smithy/shared-ini-file-loader": "^4.4.3", "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-+hwpHZyEq8k+9JL2PkE60V93v2kNhUIv7STFt+EAez1UJsJOQDhc5LpzEX66pNjclI5OTwBROs/DhJjC/BtMjQ=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.975.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": 
"^3.972.1", "@aws-sdk/middleware-recursion-detection": "^3.972.1", "@aws-sdk/middleware-user-agent": "^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", "@aws-sdk/util-user-agent-node": "^3.972.1", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-OkeFHPlQj2c/Y5bQGkX14pxhDWUGUFt3LRHhjcDKsSCw6lrxKcxN3WFZN0qbJwKNydP+knL5nxvfgKiCLpTLRA=="], + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", 
"@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-TsWwKzb/2WHafAY0CE7uXgLj0FmnkBTgfioG9HO+7z/zCPcl1+YU+i7dW4o0y+aFxFgxTMG+ExBQpqT/k2ao8g=="], "@aws-sdk/client-sqs/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/credential-provider-login": ["@aws-sdk/credential-provider-login@3.947.0", "", { "dependencies": { "@aws-sdk/core": "3.947.0", "@aws-sdk/nested-clients": "3.947.0", "@aws-sdk/types": "3.936.0", "@smithy/property-provider": "^4.2.5", "@smithy/protocol-http": "^5.3.5", "@smithy/shared-ini-file-loader": "^4.4.0", "@smithy/types": "^4.9.0", "tslib": "^2.6.2" } }, "sha512-u7M3hazcB7aJiVwosNdJRbIJDzbwQ861NTtl6S0HmvWpixaVb7iyhJZWg8/plyUznboZGBm7JVEdxtxv3u0bTA=="], @@ -4766,11 +4754,9 @@ "@aws-sdk/client-sqs/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.947.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", 
"@aws-sdk/core": "3.947.0", "@aws-sdk/middleware-host-header": "3.936.0", "@aws-sdk/middleware-logger": "3.936.0", "@aws-sdk/middleware-recursion-detection": "3.936.0", "@aws-sdk/middleware-user-agent": "3.947.0", "@aws-sdk/region-config-resolver": "3.936.0", "@aws-sdk/types": "3.936.0", "@aws-sdk/util-endpoints": "3.936.0", "@aws-sdk/util-user-agent-browser": "3.936.0", "@aws-sdk/util-user-agent-node": "3.947.0", "@smithy/config-resolver": "^4.4.3", "@smithy/core": "^3.18.7", "@smithy/fetch-http-handler": "^5.3.6", "@smithy/hash-node": "^4.2.5", "@smithy/invalid-dependency": "^4.2.5", "@smithy/middleware-content-length": "^4.2.5", "@smithy/middleware-endpoint": "^4.3.14", "@smithy/middleware-retry": "^4.4.14", "@smithy/middleware-serde": "^4.2.6", "@smithy/middleware-stack": "^4.2.5", "@smithy/node-config-provider": "^4.3.5", "@smithy/node-http-handler": "^4.4.5", "@smithy/protocol-http": "^5.3.5", "@smithy/smithy-client": "^4.9.10", "@smithy/types": "^4.9.0", "@smithy/url-parser": "^4.2.5", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.13", "@smithy/util-defaults-mode-node": "^4.2.16", "@smithy/util-endpoints": "^3.2.5", "@smithy/util-middleware": "^4.2.5", "@smithy/util-retry": "^4.2.5", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-DjRJEYNnHUTu9kGPPQDTSXquwSEd6myKR4ssI4FaYLFhdT3ldWpj73yYt807H3tdmhS7vPmdVqchSJnjurUQAw=="], - "@aws-sdk/middleware-flexible-checksums/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.2.5", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-pfX9uG9Ki0yekDHx2SiuRIyFdyAr1kMIMitPvb0YBo8SUfKvia7w7FIyd/l6av85pFYRhZscS75MwMnbvY+hcQ=="], + "@aws-sdk/middleware-flexible-checksums/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.3.4", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" 
} }, "sha512-EFd6afGmXlCx8H8WTZHhAoDaWaGyuIBoZJ2mknrNxug+aZKjkp0a0dlars9Izl+jF+7Gu1/5f/2h68cQpe0IiA=="], - "@aws-sdk/middleware-sdk-s3/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.2.5", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-pfX9uG9Ki0yekDHx2SiuRIyFdyAr1kMIMitPvb0YBo8SUfKvia7w7FIyd/l6av85pFYRhZscS75MwMnbvY+hcQ=="], - - "@aws-sdk/signature-v4-multi-region/@aws-sdk/middleware-sdk-s3/@aws-sdk/core/@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.0", "", { "dependencies": { "@smithy/types": "^4.12.0", "fast-xml-parser": "5.2.5", "tslib": "^2.6.2" } }, "sha512-POaGMcXnozzqBUyJM3HLUZ9GR6OKJWPGJEmhtTnxZXt8B6JcJ/6K3xRJ5H/j8oovVLz8Wg6vFxAHv8lvuASxMg=="], + "@aws-sdk/middleware-sdk-s3/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.3.4", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-EFd6afGmXlCx8H8WTZHhAoDaWaGyuIBoZJ2mknrNxug+aZKjkp0a0dlars9Izl+jF+7Gu1/5f/2h68cQpe0IiA=="], "@browserbasehq/sdk/node-fetch/whatwg-url/tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="], @@ -4800,7 +4786,7 @@ "cli-truncate/string-width/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], - "fetch-cookie/tough-cookie/tldts/tldts-core": ["tldts-core@7.0.19", "", {}, "sha512-lJX2dEWx0SGH4O6p+7FPwYmJ/bu1JbcGJ8RLaG9b7liIgZ85itUVEPbMtWRVrde/0fnDPEPHW10ZsKW3kVsE9A=="], + "fetch-cookie/tough-cookie/tldts/tldts-core": ["tldts-core@7.0.23", "", {}, "sha512-0g9vrtDQLrNIiCj22HSe9d4mLVG3g5ph5DZ8zCKBr4OtrspmNB6ss7hVyzArAeE88ceZocIEGkyW1Ime7fxPtQ=="], "groq-sdk/node-fetch/whatwg-url/tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="], @@ -4812,7 +4798,7 @@ 
"lint-staged/listr2/cli-truncate/string-width": ["string-width@7.2.0", "", { "dependencies": { "emoji-regex": "^10.3.0", "get-east-asian-width": "^1.0.0", "strip-ansi": "^7.1.0" } }, "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ=="], - "lint-staged/listr2/log-update/ansi-escapes": ["ansi-escapes@7.2.0", "", { "dependencies": { "environment": "^1.0.0" } }, "sha512-g6LhBsl+GBPRWGWsBtutpzBYuIIdBkLEvad5C/va/74Db018+5TZiyA26cZJAr3Rft5lprVqOIPxf5Vid6tqAw=="], + "lint-staged/listr2/log-update/ansi-escapes": ["ansi-escapes@7.3.0", "", { "dependencies": { "environment": "^1.0.0" } }, "sha512-BvU8nYgGQBxcmMuEeUEmNTvrMVjJNSH7RgW24vXexN4Ven6qCvy4TntnvlnwnMLTVlcRQQdbRY8NKnaIoeWDNg=="], "lint-staged/listr2/log-update/cli-cursor": ["cli-cursor@5.0.0", "", { "dependencies": { "restore-cursor": "^5.0.0" } }, "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw=="], @@ -4858,22 +4844,38 @@ "posthog-js/@opentelemetry/exporter-logs-otlp-http/@opentelemetry/otlp-transformer/@opentelemetry/sdk-trace-base": ["@opentelemetry/sdk-trace-base@2.2.0", "", { "dependencies": { "@opentelemetry/core": "2.2.0", "@opentelemetry/resources": "2.2.0", "@opentelemetry/semantic-conventions": "^1.29.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, "sha512-xWQgL0Bmctsalg6PaXExmzdedSp3gyKV8mQBwK/j9VGdCDu2fmXIb2gAehBKbkXCpJ4HPkgv3QfoJWRT4dHWbw=="], + "rimraf/glob/jackspeak/@isaacs/cliui": ["@isaacs/cliui@8.0.2", "", { "dependencies": { "string-width": "^5.1.2", "string-width-cjs": "npm:string-width@^4.2.0", "strip-ansi": "^7.0.1", "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", "wrap-ansi": "^8.1.0", "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" } }, "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA=="], + "rimraf/glob/path-scurry/lru-cache": ["lru-cache@10.4.3", "", {}, 
"sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="], + "sim/tailwindcss/chokidar/fsevents": ["fsevents@2.3.2", "", { "os": "darwin" }, "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA=="], + "sim/tailwindcss/chokidar/glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], "sim/tailwindcss/chokidar/readdirp": ["readdirp@3.6.0", "", { "dependencies": { "picomatch": "^2.2.1" } }, "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA=="], + "test-exclude/glob/jackspeak/@isaacs/cliui": ["@isaacs/cliui@8.0.2", "", { "dependencies": { "string-width": "^5.1.2", "string-width-cjs": "npm:string-width@^4.2.0", "strip-ansi": "^7.0.1", "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", "wrap-ansi": "^8.1.0", "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" } }, "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA=="], + "test-exclude/glob/path-scurry/lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="], - "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.975.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": "^3.972.1", "@aws-sdk/middleware-recursion-detection": "^3.972.1", "@aws-sdk/middleware-user-agent": "^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", "@aws-sdk/util-user-agent-node": "^3.972.1", 
"@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-OkeFHPlQj2c/Y5bQGkX14pxhDWUGUFt3LRHhjcDKsSCw6lrxKcxN3WFZN0qbJwKNydP+knL5nxvfgKiCLpTLRA=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/nested-clients/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], - "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.975.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.1", "@aws-sdk/middleware-host-header": "^3.972.1", "@aws-sdk/middleware-logger": "^3.972.1", "@aws-sdk/middleware-recursion-detection": "^3.972.1", "@aws-sdk/middleware-user-agent": 
"^3.972.2", "@aws-sdk/region-config-resolver": "^3.972.1", "@aws-sdk/types": "^3.973.0", "@aws-sdk/util-endpoints": "3.972.0", "@aws-sdk/util-user-agent-browser": "^3.972.1", "@aws-sdk/util-user-agent-node": "^3.972.1", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.21.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.11", "@smithy/middleware-retry": "^4.4.27", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.8", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.10.12", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.26", "@smithy/util-defaults-mode-node": "^4.2.29", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-OkeFHPlQj2c/Y5bQGkX14pxhDWUGUFt3LRHhjcDKsSCw6lrxKcxN3WFZN0qbJwKNydP+knL5nxvfgKiCLpTLRA=="], + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/client-sso/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": 
"5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", "@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-TsWwKzb/2WHafAY0CE7uXgLj0FmnkBTgfioG9HO+7z/zCPcl1+YU+i7dW4o0y+aFxFgxTMG+ExBQpqT/k2ao8g=="], + + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity/@aws-sdk/nested-clients/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + + 
"@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-ini/@aws-sdk/nested-clients/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/client-sso/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.985.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.7", "@aws-sdk/middleware-host-header": "^3.972.3", "@aws-sdk/middleware-logger": "^3.972.3", "@aws-sdk/middleware-recursion-detection": "^3.972.3", "@aws-sdk/middleware-user-agent": "^3.972.7", "@aws-sdk/region-config-resolver": "^3.972.3", "@aws-sdk/types": "^3.973.1", "@aws-sdk/util-endpoints": "3.985.0", "@aws-sdk/util-user-agent-browser": "^3.972.3", "@aws-sdk/util-user-agent-node": "^3.972.5", "@smithy/config-resolver": "^4.4.6", "@smithy/core": "^3.22.1", "@smithy/fetch-http-handler": "^5.3.9", "@smithy/hash-node": "^4.2.8", "@smithy/invalid-dependency": "^4.2.8", "@smithy/middleware-content-length": "^4.2.8", "@smithy/middleware-endpoint": "^4.4.13", "@smithy/middleware-retry": "^4.4.30", "@smithy/middleware-serde": "^4.2.9", "@smithy/middleware-stack": "^4.2.8", 
"@smithy/node-config-provider": "^4.3.8", "@smithy/node-http-handler": "^4.4.9", "@smithy/protocol-http": "^5.3.8", "@smithy/smithy-client": "^4.11.2", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.29", "@smithy/util-defaults-mode-node": "^4.2.32", "@smithy/util-endpoints": "^3.2.8", "@smithy/util-middleware": "^4.2.8", "@smithy/util-retry": "^4.2.8", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-TsWwKzb/2WHafAY0CE7uXgLj0FmnkBTgfioG9HO+7z/zCPcl1+YU+i7dW4o0y+aFxFgxTMG+ExBQpqT/k2ao8g=="], + + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-web-identity/@aws-sdk/nested-clients/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], "@aws-sdk/client-sqs/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.947.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "3.947.0", "@aws-sdk/middleware-host-header": "3.936.0", "@aws-sdk/middleware-logger": "3.936.0", "@aws-sdk/middleware-recursion-detection": "3.936.0", "@aws-sdk/middleware-user-agent": "3.947.0", "@aws-sdk/region-config-resolver": "3.936.0", "@aws-sdk/types": "3.936.0", "@aws-sdk/util-endpoints": "3.936.0", "@aws-sdk/util-user-agent-browser": "3.936.0", "@aws-sdk/util-user-agent-node": "3.947.0", "@smithy/config-resolver": "^4.4.3", "@smithy/core": "^3.18.7", "@smithy/fetch-http-handler": "^5.3.6", "@smithy/hash-node": "^4.2.5", 
"@smithy/invalid-dependency": "^4.2.5", "@smithy/middleware-content-length": "^4.2.5", "@smithy/middleware-endpoint": "^4.3.14", "@smithy/middleware-retry": "^4.4.14", "@smithy/middleware-serde": "^4.2.6", "@smithy/middleware-stack": "^4.2.5", "@smithy/node-config-provider": "^4.3.5", "@smithy/node-http-handler": "^4.4.5", "@smithy/protocol-http": "^5.3.5", "@smithy/smithy-client": "^4.9.10", "@smithy/types": "^4.9.0", "@smithy/url-parser": "^4.2.5", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.13", "@smithy/util-defaults-mode-node": "^4.2.16", "@smithy/util-endpoints": "^3.2.5", "@smithy/util-middleware": "^4.2.5", "@smithy/util-retry": "^4.2.5", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-DjRJEYNnHUTu9kGPPQDTSXquwSEd6myKR4ssI4FaYLFhdT3ldWpj73yYt807H3tdmhS7vPmdVqchSJnjurUQAw=="], - "@aws-sdk/signature-v4-multi-region/@aws-sdk/middleware-sdk-s3/@aws-sdk/core/@aws-sdk/xml-builder/fast-xml-parser": ["fast-xml-parser@5.2.5", "", { "dependencies": { "strnum": "^2.1.0" }, "bin": { "fxparser": "src/cli/cli.js" } }, "sha512-pfX9uG9Ki0yekDHx2SiuRIyFdyAr1kMIMitPvb0YBo8SUfKvia7w7FIyd/l6av85pFYRhZscS75MwMnbvY+hcQ=="], - "@browserbasehq/stagehand/@anthropic-ai/sdk/node-fetch/whatwg-url/tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="], "@browserbasehq/stagehand/@anthropic-ai/sdk/node-fetch/whatwg-url/webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="], @@ -4896,10 +4898,38 @@ "oauth2-mock-server/express/type-is/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="], + "rimraf/glob/jackspeak/@isaacs/cliui/string-width": ["string-width@5.1.2", "", { "dependencies": { 
"eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", "strip-ansi": "^7.0.1" } }, "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA=="], + + "rimraf/glob/jackspeak/@isaacs/cliui/strip-ansi": ["strip-ansi@7.1.2", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA=="], + + "rimraf/glob/jackspeak/@isaacs/cliui/wrap-ansi": ["wrap-ansi@8.1.0", "", { "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", "strip-ansi": "^7.0.1" } }, "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ=="], + "sim/tailwindcss/chokidar/readdirp/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="], + "test-exclude/glob/jackspeak/@isaacs/cliui/string-width": ["string-width@5.1.2", "", { "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", "strip-ansi": "^7.0.1" } }, "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA=="], + + "test-exclude/glob/jackspeak/@isaacs/cliui/strip-ansi": ["strip-ansi@7.1.2", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA=="], + + "test-exclude/glob/jackspeak/@isaacs/cliui/wrap-ansi": ["wrap-ansi@8.1.0", "", { "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", "strip-ansi": "^7.0.1" } }, "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ=="], + + "@aws-sdk/client-s3/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", 
"@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + + "@aws-sdk/client-sesv2/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients/@aws-sdk/util-endpoints": ["@aws-sdk/util-endpoints@3.985.0", "", { "dependencies": { "@aws-sdk/types": "^3.973.1", "@smithy/types": "^4.12.0", "@smithy/url-parser": "^4.2.8", "@smithy/util-endpoints": "^3.2.8", "tslib": "^2.6.2" } }, "sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA=="], + "lint-staged/listr2/cli-truncate/string-width/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], "lint-staged/listr2/log-update/cli-cursor/restore-cursor/onetime": ["onetime@7.0.0", "", { "dependencies": { "mimic-function": "^5.0.0" } }, "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ=="], + + "rimraf/glob/jackspeak/@isaacs/cliui/string-width/emoji-regex": ["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], + + "rimraf/glob/jackspeak/@isaacs/cliui/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], + + "rimraf/glob/jackspeak/@isaacs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], + + "test-exclude/glob/jackspeak/@isaacs/cliui/string-width/emoji-regex": ["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], + + "test-exclude/glob/jackspeak/@isaacs/cliui/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, 
"sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], + + "test-exclude/glob/jackspeak/@isaacs/cliui/wrap-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], } } diff --git a/packages/db/migrations/0153_complete_arclight.sql b/packages/db/migrations/0153_complete_arclight.sql new file mode 100644 index 000000000..f8f489862 --- /dev/null +++ b/packages/db/migrations/0153_complete_arclight.sql @@ -0,0 +1,4 @@ +ALTER TYPE "public"."usage_log_source" ADD VALUE 'mcp_copilot';--> statement-breakpoint +ALTER TABLE "user_stats" ADD COLUMN "total_mcp_copilot_calls" integer DEFAULT 0 NOT NULL;--> statement-breakpoint +ALTER TABLE "user_stats" ADD COLUMN "total_mcp_copilot_cost" numeric DEFAULT '0' NOT NULL;--> statement-breakpoint +ALTER TABLE "user_stats" ADD COLUMN "current_period_mcp_copilot_cost" numeric DEFAULT '0' NOT NULL; \ No newline at end of file diff --git a/packages/db/migrations/meta/0153_snapshot.json b/packages/db/migrations/meta/0153_snapshot.json new file mode 100644 index 000000000..82e45d5ee --- /dev/null +++ b/packages/db/migrations/meta/0153_snapshot.json @@ -0,0 +1,10640 @@ +{ + "id": "2652353e-bc06-43fe-a8c6-4d03fe4dac93", + "prevId": "137c6e6a-44df-4e0a-93df-61265ae36c52", + "version": "7", + "dialect": "postgresql", + "tables": { + "public.a2a_agent": { + "name": "a2a_agent", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + 
"description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "version": { + "name": "version", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'1.0.0'" + }, + "capabilities": { + "name": "capabilities", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "skills": { + "name": "skills", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'[]'" + }, + "authentication": { + "name": "authentication", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "signatures": { + "name": "signatures", + "type": "jsonb", + "primaryKey": false, + "notNull": false, + "default": "'[]'" + }, + "is_published": { + "name": "is_published", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "published_at": { + "name": "published_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "a2a_agent_workspace_id_idx": { + "name": "a2a_agent_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_agent_workflow_id_idx": { + "name": "a2a_agent_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_agent_created_by_idx": { + "name": "a2a_agent_created_by_idx", + "columns": [ + { + "expression": "created_by", + "isExpression": false, + "asc": true, + 
"nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_agent_workspace_workflow_unique": { + "name": "a2a_agent_workspace_workflow_unique", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "a2a_agent_workspace_id_workspace_id_fk": { + "name": "a2a_agent_workspace_id_workspace_id_fk", + "tableFrom": "a2a_agent", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "a2a_agent_workflow_id_workflow_id_fk": { + "name": "a2a_agent_workflow_id_workflow_id_fk", + "tableFrom": "a2a_agent", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "a2a_agent_created_by_user_id_fk": { + "name": "a2a_agent_created_by_user_id_fk", + "tableFrom": "a2a_agent", + "tableTo": "user", + "columnsFrom": ["created_by"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.a2a_push_notification_config": { + "name": "a2a_push_notification_config", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "task_id": { + "name": "task_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "url": { + "name": "url", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "token": { + "name": "token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "auth_schemes": { + "name": "auth_schemes", + "type": "jsonb", + 
"primaryKey": false, + "notNull": false, + "default": "'[]'" + }, + "auth_credentials": { + "name": "auth_credentials", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "is_active": { + "name": "is_active", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "a2a_push_notification_config_task_id_idx": { + "name": "a2a_push_notification_config_task_id_idx", + "columns": [ + { + "expression": "task_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_push_notification_config_task_unique": { + "name": "a2a_push_notification_config_task_unique", + "columns": [ + { + "expression": "task_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "a2a_push_notification_config_task_id_a2a_task_id_fk": { + "name": "a2a_push_notification_config_task_id_a2a_task_id_fk", + "tableFrom": "a2a_push_notification_config", + "tableTo": "a2a_task", + "columnsFrom": ["task_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.a2a_task": { + "name": "a2a_task", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "agent_id": { + "name": "agent_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "session_id": { + "name": "session_id", + "type": "text", + "primaryKey": 
false, + "notNull": false + }, + "status": { + "name": "status", + "type": "a2a_task_status", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'submitted'" + }, + "messages": { + "name": "messages", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'[]'" + }, + "artifacts": { + "name": "artifacts", + "type": "jsonb", + "primaryKey": false, + "notNull": false, + "default": "'[]'" + }, + "execution_id": { + "name": "execution_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "metadata": { + "name": "metadata", + "type": "jsonb", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "completed_at": { + "name": "completed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "a2a_task_agent_id_idx": { + "name": "a2a_task_agent_id_idx", + "columns": [ + { + "expression": "agent_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_task_session_id_idx": { + "name": "a2a_task_session_id_idx", + "columns": [ + { + "expression": "session_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_task_status_idx": { + "name": "a2a_task_status_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_task_execution_id_idx": { + "name": "a2a_task_execution_id_idx", + "columns": [ + { + "expression": "execution_id", + 
"isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "a2a_task_created_at_idx": { + "name": "a2a_task_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "a2a_task_agent_id_a2a_agent_id_fk": { + "name": "a2a_task_agent_id_a2a_agent_id_fk", + "tableFrom": "a2a_task", + "tableTo": "a2a_agent", + "columnsFrom": ["agent_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.account": { + "name": "account", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "account_id": { + "name": "account_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "provider_id": { + "name": "provider_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "access_token": { + "name": "access_token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "refresh_token": { + "name": "refresh_token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "id_token": { + "name": "id_token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "access_token_expires_at": { + "name": "access_token_expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "refresh_token_expires_at": { + "name": "refresh_token_expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "scope": { + "name": "scope", + "type": "text", + "primaryKey": false, + "notNull": 
false + }, + "password": { + "name": "password", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + } + }, + "indexes": { + "account_user_id_idx": { + "name": "account_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "idx_account_on_account_id_provider_id": { + "name": "idx_account_on_account_id_provider_id", + "columns": [ + { + "expression": "account_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "provider_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "account_user_provider_unique": { + "name": "account_user_provider_unique", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "provider_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "account_user_id_user_id_fk": { + "name": "account_user_id_user_id_fk", + "tableFrom": "account", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.api_key": { + "name": "api_key", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + 
"primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "key": { + "name": "key", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "type": { + "name": "type", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'personal'" + }, + "last_used": { + "name": "last_used", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "api_key_workspace_type_idx": { + "name": "api_key_workspace_type_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "api_key_user_type_idx": { + "name": "api_key_user_type_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "api_key_user_id_user_id_fk": { + "name": "api_key_user_id_user_id_fk", + "tableFrom": "api_key", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + 
"onDelete": "cascade", + "onUpdate": "no action" + }, + "api_key_workspace_id_workspace_id_fk": { + "name": "api_key_workspace_id_workspace_id_fk", + "tableFrom": "api_key", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "api_key_created_by_user_id_fk": { + "name": "api_key_created_by_user_id_fk", + "tableFrom": "api_key", + "tableTo": "user", + "columnsFrom": ["created_by"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "api_key_key_unique": { + "name": "api_key_key_unique", + "nullsNotDistinct": false, + "columns": ["key"] + } + }, + "policies": {}, + "checkConstraints": { + "workspace_type_check": { + "name": "workspace_type_check", + "value": "(type = 'workspace' AND workspace_id IS NOT NULL) OR (type = 'personal' AND workspace_id IS NULL)" + } + }, + "isRLSEnabled": false + }, + "public.async_jobs": { + "name": "async_jobs", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "type": { + "name": "type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "payload": { + "name": "payload", + "type": "jsonb", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "started_at": { + "name": "started_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "completed_at": { + "name": "completed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "run_at": { + "name": "run_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "attempts": { + "name": "attempts", + "type": "integer", + 
"primaryKey": false, + "notNull": true, + "default": 0 + }, + "max_attempts": { + "name": "max_attempts", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 3 + }, + "error": { + "name": "error", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "output": { + "name": "output", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "metadata": { + "name": "metadata", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "async_jobs_status_started_at_idx": { + "name": "async_jobs_status_started_at_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "started_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "async_jobs_status_completed_at_idx": { + "name": "async_jobs_status_completed_at_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "completed_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.chat": { + "name": "chat", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "identifier": { + "name": "identifier", + "type": 
"text", + "primaryKey": false, + "notNull": true + }, + "title": { + "name": "title", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "is_active": { + "name": "is_active", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "customizations": { + "name": "customizations", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "auth_type": { + "name": "auth_type", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'public'" + }, + "password": { + "name": "password", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "allowed_emails": { + "name": "allowed_emails", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'[]'" + }, + "output_configs": { + "name": "output_configs", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'[]'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "identifier_idx": { + "name": "identifier_idx", + "columns": [ + { + "expression": "identifier", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "chat_workflow_id_workflow_id_fk": { + "name": "chat_workflow_id_workflow_id_fk", + "tableFrom": "chat", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "chat_user_id_user_id_fk": { + "name": "chat_user_id_user_id_fk", + "tableFrom": "chat", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + 
"onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.copilot_chats": { + "name": "copilot_chats", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "uuid", + "primaryKey": true, + "notNull": true, + "default": "gen_random_uuid()" + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "title": { + "name": "title", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "messages": { + "name": "messages", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'[]'" + }, + "model": { + "name": "model", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'claude-3-7-sonnet-latest'" + }, + "conversation_id": { + "name": "conversation_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "preview_yaml": { + "name": "preview_yaml", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "plan_artifact": { + "name": "plan_artifact", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "config": { + "name": "config", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "copilot_chats_user_id_idx": { + "name": "copilot_chats_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_chats_workflow_id_idx": { + 
"name": "copilot_chats_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_chats_user_workflow_idx": { + "name": "copilot_chats_user_workflow_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_chats_created_at_idx": { + "name": "copilot_chats_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_chats_updated_at_idx": { + "name": "copilot_chats_updated_at_idx", + "columns": [ + { + "expression": "updated_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "copilot_chats_user_id_user_id_fk": { + "name": "copilot_chats_user_id_user_id_fk", + "tableFrom": "copilot_chats", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "copilot_chats_workflow_id_workflow_id_fk": { + "name": "copilot_chats_workflow_id_workflow_id_fk", + "tableFrom": "copilot_chats", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.copilot_feedback": { + "name": "copilot_feedback", + "schema": "", + "columns": { + "feedback_id": { + "name": "feedback_id", + 
"type": "uuid", + "primaryKey": true, + "notNull": true, + "default": "gen_random_uuid()" + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "chat_id": { + "name": "chat_id", + "type": "uuid", + "primaryKey": false, + "notNull": true + }, + "user_query": { + "name": "user_query", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "agent_response": { + "name": "agent_response", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "is_positive": { + "name": "is_positive", + "type": "boolean", + "primaryKey": false, + "notNull": true + }, + "feedback": { + "name": "feedback", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "workflow_yaml": { + "name": "workflow_yaml", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "copilot_feedback_user_id_idx": { + "name": "copilot_feedback_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_feedback_chat_id_idx": { + "name": "copilot_feedback_chat_id_idx", + "columns": [ + { + "expression": "chat_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_feedback_user_chat_idx": { + "name": "copilot_feedback_user_chat_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "chat_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + 
"concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_feedback_is_positive_idx": { + "name": "copilot_feedback_is_positive_idx", + "columns": [ + { + "expression": "is_positive", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_feedback_created_at_idx": { + "name": "copilot_feedback_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "copilot_feedback_user_id_user_id_fk": { + "name": "copilot_feedback_user_id_user_id_fk", + "tableFrom": "copilot_feedback", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "copilot_feedback_chat_id_copilot_chats_id_fk": { + "name": "copilot_feedback_chat_id_copilot_chats_id_fk", + "tableFrom": "copilot_feedback", + "tableTo": "copilot_chats", + "columnsFrom": ["chat_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.credential_set": { + "name": "credential_set", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "provider_id": { + "name": "provider_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_by": { + "name": "created_by", + "type": "text", + 
"primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "credential_set_organization_id_idx": { + "name": "credential_set_organization_id_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_created_by_idx": { + "name": "credential_set_created_by_idx", + "columns": [ + { + "expression": "created_by", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_org_name_unique": { + "name": "credential_set_org_name_unique", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "name", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_provider_id_idx": { + "name": "credential_set_provider_id_idx", + "columns": [ + { + "expression": "provider_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "credential_set_organization_id_organization_id_fk": { + "name": "credential_set_organization_id_organization_id_fk", + "tableFrom": "credential_set", + "tableTo": "organization", + "columnsFrom": ["organization_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "credential_set_created_by_user_id_fk": { + "name": "credential_set_created_by_user_id_fk", + "tableFrom": 
"credential_set", + "tableTo": "user", + "columnsFrom": ["created_by"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.credential_set_invitation": { + "name": "credential_set_invitation", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "credential_set_id": { + "name": "credential_set_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "token": { + "name": "token", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "invited_by": { + "name": "invited_by", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": "status", + "type": "credential_set_invitation_status", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "accepted_at": { + "name": "accepted_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "accepted_by_user_id": { + "name": "accepted_by_user_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "credential_set_invitation_set_id_idx": { + "name": "credential_set_invitation_set_id_idx", + "columns": [ + { + "expression": "credential_set_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_invitation_token_idx": { + "name": "credential_set_invitation_token_idx", + "columns": [ + { + 
"expression": "token", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_invitation_status_idx": { + "name": "credential_set_invitation_status_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_invitation_expires_at_idx": { + "name": "credential_set_invitation_expires_at_idx", + "columns": [ + { + "expression": "expires_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "credential_set_invitation_credential_set_id_credential_set_id_fk": { + "name": "credential_set_invitation_credential_set_id_credential_set_id_fk", + "tableFrom": "credential_set_invitation", + "tableTo": "credential_set", + "columnsFrom": ["credential_set_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "credential_set_invitation_invited_by_user_id_fk": { + "name": "credential_set_invitation_invited_by_user_id_fk", + "tableFrom": "credential_set_invitation", + "tableTo": "user", + "columnsFrom": ["invited_by"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "credential_set_invitation_accepted_by_user_id_user_id_fk": { + "name": "credential_set_invitation_accepted_by_user_id_user_id_fk", + "tableFrom": "credential_set_invitation", + "tableTo": "user", + "columnsFrom": ["accepted_by_user_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "credential_set_invitation_token_unique": { + "name": "credential_set_invitation_token_unique", + "nullsNotDistinct": false, + "columns": ["token"] + } + }, + "policies": {}, + 
"checkConstraints": {}, + "isRLSEnabled": false + }, + "public.credential_set_member": { + "name": "credential_set_member", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "credential_set_id": { + "name": "credential_set_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": "status", + "type": "credential_set_member_status", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "joined_at": { + "name": "joined_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "invited_by": { + "name": "invited_by", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "credential_set_member_set_id_idx": { + "name": "credential_set_member_set_id_idx", + "columns": [ + { + "expression": "credential_set_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_member_user_id_idx": { + "name": "credential_set_member_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_member_unique": { + "name": "credential_set_member_unique", + "columns": [ + { + "expression": "credential_set_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": 
"last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "credential_set_member_status_idx": { + "name": "credential_set_member_status_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "credential_set_member_credential_set_id_credential_set_id_fk": { + "name": "credential_set_member_credential_set_id_credential_set_id_fk", + "tableFrom": "credential_set_member", + "tableTo": "credential_set", + "columnsFrom": ["credential_set_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "credential_set_member_user_id_user_id_fk": { + "name": "credential_set_member_user_id_user_id_fk", + "tableFrom": "credential_set_member", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "credential_set_member_invited_by_user_id_fk": { + "name": "credential_set_member_invited_by_user_id_fk", + "tableFrom": "credential_set_member", + "tableTo": "user", + "columnsFrom": ["invited_by"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.custom_tools": { + "name": "custom_tools", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "title": { + "name": "title", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "schema": { + "name": "schema", + "type": "json", + "primaryKey": false, + "notNull": true 
+ }, + "code": { + "name": "code", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "custom_tools_workspace_id_idx": { + "name": "custom_tools_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "custom_tools_workspace_title_unique": { + "name": "custom_tools_workspace_title_unique", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "title", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "custom_tools_workspace_id_workspace_id_fk": { + "name": "custom_tools_workspace_id_workspace_id_fk", + "tableFrom": "custom_tools", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "custom_tools_user_id_user_id_fk": { + "name": "custom_tools_user_id_user_id_fk", + "tableFrom": "custom_tools", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.docs_embeddings": { + "name": "docs_embeddings", + "schema": "", + "columns": { + "chunk_id": { + "name": "chunk_id", + "type": "uuid", + "primaryKey": true, + "notNull": true, + "default": "gen_random_uuid()" + }, + "chunk_text": { + "name": "chunk_text", + 
"type": "text", + "primaryKey": false, + "notNull": true + }, + "source_document": { + "name": "source_document", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "source_link": { + "name": "source_link", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "header_text": { + "name": "header_text", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "header_level": { + "name": "header_level", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "token_count": { + "name": "token_count", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "embedding": { + "name": "embedding", + "type": "vector(1536)", + "primaryKey": false, + "notNull": true + }, + "embedding_model": { + "name": "embedding_model", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'text-embedding-3-small'" + }, + "metadata": { + "name": "metadata", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "chunk_text_tsv": { + "name": "chunk_text_tsv", + "type": "tsvector", + "primaryKey": false, + "notNull": false, + "generated": { + "as": "to_tsvector('english', \"docs_embeddings\".\"chunk_text\")", + "type": "stored" + } + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "docs_emb_source_document_idx": { + "name": "docs_emb_source_document_idx", + "columns": [ + { + "expression": "source_document", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "docs_emb_header_level_idx": { + "name": "docs_emb_header_level_idx", + "columns": [ + { + "expression": "header_level", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], 
+ "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "docs_emb_source_header_idx": { + "name": "docs_emb_source_header_idx", + "columns": [ + { + "expression": "source_document", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "header_level", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "docs_emb_model_idx": { + "name": "docs_emb_model_idx", + "columns": [ + { + "expression": "embedding_model", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "docs_emb_created_at_idx": { + "name": "docs_emb_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "docs_embedding_vector_hnsw_idx": { + "name": "docs_embedding_vector_hnsw_idx", + "columns": [ + { + "expression": "embedding", + "isExpression": false, + "asc": true, + "nulls": "last", + "opclass": "vector_cosine_ops" + } + ], + "isUnique": false, + "concurrently": false, + "method": "hnsw", + "with": { + "m": 16, + "ef_construction": 64 + } + }, + "docs_emb_metadata_gin_idx": { + "name": "docs_emb_metadata_gin_idx", + "columns": [ + { + "expression": "metadata", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "gin", + "with": {} + }, + "docs_emb_chunk_text_fts_idx": { + "name": "docs_emb_chunk_text_fts_idx", + "columns": [ + { + "expression": "chunk_text_tsv", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "gin", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + 
"checkConstraints": { + "docs_embedding_not_null_check": { + "name": "docs_embedding_not_null_check", + "value": "\"embedding\" IS NOT NULL" + }, + "docs_header_level_check": { + "name": "docs_header_level_check", + "value": "\"header_level\" >= 1 AND \"header_level\" <= 6" + } + }, + "isRLSEnabled": false + }, + "public.document": { + "name": "document", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "knowledge_base_id": { + "name": "knowledge_base_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "filename": { + "name": "filename", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "file_url": { + "name": "file_url", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "file_size": { + "name": "file_size", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "mime_type": { + "name": "mime_type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "chunk_count": { + "name": "chunk_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "token_count": { + "name": "token_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "character_count": { + "name": "character_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "processing_status": { + "name": "processing_status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "processing_started_at": { + "name": "processing_started_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "processing_completed_at": { + "name": "processing_completed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "processing_error": { + "name": "processing_error", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "enabled": { + "name": "enabled", + "type": "boolean", + 
"primaryKey": false, + "notNull": true, + "default": true + }, + "deleted_at": { + "name": "deleted_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "tag1": { + "name": "tag1", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag2": { + "name": "tag2", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag3": { + "name": "tag3", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag4": { + "name": "tag4", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag5": { + "name": "tag5", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag6": { + "name": "tag6", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag7": { + "name": "tag7", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "number1": { + "name": "number1", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "number2": { + "name": "number2", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "number3": { + "name": "number3", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "number4": { + "name": "number4", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "number5": { + "name": "number5", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "date1": { + "name": "date1", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "date2": { + "name": "date2", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "boolean1": { + "name": "boolean1", + "type": "boolean", + "primaryKey": false, + "notNull": false + }, + "boolean2": { + "name": "boolean2", + "type": "boolean", + "primaryKey": false, + "notNull": false + }, + "boolean3": { + "name": "boolean3", + "type": "boolean", + "primaryKey": false, + "notNull": false + }, + "uploaded_at": { + "name": "uploaded_at", + "type": "timestamp", + 
"primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "doc_kb_id_idx": { + "name": "doc_kb_id_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_filename_idx": { + "name": "doc_filename_idx", + "columns": [ + { + "expression": "filename", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_processing_status_idx": { + "name": "doc_processing_status_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "processing_status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag1_idx": { + "name": "doc_tag1_idx", + "columns": [ + { + "expression": "tag1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag2_idx": { + "name": "doc_tag2_idx", + "columns": [ + { + "expression": "tag2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag3_idx": { + "name": "doc_tag3_idx", + "columns": [ + { + "expression": "tag3", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag4_idx": { + "name": "doc_tag4_idx", + "columns": [ + { + "expression": "tag4", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag5_idx": { + "name": "doc_tag5_idx", + 
"columns": [ + { + "expression": "tag5", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag6_idx": { + "name": "doc_tag6_idx", + "columns": [ + { + "expression": "tag6", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag7_idx": { + "name": "doc_tag7_idx", + "columns": [ + { + "expression": "tag7", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_number1_idx": { + "name": "doc_number1_idx", + "columns": [ + { + "expression": "number1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_number2_idx": { + "name": "doc_number2_idx", + "columns": [ + { + "expression": "number2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_number3_idx": { + "name": "doc_number3_idx", + "columns": [ + { + "expression": "number3", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_number4_idx": { + "name": "doc_number4_idx", + "columns": [ + { + "expression": "number4", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_number5_idx": { + "name": "doc_number5_idx", + "columns": [ + { + "expression": "number5", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_date1_idx": { + "name": "doc_date1_idx", + "columns": [ + { + 
"expression": "date1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_date2_idx": { + "name": "doc_date2_idx", + "columns": [ + { + "expression": "date2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_boolean1_idx": { + "name": "doc_boolean1_idx", + "columns": [ + { + "expression": "boolean1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_boolean2_idx": { + "name": "doc_boolean2_idx", + "columns": [ + { + "expression": "boolean2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_boolean3_idx": { + "name": "doc_boolean3_idx", + "columns": [ + { + "expression": "boolean3", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "document_knowledge_base_id_knowledge_base_id_fk": { + "name": "document_knowledge_base_id_knowledge_base_id_fk", + "tableFrom": "document", + "tableTo": "knowledge_base", + "columnsFrom": ["knowledge_base_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.embedding": { + "name": "embedding", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "knowledge_base_id": { + "name": "knowledge_base_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "document_id": { + "name": "document_id", + "type": "text", + "primaryKey": false, 
+ "notNull": true + }, + "chunk_index": { + "name": "chunk_index", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "chunk_hash": { + "name": "chunk_hash", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "content": { + "name": "content", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "content_length": { + "name": "content_length", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "token_count": { + "name": "token_count", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "embedding": { + "name": "embedding", + "type": "vector(1536)", + "primaryKey": false, + "notNull": false + }, + "embedding_model": { + "name": "embedding_model", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'text-embedding-3-small'" + }, + "start_offset": { + "name": "start_offset", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "end_offset": { + "name": "end_offset", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "tag1": { + "name": "tag1", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag2": { + "name": "tag2", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag3": { + "name": "tag3", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag4": { + "name": "tag4", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag5": { + "name": "tag5", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag6": { + "name": "tag6", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag7": { + "name": "tag7", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "number1": { + "name": "number1", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "number2": { + "name": "number2", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "number3": { + "name": "number3", + "type": 
"double precision", + "primaryKey": false, + "notNull": false + }, + "number4": { + "name": "number4", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "number5": { + "name": "number5", + "type": "double precision", + "primaryKey": false, + "notNull": false + }, + "date1": { + "name": "date1", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "date2": { + "name": "date2", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "boolean1": { + "name": "boolean1", + "type": "boolean", + "primaryKey": false, + "notNull": false + }, + "boolean2": { + "name": "boolean2", + "type": "boolean", + "primaryKey": false, + "notNull": false + }, + "boolean3": { + "name": "boolean3", + "type": "boolean", + "primaryKey": false, + "notNull": false + }, + "enabled": { + "name": "enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "content_tsv": { + "name": "content_tsv", + "type": "tsvector", + "primaryKey": false, + "notNull": false, + "generated": { + "as": "to_tsvector('english', \"embedding\".\"content\")", + "type": "stored" + } + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "emb_kb_id_idx": { + "name": "emb_kb_id_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_doc_id_idx": { + "name": "emb_doc_id_idx", + "columns": [ + { + "expression": "document_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_doc_chunk_idx": { + "name": 
"emb_doc_chunk_idx", + "columns": [ + { + "expression": "document_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "chunk_index", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_kb_model_idx": { + "name": "emb_kb_model_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "embedding_model", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_kb_enabled_idx": { + "name": "emb_kb_enabled_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "enabled", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_doc_enabled_idx": { + "name": "emb_doc_enabled_idx", + "columns": [ + { + "expression": "document_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "enabled", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "embedding_vector_hnsw_idx": { + "name": "embedding_vector_hnsw_idx", + "columns": [ + { + "expression": "embedding", + "isExpression": false, + "asc": true, + "nulls": "last", + "opclass": "vector_cosine_ops" + } + ], + "isUnique": false, + "concurrently": false, + "method": "hnsw", + "with": { + "m": 16, + "ef_construction": 64 + } + }, + "emb_tag1_idx": { + "name": "emb_tag1_idx", + "columns": [ + { + "expression": "tag1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + 
"emb_tag2_idx": { + "name": "emb_tag2_idx", + "columns": [ + { + "expression": "tag2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag3_idx": { + "name": "emb_tag3_idx", + "columns": [ + { + "expression": "tag3", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag4_idx": { + "name": "emb_tag4_idx", + "columns": [ + { + "expression": "tag4", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag5_idx": { + "name": "emb_tag5_idx", + "columns": [ + { + "expression": "tag5", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag6_idx": { + "name": "emb_tag6_idx", + "columns": [ + { + "expression": "tag6", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag7_idx": { + "name": "emb_tag7_idx", + "columns": [ + { + "expression": "tag7", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_number1_idx": { + "name": "emb_number1_idx", + "columns": [ + { + "expression": "number1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_number2_idx": { + "name": "emb_number2_idx", + "columns": [ + { + "expression": "number2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_number3_idx": { + "name": "emb_number3_idx", + 
"columns": [ + { + "expression": "number3", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_number4_idx": { + "name": "emb_number4_idx", + "columns": [ + { + "expression": "number4", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_number5_idx": { + "name": "emb_number5_idx", + "columns": [ + { + "expression": "number5", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_date1_idx": { + "name": "emb_date1_idx", + "columns": [ + { + "expression": "date1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_date2_idx": { + "name": "emb_date2_idx", + "columns": [ + { + "expression": "date2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_boolean1_idx": { + "name": "emb_boolean1_idx", + "columns": [ + { + "expression": "boolean1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_boolean2_idx": { + "name": "emb_boolean2_idx", + "columns": [ + { + "expression": "boolean2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_boolean3_idx": { + "name": "emb_boolean3_idx", + "columns": [ + { + "expression": "boolean3", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_content_fts_idx": { + "name": 
"emb_content_fts_idx", + "columns": [ + { + "expression": "content_tsv", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "gin", + "with": {} + } + }, + "foreignKeys": { + "embedding_knowledge_base_id_knowledge_base_id_fk": { + "name": "embedding_knowledge_base_id_knowledge_base_id_fk", + "tableFrom": "embedding", + "tableTo": "knowledge_base", + "columnsFrom": ["knowledge_base_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "embedding_document_id_document_id_fk": { + "name": "embedding_document_id_document_id_fk", + "tableFrom": "embedding", + "tableTo": "document", + "columnsFrom": ["document_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": { + "embedding_not_null_check": { + "name": "embedding_not_null_check", + "value": "\"embedding\" IS NOT NULL" + } + }, + "isRLSEnabled": false + }, + "public.environment": { + "name": "environment", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "variables": { + "name": "variables", + "type": "json", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": { + "environment_user_id_user_id_fk": { + "name": "environment_user_id_user_id_fk", + "tableFrom": "environment", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "environment_user_id_unique": { + "name": "environment_user_id_unique", + "nullsNotDistinct": 
false, + "columns": ["user_id"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.form": { + "name": "form", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "identifier": { + "name": "identifier", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "title": { + "name": "title", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "is_active": { + "name": "is_active", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "customizations": { + "name": "customizations", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "auth_type": { + "name": "auth_type", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'public'" + }, + "password": { + "name": "password", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "allowed_emails": { + "name": "allowed_emails", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'[]'" + }, + "show_branding": { + "name": "show_branding", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "form_identifier_idx": { + "name": "form_identifier_idx", + "columns": [ + { + "expression": "identifier", + "isExpression": false, + "asc": true, + "nulls": 
"last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "form_workflow_id_idx": { + "name": "form_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "form_user_id_idx": { + "name": "form_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "form_workflow_id_workflow_id_fk": { + "name": "form_workflow_id_workflow_id_fk", + "tableFrom": "form", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "form_user_id_user_id_fk": { + "name": "form_user_id_user_id_fk", + "tableFrom": "form", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.idempotency_key": { + "name": "idempotency_key", + "schema": "", + "columns": { + "key": { + "name": "key", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "result": { + "name": "result", + "type": "json", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "idempotency_key_created_at_idx": { + "name": "idempotency_key_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + 
"compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.invitation": { + "name": "invitation", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "inviter_id": { + "name": "inviter_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "role": { + "name": "role", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "invitation_email_idx": { + "name": "invitation_email_idx", + "columns": [ + { + "expression": "email", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "invitation_organization_id_idx": { + "name": "invitation_organization_id_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "invitation_inviter_id_user_id_fk": { + "name": "invitation_inviter_id_user_id_fk", + "tableFrom": "invitation", + "tableTo": "user", + "columnsFrom": ["inviter_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "invitation_organization_id_organization_id_fk": { + "name": "invitation_organization_id_organization_id_fk", 
+ "tableFrom": "invitation", + "tableTo": "organization", + "columnsFrom": ["organization_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.knowledge_base": { + "name": "knowledge_base", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "token_count": { + "name": "token_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "embedding_model": { + "name": "embedding_model", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'text-embedding-3-small'" + }, + "embedding_dimension": { + "name": "embedding_dimension", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 1536 + }, + "chunking_config": { + "name": "chunking_config", + "type": "json", + "primaryKey": false, + "notNull": true, + "default": "'{\"maxSize\": 1024, \"minSize\": 1, \"overlap\": 200}'" + }, + "deleted_at": { + "name": "deleted_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "kb_user_id_idx": { + "name": "kb_user_id_idx", + "columns": [ + { + "expression": 
"user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "kb_workspace_id_idx": { + "name": "kb_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "kb_user_workspace_idx": { + "name": "kb_user_workspace_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "kb_deleted_at_idx": { + "name": "kb_deleted_at_idx", + "columns": [ + { + "expression": "deleted_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "knowledge_base_user_id_user_id_fk": { + "name": "knowledge_base_user_id_user_id_fk", + "tableFrom": "knowledge_base", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "knowledge_base_workspace_id_workspace_id_fk": { + "name": "knowledge_base_workspace_id_workspace_id_fk", + "tableFrom": "knowledge_base", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "no action", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.knowledge_base_tag_definitions": { + "name": "knowledge_base_tag_definitions", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "knowledge_base_id": { + "name": "knowledge_base_id", 
+ "type": "text", + "primaryKey": false, + "notNull": true + }, + "tag_slot": { + "name": "tag_slot", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "display_name": { + "name": "display_name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "field_type": { + "name": "field_type", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'text'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "kb_tag_definitions_kb_slot_idx": { + "name": "kb_tag_definitions_kb_slot_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "tag_slot", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "kb_tag_definitions_kb_display_name_idx": { + "name": "kb_tag_definitions_kb_display_name_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "display_name", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "kb_tag_definitions_kb_id_idx": { + "name": "kb_tag_definitions_kb_id_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "knowledge_base_tag_definitions_knowledge_base_id_knowledge_base_id_fk": { + "name": "knowledge_base_tag_definitions_knowledge_base_id_knowledge_base_id_fk", + "tableFrom": "knowledge_base_tag_definitions", + 
"tableTo": "knowledge_base", + "columnsFrom": ["knowledge_base_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.mcp_servers": { + "name": "mcp_servers", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "transport": { + "name": "transport", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "url": { + "name": "url", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "headers": { + "name": "headers", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "timeout": { + "name": "timeout", + "type": "integer", + "primaryKey": false, + "notNull": false, + "default": 30000 + }, + "retries": { + "name": "retries", + "type": "integer", + "primaryKey": false, + "notNull": false, + "default": 3 + }, + "enabled": { + "name": "enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "last_connected": { + "name": "last_connected", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "connection_status": { + "name": "connection_status", + "type": "text", + "primaryKey": false, + "notNull": false, + "default": "'disconnected'" + }, + "last_error": { + "name": "last_error", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "status_config": { + "name": "status_config", + "type": 
"jsonb", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "tool_count": { + "name": "tool_count", + "type": "integer", + "primaryKey": false, + "notNull": false, + "default": 0 + }, + "last_tools_refresh": { + "name": "last_tools_refresh", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "total_requests": { + "name": "total_requests", + "type": "integer", + "primaryKey": false, + "notNull": false, + "default": 0 + }, + "last_used": { + "name": "last_used", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "deleted_at": { + "name": "deleted_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "mcp_servers_workspace_enabled_idx": { + "name": "mcp_servers_workspace_enabled_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "enabled", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "mcp_servers_workspace_deleted_idx": { + "name": "mcp_servers_workspace_deleted_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "deleted_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "mcp_servers_workspace_id_workspace_id_fk": { + "name": "mcp_servers_workspace_id_workspace_id_fk", + "tableFrom": "mcp_servers", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": 
"cascade", + "onUpdate": "no action" + }, + "mcp_servers_created_by_user_id_fk": { + "name": "mcp_servers_created_by_user_id_fk", + "tableFrom": "mcp_servers", + "tableTo": "user", + "columnsFrom": ["created_by"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.member": { + "name": "member", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "role": { + "name": "role", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "member_user_id_unique": { + "name": "member_user_id_unique", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "member_organization_id_idx": { + "name": "member_organization_id_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "member_user_id_user_id_fk": { + "name": "member_user_id_user_id_fk", + "tableFrom": "member", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "member_organization_id_organization_id_fk": { + "name": "member_organization_id_organization_id_fk", + "tableFrom": "member", + "tableTo": "organization", 
+ "columnsFrom": ["organization_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.memory": { + "name": "memory", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "key": { + "name": "key", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "data": { + "name": "data", + "type": "jsonb", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "deleted_at": { + "name": "deleted_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "memory_key_idx": { + "name": "memory_key_idx", + "columns": [ + { + "expression": "key", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "memory_workspace_idx": { + "name": "memory_workspace_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "memory_workspace_key_idx": { + "name": "memory_workspace_key_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "key", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { 
+ "memory_workspace_id_workspace_id_fk": { + "name": "memory_workspace_id_workspace_id_fk", + "tableFrom": "memory", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.organization": { + "name": "organization", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "slug": { + "name": "slug", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "logo": { + "name": "logo", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "metadata": { + "name": "metadata", + "type": "json", + "primaryKey": false, + "notNull": false + }, + "org_usage_limit": { + "name": "org_usage_limit", + "type": "numeric", + "primaryKey": false, + "notNull": false + }, + "storage_used_bytes": { + "name": "storage_used_bytes", + "type": "bigint", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "departed_member_usage": { + "name": "departed_member_usage", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "credit_balance": { + "name": "credit_balance", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.paused_executions": { + "name": "paused_executions", 
+ "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "execution_id": { + "name": "execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "execution_snapshot": { + "name": "execution_snapshot", + "type": "jsonb", + "primaryKey": false, + "notNull": true + }, + "pause_points": { + "name": "pause_points", + "type": "jsonb", + "primaryKey": false, + "notNull": true + }, + "total_pause_count": { + "name": "total_pause_count", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "resumed_count": { + "name": "resumed_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'paused'" + }, + "metadata": { + "name": "metadata", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'::jsonb" + }, + "paused_at": { + "name": "paused_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "paused_executions_workflow_id_idx": { + "name": "paused_executions_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "paused_executions_status_idx": { + "name": "paused_executions_status_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": 
false, + "method": "btree", + "with": {} + }, + "paused_executions_execution_id_unique": { + "name": "paused_executions_execution_id_unique", + "columns": [ + { + "expression": "execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "paused_executions_workflow_id_workflow_id_fk": { + "name": "paused_executions_workflow_id_workflow_id_fk", + "tableFrom": "paused_executions", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.permission_group": { + "name": "permission_group", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "config": { + "name": "config", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "auto_add_new_members": { + "name": "auto_add_new_members", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + } + }, + "indexes": { + "permission_group_organization_id_idx": { + "name": 
"permission_group_organization_id_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permission_group_created_by_idx": { + "name": "permission_group_created_by_idx", + "columns": [ + { + "expression": "created_by", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permission_group_org_name_unique": { + "name": "permission_group_org_name_unique", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "name", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permission_group_org_auto_add_unique": { + "name": "permission_group_org_auto_add_unique", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "where": "auto_add_new_members = true", + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "permission_group_organization_id_organization_id_fk": { + "name": "permission_group_organization_id_organization_id_fk", + "tableFrom": "permission_group", + "tableTo": "organization", + "columnsFrom": ["organization_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "permission_group_created_by_user_id_fk": { + "name": "permission_group_created_by_user_id_fk", + "tableFrom": "permission_group", + "tableTo": "user", + "columnsFrom": ["created_by"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + 
"public.permission_group_member": { + "name": "permission_group_member", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "permission_group_id": { + "name": "permission_group_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "assigned_by": { + "name": "assigned_by", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "assigned_at": { + "name": "assigned_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "permission_group_member_group_id_idx": { + "name": "permission_group_member_group_id_idx", + "columns": [ + { + "expression": "permission_group_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permission_group_member_user_id_unique": { + "name": "permission_group_member_user_id_unique", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "permission_group_member_permission_group_id_permission_group_id_fk": { + "name": "permission_group_member_permission_group_id_permission_group_id_fk", + "tableFrom": "permission_group_member", + "tableTo": "permission_group", + "columnsFrom": ["permission_group_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "permission_group_member_user_id_user_id_fk": { + "name": "permission_group_member_user_id_user_id_fk", + "tableFrom": "permission_group_member", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "permission_group_member_assigned_by_user_id_fk": { + "name": 
"permission_group_member_assigned_by_user_id_fk", + "tableFrom": "permission_group_member", + "tableTo": "user", + "columnsFrom": ["assigned_by"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.permissions": { + "name": "permissions", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "entity_type": { + "name": "entity_type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "entity_id": { + "name": "entity_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "permission_type": { + "name": "permission_type", + "type": "permission_type", + "typeSchema": "public", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "permissions_user_id_idx": { + "name": "permissions_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permissions_entity_idx": { + "name": "permissions_entity_idx", + "columns": [ + { + "expression": "entity_type", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permissions_user_entity_type_idx": { + "name": "permissions_user_entity_type_idx", + 
"columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permissions_user_entity_permission_idx": { + "name": "permissions_user_entity_permission_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_type", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "permission_type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permissions_user_entity_idx": { + "name": "permissions_user_entity_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_type", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permissions_unique_constraint": { + "name": "permissions_unique_constraint", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_type", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "permissions_user_id_user_id_fk": { + "name": "permissions_user_id_user_id_fk", + "tableFrom": "permissions", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + 
"compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.rate_limit_bucket": { + "name": "rate_limit_bucket", + "schema": "", + "columns": { + "key": { + "name": "key", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "tokens": { + "name": "tokens", + "type": "numeric", + "primaryKey": false, + "notNull": true + }, + "last_refill_at": { + "name": "last_refill_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.resume_queue": { + "name": "resume_queue", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "paused_execution_id": { + "name": "paused_execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "parent_execution_id": { + "name": "parent_execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "new_execution_id": { + "name": "new_execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "context_id": { + "name": "context_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "resume_input": { + "name": "resume_input", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "queued_at": { + "name": "queued_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "claimed_at": { + "name": "claimed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "completed_at": { + "name": 
"completed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "failure_reason": { + "name": "failure_reason", + "type": "text", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "resume_queue_parent_status_idx": { + "name": "resume_queue_parent_status_idx", + "columns": [ + { + "expression": "parent_execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "queued_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "resume_queue_new_execution_idx": { + "name": "resume_queue_new_execution_idx", + "columns": [ + { + "expression": "new_execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "resume_queue_paused_execution_id_paused_executions_id_fk": { + "name": "resume_queue_paused_execution_id_paused_executions_id_fk", + "tableFrom": "resume_queue", + "tableTo": "paused_executions", + "columnsFrom": ["paused_execution_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.session": { + "name": "session", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "token": { + "name": "token", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": 
"timestamp", + "primaryKey": false, + "notNull": true + }, + "ip_address": { + "name": "ip_address", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_agent": { + "name": "user_agent", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "active_organization_id": { + "name": "active_organization_id", + "type": "text", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "session_user_id_idx": { + "name": "session_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "session_token_idx": { + "name": "session_token_idx", + "columns": [ + { + "expression": "token", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "session_user_id_user_id_fk": { + "name": "session_user_id_user_id_fk", + "tableFrom": "session", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "session_active_organization_id_organization_id_fk": { + "name": "session_active_organization_id_organization_id_fk", + "tableFrom": "session", + "tableTo": "organization", + "columnsFrom": ["active_organization_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "session_token_unique": { + "name": "session_token_unique", + "nullsNotDistinct": false, + "columns": ["token"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.settings": { + "name": "settings", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": 
true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "theme": { + "name": "theme", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'dark'" + }, + "auto_connect": { + "name": "auto_connect", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "telemetry_enabled": { + "name": "telemetry_enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "email_preferences": { + "name": "email_preferences", + "type": "json", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "billing_usage_notifications_enabled": { + "name": "billing_usage_notifications_enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "show_training_controls": { + "name": "show_training_controls", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "super_user_mode_enabled": { + "name": "super_user_mode_enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "error_notifications_enabled": { + "name": "error_notifications_enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "snap_to_grid_size": { + "name": "snap_to_grid_size", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "show_action_bar": { + "name": "show_action_bar", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "copilot_enabled_models": { + "name": "copilot_enabled_models", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "copilot_auto_allowed_tools": { + "name": "copilot_auto_allowed_tools", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'[]'" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": 
"now()" + } + }, + "indexes": {}, + "foreignKeys": { + "settings_user_id_user_id_fk": { + "name": "settings_user_id_user_id_fk", + "tableFrom": "settings", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "settings_user_id_unique": { + "name": "settings_user_id_unique", + "nullsNotDistinct": false, + "columns": ["user_id"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.skill": { + "name": "skill", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "content": { + "name": "content", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "skill_workspace_id_idx": { + "name": "skill_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "skill_workspace_name_unique": { + "name": "skill_workspace_name_unique", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "name", + 
"isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "skill_workspace_id_workspace_id_fk": { + "name": "skill_workspace_id_workspace_id_fk", + "tableFrom": "skill", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "skill_user_id_user_id_fk": { + "name": "skill_user_id_user_id_fk", + "tableFrom": "skill", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.sso_provider": { + "name": "sso_provider", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "issuer": { + "name": "issuer", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "domain": { + "name": "domain", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "oidc_config": { + "name": "oidc_config", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "saml_config": { + "name": "saml_config", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "provider_id": { + "name": "provider_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "sso_provider_provider_id_idx": { + "name": "sso_provider_provider_id_idx", + "columns": [ + { + "expression": "provider_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + 
"sso_provider_domain_idx": { + "name": "sso_provider_domain_idx", + "columns": [ + { + "expression": "domain", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "sso_provider_user_id_idx": { + "name": "sso_provider_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "sso_provider_organization_id_idx": { + "name": "sso_provider_organization_id_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "sso_provider_user_id_user_id_fk": { + "name": "sso_provider_user_id_user_id_fk", + "tableFrom": "sso_provider", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "sso_provider_organization_id_organization_id_fk": { + "name": "sso_provider_organization_id_organization_id_fk", + "tableFrom": "sso_provider", + "tableTo": "organization", + "columnsFrom": ["organization_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.subscription": { + "name": "subscription", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "plan": { + "name": "plan", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "reference_id": { + "name": "reference_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "stripe_customer_id": { + "name": "stripe_customer_id", + "type": "text", + "primaryKey": false, + 
"notNull": false + }, + "stripe_subscription_id": { + "name": "stripe_subscription_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "period_start": { + "name": "period_start", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "period_end": { + "name": "period_end", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "cancel_at_period_end": { + "name": "cancel_at_period_end", + "type": "boolean", + "primaryKey": false, + "notNull": false + }, + "seats": { + "name": "seats", + "type": "integer", + "primaryKey": false, + "notNull": false + }, + "trial_start": { + "name": "trial_start", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "trial_end": { + "name": "trial_end", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "metadata": { + "name": "metadata", + "type": "json", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "subscription_reference_status_idx": { + "name": "subscription_reference_status_idx", + "columns": [ + { + "expression": "reference_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": { + "check_enterprise_metadata": { + "name": "check_enterprise_metadata", + "value": "plan != 'enterprise' OR metadata IS NOT NULL" + } + }, + "isRLSEnabled": false + }, + "public.template_creators": { + "name": "template_creators", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "reference_type": { + "name": "reference_type", + "type": "template_creator_type", 
+ "typeSchema": "public", + "primaryKey": false, + "notNull": true + }, + "reference_id": { + "name": "reference_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "profile_image_url": { + "name": "profile_image_url", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "details": { + "name": "details", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "verified": { + "name": "verified", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "template_creators_reference_idx": { + "name": "template_creators_reference_idx", + "columns": [ + { + "expression": "reference_type", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "reference_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_creators_reference_id_idx": { + "name": "template_creators_reference_id_idx", + "columns": [ + { + "expression": "reference_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_creators_created_by_idx": { + "name": "template_creators_created_by_idx", + "columns": [ + { + "expression": "created_by", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + 
"foreignKeys": { + "template_creators_created_by_user_id_fk": { + "name": "template_creators_created_by_user_id_fk", + "tableFrom": "template_creators", + "tableTo": "user", + "columnsFrom": ["created_by"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.template_stars": { + "name": "template_stars", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "template_id": { + "name": "template_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "starred_at": { + "name": "starred_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "template_stars_user_id_idx": { + "name": "template_stars_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_template_id_idx": { + "name": "template_stars_template_id_idx", + "columns": [ + { + "expression": "template_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_user_template_idx": { + "name": "template_stars_user_template_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "template_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + 
"method": "btree", + "with": {} + }, + "template_stars_template_user_idx": { + "name": "template_stars_template_user_idx", + "columns": [ + { + "expression": "template_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_starred_at_idx": { + "name": "template_stars_starred_at_idx", + "columns": [ + { + "expression": "starred_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_template_starred_at_idx": { + "name": "template_stars_template_starred_at_idx", + "columns": [ + { + "expression": "template_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "starred_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_user_template_unique": { + "name": "template_stars_user_template_unique", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "template_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "template_stars_user_id_user_id_fk": { + "name": "template_stars_user_id_user_id_fk", + "tableFrom": "template_stars", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "template_stars_template_id_templates_id_fk": { + "name": "template_stars_template_id_templates_id_fk", + "tableFrom": "template_stars", + "tableTo": "templates", + "columnsFrom": ["template_id"], + "columnsTo": ["id"], + "onDelete": 
"cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.templates": { + "name": "templates", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "details": { + "name": "details", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "creator_id": { + "name": "creator_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "views": { + "name": "views", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "stars": { + "name": "stars", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "status": { + "name": "status", + "type": "template_status", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "tags": { + "name": "tags", + "type": "text[]", + "primaryKey": false, + "notNull": true, + "default": "'{}'::text[]" + }, + "required_credentials": { + "name": "required_credentials", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'[]'" + }, + "state": { + "name": "state", + "type": "jsonb", + "primaryKey": false, + "notNull": true + }, + "og_image_url": { + "name": "og_image_url", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "templates_status_idx": { + "name": "templates_status_idx", + "columns": [ + { + 
"expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_creator_id_idx": { + "name": "templates_creator_id_idx", + "columns": [ + { + "expression": "creator_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_views_idx": { + "name": "templates_views_idx", + "columns": [ + { + "expression": "views", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_stars_idx": { + "name": "templates_stars_idx", + "columns": [ + { + "expression": "stars", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_status_views_idx": { + "name": "templates_status_views_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "views", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_status_stars_idx": { + "name": "templates_status_stars_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "stars", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_created_at_idx": { + "name": "templates_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_updated_at_idx": { + "name": 
"templates_updated_at_idx", + "columns": [ + { + "expression": "updated_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "templates_workflow_id_workflow_id_fk": { + "name": "templates_workflow_id_workflow_id_fk", + "tableFrom": "templates", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + }, + "templates_creator_id_template_creators_id_fk": { + "name": "templates_creator_id_template_creators_id_fk", + "tableFrom": "templates", + "tableTo": "template_creators", + "columnsFrom": ["creator_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.usage_log": { + "name": "usage_log", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "category": { + "name": "category", + "type": "usage_log_category", + "typeSchema": "public", + "primaryKey": false, + "notNull": true + }, + "source": { + "name": "source", + "type": "usage_log_source", + "typeSchema": "public", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "metadata": { + "name": "metadata", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "cost": { + "name": "cost", + "type": "numeric", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": 
false + }, + "execution_id": { + "name": "execution_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "usage_log_user_created_at_idx": { + "name": "usage_log_user_created_at_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "usage_log_source_idx": { + "name": "usage_log_source_idx", + "columns": [ + { + "expression": "source", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "usage_log_workspace_id_idx": { + "name": "usage_log_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "usage_log_workflow_id_idx": { + "name": "usage_log_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "usage_log_user_id_user_id_fk": { + "name": "usage_log_user_id_user_id_fk", + "tableFrom": "usage_log", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "usage_log_workspace_id_workspace_id_fk": { + "name": "usage_log_workspace_id_workspace_id_fk", + "tableFrom": "usage_log", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + }, + 
"usage_log_workflow_id_workflow_id_fk": { + "name": "usage_log_workflow_id_workflow_id_fk", + "tableFrom": "usage_log", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.user": { + "name": "user", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "email_verified": { + "name": "email_verified", + "type": "boolean", + "primaryKey": false, + "notNull": true + }, + "image": { + "name": "image", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "stripe_customer_id": { + "name": "stripe_customer_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "is_super_user": { + "name": "is_super_user", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + } + }, + "indexes": {}, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "user_email_unique": { + "name": "user_email_unique", + "nullsNotDistinct": false, + "columns": ["email"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.user_stats": { + "name": "user_stats", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + 
"total_manual_executions": { + "name": "total_manual_executions", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_api_calls": { + "name": "total_api_calls", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_webhook_triggers": { + "name": "total_webhook_triggers", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_scheduled_executions": { + "name": "total_scheduled_executions", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_chat_executions": { + "name": "total_chat_executions", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_mcp_executions": { + "name": "total_mcp_executions", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_a2a_executions": { + "name": "total_a2a_executions", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_tokens_used": { + "name": "total_tokens_used", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_cost": { + "name": "total_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "current_usage_limit": { + "name": "current_usage_limit", + "type": "numeric", + "primaryKey": false, + "notNull": false, + "default": "'20'" + }, + "usage_limit_updated_at": { + "name": "usage_limit_updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false, + "default": "now()" + }, + "current_period_cost": { + "name": "current_period_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "last_period_cost": { + "name": "last_period_cost", + "type": "numeric", + "primaryKey": false, + "notNull": false, + "default": "'0'" + }, + "billed_overage_this_period": { + "name": "billed_overage_this_period", + "type": "numeric", + 
"primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "pro_period_cost_snapshot": { + "name": "pro_period_cost_snapshot", + "type": "numeric", + "primaryKey": false, + "notNull": false, + "default": "'0'" + }, + "credit_balance": { + "name": "credit_balance", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "total_copilot_cost": { + "name": "total_copilot_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "current_period_copilot_cost": { + "name": "current_period_copilot_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "last_period_copilot_cost": { + "name": "last_period_copilot_cost", + "type": "numeric", + "primaryKey": false, + "notNull": false, + "default": "'0'" + }, + "total_copilot_tokens": { + "name": "total_copilot_tokens", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_copilot_calls": { + "name": "total_copilot_calls", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_mcp_copilot_calls": { + "name": "total_mcp_copilot_calls", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_mcp_copilot_cost": { + "name": "total_mcp_copilot_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "current_period_mcp_copilot_cost": { + "name": "current_period_mcp_copilot_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "storage_used_bytes": { + "name": "storage_used_bytes", + "type": "bigint", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "last_active": { + "name": "last_active", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "billing_blocked": { + "name": "billing_blocked", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + 
}, + "billing_blocked_reason": { + "name": "billing_blocked_reason", + "type": "billing_blocked_reason", + "typeSchema": "public", + "primaryKey": false, + "notNull": false + } + }, + "indexes": {}, + "foreignKeys": { + "user_stats_user_id_user_id_fk": { + "name": "user_stats_user_id_user_id_fk", + "tableFrom": "user_stats", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "user_stats_user_id_unique": { + "name": "user_stats_user_id_unique", + "nullsNotDistinct": false, + "columns": ["user_id"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.verification": { + "name": "verification", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "identifier": { + "name": "identifier", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "value": { + "name": "value", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "verification_identifier_idx": { + "name": "verification_identifier_idx", + "columns": [ + { + "expression": "identifier", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "verification_expires_at_idx": { + "name": "verification_expires_at_idx", + "columns": [ + { + "expression": "expires_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": 
{} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.waitlist": { + "name": "waitlist", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "waitlist_email_unique": { + "name": "waitlist_email_unique", + "nullsNotDistinct": false, + "columns": ["email"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.webhook": { + "name": "webhook", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "deployment_version_id": { + "name": "deployment_version_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "block_id": { + "name": "block_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "path": { + "name": "path", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "provider": { + "name": "provider", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "provider_config": { + "name": "provider_config", + "type": "json", + "primaryKey": false, + "notNull": false + }, + "is_active": { + "name": "is_active", + "type": "boolean", + 
"primaryKey": false, + "notNull": true, + "default": true + }, + "failed_count": { + "name": "failed_count", + "type": "integer", + "primaryKey": false, + "notNull": false, + "default": 0 + }, + "last_failed_at": { + "name": "last_failed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "credential_set_id": { + "name": "credential_set_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "path_deployment_unique": { + "name": "path_deployment_unique", + "columns": [ + { + "expression": "path", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "deployment_version_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "idx_webhook_on_workflow_id_block_id": { + "name": "idx_webhook_on_workflow_id_block_id", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "block_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "webhook_workflow_deployment_idx": { + "name": "webhook_workflow_deployment_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "deployment_version_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "webhook_credential_set_id_idx": { + "name": "webhook_credential_set_id_idx", + "columns": [ + { + "expression": 
"credential_set_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "webhook_workflow_id_workflow_id_fk": { + "name": "webhook_workflow_id_workflow_id_fk", + "tableFrom": "webhook", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "webhook_deployment_version_id_workflow_deployment_version_id_fk": { + "name": "webhook_deployment_version_id_workflow_deployment_version_id_fk", + "tableFrom": "webhook", + "tableTo": "workflow_deployment_version", + "columnsFrom": ["deployment_version_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "webhook_credential_set_id_credential_set_id_fk": { + "name": "webhook_credential_set_id_credential_set_id_fk", + "tableFrom": "webhook", + "tableTo": "credential_set", + "columnsFrom": ["credential_set_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow": { + "name": "workflow", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "folder_id": { + "name": "folder_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "sort_order": { + "name": "sort_order", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": 
false, + "notNull": false + }, + "color": { + "name": "color", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'#3972F6'" + }, + "last_synced": { + "name": "last_synced", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "is_deployed": { + "name": "is_deployed", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "deployed_at": { + "name": "deployed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "run_count": { + "name": "run_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "last_run_at": { + "name": "last_run_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "variables": { + "name": "variables", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + } + }, + "indexes": { + "workflow_user_id_idx": { + "name": "workflow_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_workspace_id_idx": { + "name": "workflow_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_user_workspace_idx": { + "name": "workflow_user_workspace_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": 
"btree", + "with": {} + }, + "workflow_folder_sort_idx": { + "name": "workflow_folder_sort_idx", + "columns": [ + { + "expression": "folder_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "sort_order", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_user_id_user_id_fk": { + "name": "workflow_user_id_user_id_fk", + "tableFrom": "workflow", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_workspace_id_workspace_id_fk": { + "name": "workflow_workspace_id_workspace_id_fk", + "tableFrom": "workflow", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_folder_id_workflow_folder_id_fk": { + "name": "workflow_folder_id_workflow_folder_id_fk", + "tableFrom": "workflow", + "tableTo": "workflow_folder", + "columnsFrom": ["folder_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_blocks": { + "name": "workflow_blocks", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "type": { + "name": "type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "position_x": { + "name": "position_x", + "type": "numeric", + "primaryKey": false, + "notNull": true + }, + "position_y": { + "name": "position_y", + "type": "numeric", + "primaryKey": false, + 
"notNull": true + }, + "enabled": { + "name": "enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "horizontal_handles": { + "name": "horizontal_handles", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "is_wide": { + "name": "is_wide", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "advanced_mode": { + "name": "advanced_mode", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "trigger_mode": { + "name": "trigger_mode", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "locked": { + "name": "locked", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "height": { + "name": "height", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "sub_blocks": { + "name": "sub_blocks", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "outputs": { + "name": "outputs", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "data": { + "name": "data", + "type": "jsonb", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_blocks_workflow_id_idx": { + "name": "workflow_blocks_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_blocks_type_idx": { + "name": "workflow_blocks_type_idx", + "columns": [ + { + "expression": "type", + "isExpression": false, + 
"asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_blocks_workflow_id_workflow_id_fk": { + "name": "workflow_blocks_workflow_id_workflow_id_fk", + "tableFrom": "workflow_blocks", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_checkpoints": { + "name": "workflow_checkpoints", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "uuid", + "primaryKey": true, + "notNull": true, + "default": "gen_random_uuid()" + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "chat_id": { + "name": "chat_id", + "type": "uuid", + "primaryKey": false, + "notNull": true + }, + "message_id": { + "name": "message_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "workflow_state": { + "name": "workflow_state", + "type": "json", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_checkpoints_user_id_idx": { + "name": "workflow_checkpoints_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_workflow_id_idx": { + "name": "workflow_checkpoints_workflow_id_idx", + "columns": [ + { + 
"expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_chat_id_idx": { + "name": "workflow_checkpoints_chat_id_idx", + "columns": [ + { + "expression": "chat_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_message_id_idx": { + "name": "workflow_checkpoints_message_id_idx", + "columns": [ + { + "expression": "message_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_user_workflow_idx": { + "name": "workflow_checkpoints_user_workflow_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_workflow_chat_idx": { + "name": "workflow_checkpoints_workflow_chat_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "chat_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_created_at_idx": { + "name": "workflow_checkpoints_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_chat_created_at_idx": { + "name": "workflow_checkpoints_chat_created_at_idx", + "columns": [ + { + "expression": "chat_id", + 
"isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_checkpoints_user_id_user_id_fk": { + "name": "workflow_checkpoints_user_id_user_id_fk", + "tableFrom": "workflow_checkpoints", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_checkpoints_workflow_id_workflow_id_fk": { + "name": "workflow_checkpoints_workflow_id_workflow_id_fk", + "tableFrom": "workflow_checkpoints", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_checkpoints_chat_id_copilot_chats_id_fk": { + "name": "workflow_checkpoints_chat_id_copilot_chats_id_fk", + "tableFrom": "workflow_checkpoints", + "tableTo": "copilot_chats", + "columnsFrom": ["chat_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_deployment_version": { + "name": "workflow_deployment_version", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "version": { + "name": "version", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "state": { + "name": "state", + "type": "json", + "primaryKey": false, + "notNull": true + }, + "is_active": { + 
"name": "is_active", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "workflow_deployment_version_workflow_version_unique": { + "name": "workflow_deployment_version_workflow_version_unique", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "version", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_deployment_version_workflow_active_idx": { + "name": "workflow_deployment_version_workflow_active_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "is_active", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_deployment_version_created_at_idx": { + "name": "workflow_deployment_version_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_deployment_version_workflow_id_workflow_id_fk": { + "name": "workflow_deployment_version_workflow_id_workflow_id_fk", + "tableFrom": "workflow_deployment_version", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + 
"public.workflow_edges": { + "name": "workflow_edges", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "source_block_id": { + "name": "source_block_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "target_block_id": { + "name": "target_block_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "source_handle": { + "name": "source_handle", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "target_handle": { + "name": "target_handle", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_edges_workflow_id_idx": { + "name": "workflow_edges_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_edges_workflow_source_idx": { + "name": "workflow_edges_workflow_source_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "source_block_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_edges_workflow_target_idx": { + "name": "workflow_edges_workflow_target_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "target_block_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + 
"workflow_edges_workflow_id_workflow_id_fk": { + "name": "workflow_edges_workflow_id_workflow_id_fk", + "tableFrom": "workflow_edges", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_edges_source_block_id_workflow_blocks_id_fk": { + "name": "workflow_edges_source_block_id_workflow_blocks_id_fk", + "tableFrom": "workflow_edges", + "tableTo": "workflow_blocks", + "columnsFrom": ["source_block_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_edges_target_block_id_workflow_blocks_id_fk": { + "name": "workflow_edges_target_block_id_workflow_blocks_id_fk", + "tableFrom": "workflow_edges", + "tableTo": "workflow_blocks", + "columnsFrom": ["target_block_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_execution_logs": { + "name": "workflow_execution_logs", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "execution_id": { + "name": "execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "state_snapshot_id": { + "name": "state_snapshot_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "deployment_version_id": { + "name": "deployment_version_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "level": { + "name": "level", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": 
"'running'" + }, + "trigger": { + "name": "trigger", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "started_at": { + "name": "started_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "ended_at": { + "name": "ended_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "total_duration_ms": { + "name": "total_duration_ms", + "type": "integer", + "primaryKey": false, + "notNull": false + }, + "execution_data": { + "name": "execution_data", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "cost": { + "name": "cost", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "files": { + "name": "files", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_execution_logs_workflow_id_idx": { + "name": "workflow_execution_logs_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_state_snapshot_id_idx": { + "name": "workflow_execution_logs_state_snapshot_id_idx", + "columns": [ + { + "expression": "state_snapshot_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_deployment_version_id_idx": { + "name": "workflow_execution_logs_deployment_version_id_idx", + "columns": [ + { + "expression": "deployment_version_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_trigger_idx": { + "name": "workflow_execution_logs_trigger_idx", + 
"columns": [ + { + "expression": "trigger", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_level_idx": { + "name": "workflow_execution_logs_level_idx", + "columns": [ + { + "expression": "level", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_started_at_idx": { + "name": "workflow_execution_logs_started_at_idx", + "columns": [ + { + "expression": "started_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_execution_id_unique": { + "name": "workflow_execution_logs_execution_id_unique", + "columns": [ + { + "expression": "execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_workflow_started_at_idx": { + "name": "workflow_execution_logs_workflow_started_at_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "started_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_workspace_started_at_idx": { + "name": "workflow_execution_logs_workspace_started_at_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "started_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + 
"workflow_execution_logs_workflow_id_workflow_id_fk": { + "name": "workflow_execution_logs_workflow_id_workflow_id_fk", + "tableFrom": "workflow_execution_logs", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + }, + "workflow_execution_logs_workspace_id_workspace_id_fk": { + "name": "workflow_execution_logs_workspace_id_workspace_id_fk", + "tableFrom": "workflow_execution_logs", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_execution_logs_state_snapshot_id_workflow_execution_snapshots_id_fk": { + "name": "workflow_execution_logs_state_snapshot_id_workflow_execution_snapshots_id_fk", + "tableFrom": "workflow_execution_logs", + "tableTo": "workflow_execution_snapshots", + "columnsFrom": ["state_snapshot_id"], + "columnsTo": ["id"], + "onDelete": "no action", + "onUpdate": "no action" + }, + "workflow_execution_logs_deployment_version_id_workflow_deployment_version_id_fk": { + "name": "workflow_execution_logs_deployment_version_id_workflow_deployment_version_id_fk", + "tableFrom": "workflow_execution_logs", + "tableTo": "workflow_deployment_version", + "columnsFrom": ["deployment_version_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_execution_snapshots": { + "name": "workflow_execution_snapshots", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "state_hash": { + "name": "state_hash", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "state_data": { + "name": "state_data", + "type": "jsonb", + 
"primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_snapshots_workflow_id_idx": { + "name": "workflow_snapshots_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_snapshots_hash_idx": { + "name": "workflow_snapshots_hash_idx", + "columns": [ + { + "expression": "state_hash", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_snapshots_workflow_hash_idx": { + "name": "workflow_snapshots_workflow_hash_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "state_hash", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_snapshots_created_at_idx": { + "name": "workflow_snapshots_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_execution_snapshots_workflow_id_workflow_id_fk": { + "name": "workflow_execution_snapshots_workflow_id_workflow_id_fk", + "tableFrom": "workflow_execution_snapshots", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_folder": { + "name": "workflow_folder", + "schema": "", + 
"columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "parent_id": { + "name": "parent_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "color": { + "name": "color", + "type": "text", + "primaryKey": false, + "notNull": false, + "default": "'#6B7280'" + }, + "is_expanded": { + "name": "is_expanded", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "sort_order": { + "name": "sort_order", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_folder_user_idx": { + "name": "workflow_folder_user_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_folder_workspace_parent_idx": { + "name": "workflow_folder_workspace_parent_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "parent_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_folder_parent_sort_idx": { + "name": "workflow_folder_parent_sort_idx", + "columns": [ + { + "expression": "parent_id", + "isExpression": false, + 
"asc": true, + "nulls": "last" + }, + { + "expression": "sort_order", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_folder_user_id_user_id_fk": { + "name": "workflow_folder_user_id_user_id_fk", + "tableFrom": "workflow_folder", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_folder_workspace_id_workspace_id_fk": { + "name": "workflow_folder_workspace_id_workspace_id_fk", + "tableFrom": "workflow_folder", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_mcp_server": { + "name": "workflow_mcp_server", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "is_public": { + "name": "is_public", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_mcp_server_workspace_id_idx": { + "name": 
"workflow_mcp_server_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_mcp_server_created_by_idx": { + "name": "workflow_mcp_server_created_by_idx", + "columns": [ + { + "expression": "created_by", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_mcp_server_workspace_id_workspace_id_fk": { + "name": "workflow_mcp_server_workspace_id_workspace_id_fk", + "tableFrom": "workflow_mcp_server", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_mcp_server_created_by_user_id_fk": { + "name": "workflow_mcp_server_created_by_user_id_fk", + "tableFrom": "workflow_mcp_server", + "tableTo": "user", + "columnsFrom": ["created_by"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_mcp_tool": { + "name": "workflow_mcp_tool", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "server_id": { + "name": "server_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "tool_name": { + "name": "tool_name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "tool_description": { + "name": "tool_description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "parameter_schema": { + "name": "parameter_schema", + "type": "json", + "primaryKey": false, + "notNull": true, + 
"default": "'{}'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_mcp_tool_server_id_idx": { + "name": "workflow_mcp_tool_server_id_idx", + "columns": [ + { + "expression": "server_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_mcp_tool_workflow_id_idx": { + "name": "workflow_mcp_tool_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_mcp_tool_server_workflow_unique": { + "name": "workflow_mcp_tool_server_workflow_unique", + "columns": [ + { + "expression": "server_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_mcp_tool_server_id_workflow_mcp_server_id_fk": { + "name": "workflow_mcp_tool_server_id_workflow_mcp_server_id_fk", + "tableFrom": "workflow_mcp_tool", + "tableTo": "workflow_mcp_server", + "columnsFrom": ["server_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_mcp_tool_workflow_id_workflow_id_fk": { + "name": "workflow_mcp_tool_workflow_id_workflow_id_fk", + "tableFrom": "workflow_mcp_tool", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": 
{}, + "isRLSEnabled": false + }, + "public.workflow_schedule": { + "name": "workflow_schedule", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "deployment_version_id": { + "name": "deployment_version_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "block_id": { + "name": "block_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "cron_expression": { + "name": "cron_expression", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "next_run_at": { + "name": "next_run_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "last_ran_at": { + "name": "last_ran_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "last_queued_at": { + "name": "last_queued_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "trigger_type": { + "name": "trigger_type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "timezone": { + "name": "timezone", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'UTC'" + }, + "failed_count": { + "name": "failed_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'active'" + }, + "last_failed_at": { + "name": "last_failed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_schedule_workflow_block_deployment_unique": { + "name": 
"workflow_schedule_workflow_block_deployment_unique", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "block_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "deployment_version_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_schedule_workflow_deployment_idx": { + "name": "workflow_schedule_workflow_deployment_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "deployment_version_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_schedule_workflow_id_workflow_id_fk": { + "name": "workflow_schedule_workflow_id_workflow_id_fk", + "tableFrom": "workflow_schedule", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_schedule_deployment_version_id_workflow_deployment_version_id_fk": { + "name": "workflow_schedule_deployment_version_id_workflow_deployment_version_id_fk", + "tableFrom": "workflow_schedule", + "tableTo": "workflow_deployment_version", + "columnsFrom": ["deployment_version_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_subflows": { + "name": "workflow_subflows", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "type": { + "name": 
"type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "config": { + "name": "config", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_subflows_workflow_id_idx": { + "name": "workflow_subflows_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_subflows_workflow_type_idx": { + "name": "workflow_subflows_workflow_type_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_subflows_workflow_id_workflow_id_fk": { + "name": "workflow_subflows_workflow_id_workflow_id_fk", + "tableFrom": "workflow_subflows", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace": { + "name": "workspace", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "owner_id": { + "name": "owner_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "billed_account_user_id": { + "name": 
"billed_account_user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "allow_personal_api_keys": { + "name": "allow_personal_api_keys", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": { + "workspace_owner_id_user_id_fk": { + "name": "workspace_owner_id_user_id_fk", + "tableFrom": "workspace", + "tableTo": "user", + "columnsFrom": ["owner_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_billed_account_user_id_user_id_fk": { + "name": "workspace_billed_account_user_id_user_id_fk", + "tableFrom": "workspace", + "tableTo": "user", + "columnsFrom": ["billed_account_user_id"], + "columnsTo": ["id"], + "onDelete": "no action", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_byok_keys": { + "name": "workspace_byok_keys", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "provider_id": { + "name": "provider_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "encrypted_api_key": { + "name": "encrypted_api_key", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": 
"updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_byok_provider_unique": { + "name": "workspace_byok_provider_unique", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "provider_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_byok_workspace_idx": { + "name": "workspace_byok_workspace_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_byok_keys_workspace_id_workspace_id_fk": { + "name": "workspace_byok_keys_workspace_id_workspace_id_fk", + "tableFrom": "workspace_byok_keys", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_byok_keys_created_by_user_id_fk": { + "name": "workspace_byok_keys_created_by_user_id_fk", + "tableFrom": "workspace_byok_keys", + "tableTo": "user", + "columnsFrom": ["created_by"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_environment": { + "name": "workspace_environment", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "variables": { + "name": "variables", + "type": "json", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "created_at": { + "name": "created_at", + "type": 
"timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_environment_workspace_unique": { + "name": "workspace_environment_workspace_unique", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_environment_workspace_id_workspace_id_fk": { + "name": "workspace_environment_workspace_id_workspace_id_fk", + "tableFrom": "workspace_environment", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_file": { + "name": "workspace_file", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "key": { + "name": "key", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "size": { + "name": "size", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "type": { + "name": "type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "uploaded_by": { + "name": "uploaded_by", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "uploaded_at": { + "name": "uploaded_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_file_workspace_id_idx": { + "name": 
"workspace_file_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_file_key_idx": { + "name": "workspace_file_key_idx", + "columns": [ + { + "expression": "key", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_file_workspace_id_workspace_id_fk": { + "name": "workspace_file_workspace_id_workspace_id_fk", + "tableFrom": "workspace_file", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_file_uploaded_by_user_id_fk": { + "name": "workspace_file_uploaded_by_user_id_fk", + "tableFrom": "workspace_file", + "tableTo": "user", + "columnsFrom": ["uploaded_by"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "workspace_file_key_unique": { + "name": "workspace_file_key_unique", + "nullsNotDistinct": false, + "columns": ["key"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_files": { + "name": "workspace_files", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "key": { + "name": "key", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "context": { + "name": "context", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "original_name": { + "name": "original_name", + "type": "text", + "primaryKey": false, 
+ "notNull": true + }, + "content_type": { + "name": "content_type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "size": { + "name": "size", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "uploaded_at": { + "name": "uploaded_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_files_key_idx": { + "name": "workspace_files_key_idx", + "columns": [ + { + "expression": "key", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_files_user_id_idx": { + "name": "workspace_files_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_files_workspace_id_idx": { + "name": "workspace_files_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_files_context_idx": { + "name": "workspace_files_context_idx", + "columns": [ + { + "expression": "context", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_files_user_id_user_id_fk": { + "name": "workspace_files_user_id_user_id_fk", + "tableFrom": "workspace_files", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_files_workspace_id_workspace_id_fk": { + "name": "workspace_files_workspace_id_workspace_id_fk", + "tableFrom": "workspace_files", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + 
"onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "workspace_files_key_unique": { + "name": "workspace_files_key_unique", + "nullsNotDistinct": false, + "columns": ["key"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_invitation": { + "name": "workspace_invitation", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "inviter_id": { + "name": "inviter_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "role": { + "name": "role", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'member'" + }, + "status": { + "name": "status", + "type": "workspace_invitation_status", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "token": { + "name": "token", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "permissions": { + "name": "permissions", + "type": "permission_type", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'admin'" + }, + "org_invitation_id": { + "name": "org_invitation_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": { + "workspace_invitation_workspace_id_workspace_id_fk": { + "name": 
"workspace_invitation_workspace_id_workspace_id_fk", + "tableFrom": "workspace_invitation", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_invitation_inviter_id_user_id_fk": { + "name": "workspace_invitation_inviter_id_user_id_fk", + "tableFrom": "workspace_invitation", + "tableTo": "user", + "columnsFrom": ["inviter_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "workspace_invitation_token_unique": { + "name": "workspace_invitation_token_unique", + "nullsNotDistinct": false, + "columns": ["token"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_notification_delivery": { + "name": "workspace_notification_delivery", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "subscription_id": { + "name": "subscription_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "execution_id": { + "name": "execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": "status", + "type": "notification_delivery_status", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "attempts": { + "name": "attempts", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "last_attempt_at": { + "name": "last_attempt_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "next_attempt_at": { + "name": "next_attempt_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "response_status": { + "name": "response_status", + "type": "integer", + "primaryKey": false, + "notNull": false + }, + 
"response_body": { + "name": "response_body", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "error_message": { + "name": "error_message", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_notification_delivery_subscription_id_idx": { + "name": "workspace_notification_delivery_subscription_id_idx", + "columns": [ + { + "expression": "subscription_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_notification_delivery_execution_id_idx": { + "name": "workspace_notification_delivery_execution_id_idx", + "columns": [ + { + "expression": "execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_notification_delivery_status_idx": { + "name": "workspace_notification_delivery_status_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_notification_delivery_next_attempt_idx": { + "name": "workspace_notification_delivery_next_attempt_idx", + "columns": [ + { + "expression": "next_attempt_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_notification_delivery_subscription_id_workspace_notification_subscription_id_fk": { + "name": 
"workspace_notification_delivery_subscription_id_workspace_notification_subscription_id_fk", + "tableFrom": "workspace_notification_delivery", + "tableTo": "workspace_notification_subscription", + "columnsFrom": ["subscription_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_notification_delivery_workflow_id_workflow_id_fk": { + "name": "workspace_notification_delivery_workflow_id_workflow_id_fk", + "tableFrom": "workspace_notification_delivery", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_notification_subscription": { + "name": "workspace_notification_subscription", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "notification_type": { + "name": "notification_type", + "type": "notification_type", + "typeSchema": "public", + "primaryKey": false, + "notNull": true + }, + "workflow_ids": { + "name": "workflow_ids", + "type": "text[]", + "primaryKey": false, + "notNull": true, + "default": "'{}'::text[]" + }, + "all_workflows": { + "name": "all_workflows", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "level_filter": { + "name": "level_filter", + "type": "text[]", + "primaryKey": false, + "notNull": true, + "default": "ARRAY['info', 'error']::text[]" + }, + "trigger_filter": { + "name": "trigger_filter", + "type": "text[]", + "primaryKey": false, + "notNull": true, + "default": "ARRAY['api', 'webhook', 'schedule', 'manual', 'chat']::text[]" + }, + "include_final_output": { + "name": "include_final_output", + "type": "boolean", + "primaryKey": false, + 
"notNull": true, + "default": false + }, + "include_trace_spans": { + "name": "include_trace_spans", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "include_rate_limits": { + "name": "include_rate_limits", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "include_usage_data": { + "name": "include_usage_data", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "webhook_config": { + "name": "webhook_config", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "email_recipients": { + "name": "email_recipients", + "type": "text[]", + "primaryKey": false, + "notNull": false + }, + "slack_config": { + "name": "slack_config", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "alert_config": { + "name": "alert_config", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "last_alert_at": { + "name": "last_alert_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "active": { + "name": "active", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_notification_workspace_id_idx": { + "name": "workspace_notification_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_notification_active_idx": { + "name": "workspace_notification_active_idx", + "columns": [ + { + "expression": 
"active", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_notification_type_idx": { + "name": "workspace_notification_type_idx", + "columns": [ + { + "expression": "notification_type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_notification_subscription_workspace_id_workspace_id_fk": { + "name": "workspace_notification_subscription_workspace_id_workspace_id_fk", + "tableFrom": "workspace_notification_subscription", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_notification_subscription_created_by_user_id_fk": { + "name": "workspace_notification_subscription_created_by_user_id_fk", + "tableFrom": "workspace_notification_subscription", + "tableTo": "user", + "columnsFrom": ["created_by"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + } + }, + "enums": { + "public.a2a_task_status": { + "name": "a2a_task_status", + "schema": "public", + "values": [ + "submitted", + "working", + "input-required", + "completed", + "failed", + "canceled", + "rejected", + "auth-required", + "unknown" + ] + }, + "public.billing_blocked_reason": { + "name": "billing_blocked_reason", + "schema": "public", + "values": ["payment_failed", "dispute"] + }, + "public.credential_set_invitation_status": { + "name": "credential_set_invitation_status", + "schema": "public", + "values": ["pending", "accepted", "expired", "cancelled"] + }, + "public.credential_set_member_status": { + "name": "credential_set_member_status", + "schema": "public", + "values": ["active", "pending", 
"revoked"] + }, + "public.notification_delivery_status": { + "name": "notification_delivery_status", + "schema": "public", + "values": ["pending", "in_progress", "success", "failed"] + }, + "public.notification_type": { + "name": "notification_type", + "schema": "public", + "values": ["webhook", "email", "slack"] + }, + "public.permission_type": { + "name": "permission_type", + "schema": "public", + "values": ["admin", "write", "read"] + }, + "public.template_creator_type": { + "name": "template_creator_type", + "schema": "public", + "values": ["user", "organization"] + }, + "public.template_status": { + "name": "template_status", + "schema": "public", + "values": ["pending", "approved", "rejected"] + }, + "public.usage_log_category": { + "name": "usage_log_category", + "schema": "public", + "values": ["model", "fixed"] + }, + "public.usage_log_source": { + "name": "usage_log_source", + "schema": "public", + "values": ["workflow", "wand", "copilot", "mcp_copilot"] + }, + "public.workspace_invitation_status": { + "name": "workspace_invitation_status", + "schema": "public", + "values": ["pending", "accepted", "rejected", "cancelled"] + } + }, + "schemas": {}, + "sequences": {}, + "roles": {}, + "policies": {}, + "views": {}, + "_meta": { + "columns": {}, + "schemas": {}, + "tables": {} + } +} diff --git a/packages/db/migrations/meta/_journal.json b/packages/db/migrations/meta/_journal.json index 9a4839319..2fa880f2a 100644 --- a/packages/db/migrations/meta/_journal.json +++ b/packages/db/migrations/meta/_journal.json @@ -1065,6 +1065,13 @@ "when": 1770336289511, "tag": "0152_parallel_frog_thor", "breakpoints": true + }, + { + "idx": 153, + "version": "7", + "when": 1770410282842, + "tag": "0153_complete_arclight", + "breakpoints": true } ] } diff --git a/packages/db/schema.ts b/packages/db/schema.ts index 6fee4786a..d145c5796 100644 --- a/packages/db/schema.ts +++ b/packages/db/schema.ts @@ -715,6 +715,10 @@ export const userStats = pgTable('user_stats', { 
lastPeriodCopilotCost: decimal('last_period_copilot_cost').default('0'), totalCopilotTokens: integer('total_copilot_tokens').notNull().default(0), totalCopilotCalls: integer('total_copilot_calls').notNull().default(0), + // MCP Copilot usage tracking + totalMcpCopilotCalls: integer('total_mcp_copilot_calls').notNull().default(0), + totalMcpCopilotCost: decimal('total_mcp_copilot_cost').notNull().default('0'), + currentPeriodMcpCopilotCost: decimal('current_period_mcp_copilot_cost').notNull().default('0'), // Storage tracking (for free/pro users) storageUsedBytes: bigint('storage_used_bytes', { mode: 'number' }).notNull().default(0), lastActive: timestamp('last_active').notNull().defaultNow(), @@ -1968,7 +1972,12 @@ export const a2aPushNotificationConfig = pgTable( ) export const usageLogCategoryEnum = pgEnum('usage_log_category', ['model', 'fixed']) -export const usageLogSourceEnum = pgEnum('usage_log_source', ['workflow', 'wand', 'copilot']) +export const usageLogSourceEnum = pgEnum('usage_log_source', [ + 'workflow', + 'wand', + 'copilot', + 'mcp_copilot', +]) export const usageLog = pgTable( 'usage_log',