mirror of
https://github.com/simstudioai/sim.git
synced 2026-02-07 05:05:15 -05:00
Compare commits
45 Commits
v0.5.82
...
feat/the-c
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2a7ebfb396 | ||
|
|
ca76e38e8c | ||
|
|
dad6fd68fa | ||
|
|
d1a2d661c9 | ||
|
|
f63ed61bc8 | ||
|
|
0f5eb9d351 | ||
|
|
665cc6a3d1 | ||
|
|
a7341cdcd3 | ||
|
|
92efd817d2 | ||
|
|
3d5321d9a1 | ||
|
|
13c8621513 | ||
|
|
529d382d49 | ||
|
|
fe70beb751 | ||
|
|
529233bfb6 | ||
|
|
43a32a627f | ||
|
|
ea7a07a0d0 | ||
|
|
3f3d5b276d | ||
|
|
ef4cae48f2 | ||
|
|
460935c032 | ||
|
|
5fc5f46733 | ||
|
|
8d70132a4b | ||
|
|
c045580230 | ||
|
|
bff3f03ba6 | ||
|
|
c20a5633bf | ||
|
|
1c23805782 | ||
|
|
bb6b182d24 | ||
|
|
b7aaa53300 | ||
|
|
8d477c0bed | ||
|
|
01371c8809 | ||
|
|
490b6bde08 | ||
|
|
9073c1a0bf | ||
|
|
d0329e14e5 | ||
|
|
d1b2e6c757 | ||
|
|
decc19e73b | ||
|
|
d79fcab659 | ||
|
|
c72e244655 | ||
|
|
7bb3dd6103 | ||
|
|
75b62423bc | ||
|
|
565167d3b3 | ||
|
|
9ff5237a2e | ||
|
|
e9b80c566c | ||
|
|
664ce3168c | ||
|
|
5d82f7ae73 | ||
|
|
1e21ec1fa3 | ||
|
|
71bd535d04 |
@@ -1131,6 +1131,32 @@ export function AirtableIcon(props: SVGProps<SVGSVGElement>) {
|
||||
)
|
||||
}
|
||||
|
||||
export function AirweaveIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg
|
||||
{...props}
|
||||
width='143'
|
||||
height='143'
|
||||
viewBox='0 0 143 143'
|
||||
fill='none'
|
||||
xmlns='http://www.w3.org/2000/svg'
|
||||
>
|
||||
<path
|
||||
d='M89.8854 128.872C79.9165 123.339 66.7502 115.146 60.5707 107.642L60.0432 107.018C58.7836 105.5 57.481 104.014 56.1676 102.593C51.9152 97.9641 47.3614 93.7978 42.646 90.2021C40.7405 88.7487 38.7704 87.3492 36.8111 86.0789C35.7991 85.4222 34.8302 84.8193 33.9151 84.2703C31.6221 82.903 28.8338 82.5263 26.2716 83.2476C23.8385 83.9366 21.89 85.5406 20.7596 87.7476C18.5634 92.0323 20.0814 97.3289 24.2046 99.805C27.5204 101.786 30.7608 104.111 33.8398 106.717C34.2381 107.05 34.3996 107.578 34.2596 108.062C33.1292 112.185 31.9989 118.957 31.5682 121.67C30.6424 127.429 33.4737 133.081 38.5982 135.751L38.7812 135.848C41.0204 137 43.6472 136.946 45.8219 135.697C47.9858 134.459 49.353 132.231 49.4822 129.733C49.536 128.657 49.6006 127.58 49.676 126.59C49.719 126.062 50.042 125.632 50.5264 125.459C50.6772 125.406 50.8494 125.373 51.0001 125.373C51.3554 125.373 51.6784 125.513 51.9475 125.782C56.243 130.185 60.8829 134.169 65.7167 137.625C70.3674 140.951 75.8686 142.706 81.639 142.706C83.7383 142.706 85.8376 142.469 87.8938 141.995L88.1199 141.942C90.9943 141.274 93.029 139.024 93.4488 136.085C93.8687 133.146 92.4476 130.315 89.8747 128.883H89.8639L89.8854 128.872Z'
|
||||
fill='currentColor'
|
||||
/>
|
||||
<path
|
||||
d='M142.551 58.1747L142.529 58.0563C142.045 55.591 140.118 53.7069 137.598 53.2548C135.112 52.8134 132.754 53.8577 131.484 55.9893L131.408 56.1077C126.704 64.1604 120.061 71.6101 111.653 78.2956C109.446 80.0504 107.293 81.902 105.226 83.8075C103.644 85.2717 101.265 85.53 99.4452 84.4212C97.6474 83.3339 95.8495 82.1389 94.1055 80.8686C90.3268 78.1233 86.6772 74.9475 83.2753 71.4271C81.4989 69.597 79.798 67.6915 78.1939 65.7321C76.0408 63.1161 73.7477 60.5539 71.3685 58.1316C66.3195 52.9857 56.6089 45.9127 53.7453 43.878C53.3792 43.6304 53.1639 43.2428 53.0993 42.8014C53.0455 42.3601 53.1639 41.9509 53.4546 41.6064C55.274 39.4318 56.9965 37.1818 58.5683 34.921C60.2369 32.5311 60.786 29.6028 60.0862 26.8899C59.408 24.2523 57.6424 22.11 55.134 20.8827C50.9139 18.7942 45.8972 20.0968 43.2273 23.9293C40.8373 27.3636 38.0167 30.7332 34.8732 33.9306C34.5718 34.232 34.1304 34.3397 33.7213 34.1889C30.5239 33.1447 27.2296 32.2942 23.9461 31.659C23.7093 31.616 23.354 31.5514 22.9126 31.4975C16.4102 30.5286 10.1123 33.7798 7.21639 39.5717L7.1195 39.7548C6.18289 41.628 6.26902 43.8349 7.32405 45.6651C8.40061 47.5167 10.3277 48.701 12.4592 48.8194C13.4604 48.8732 14.4401 48.9378 15.3659 49.0024C15.7966 49.0347 16.1411 49.2823 16.3025 49.6914C16.4533 50.1112 16.3671 50.5419 16.0657 50.8541C12.147 54.8804 8.60515 59.1974 5.5262 63.6867C1.1446 70.0814 -0.481008 78.2095 1.08 85.9822L1.10154 86.1006C1.70441 89.0719 4.05131 91.2035 7.07644 91.5264C9.98315 91.8386 12.6099 90.3208 13.7619 87.6724L13.8265 87.5109C18.6925 75.8625 26.7559 65.5168 37.7907 56.7536C38.3182 56.3445 39.0072 56.28 39.567 56.5922C45.3373 59.768 50.8601 63.902 55.9738 68.8864C56.5982 69.4893 56.6089 70.5013 56.0168 71.1257C53.4761 73.8063 51.0862 76.6054 48.9115 79.469C47.2106 81.7083 47.5335 84.8949 49.6221 86.7358L53.3254 89.9977L53.2824 90.0409C53.8637 90.5576 54.445 91.0744 55.0264 91.5911L55.8123 92.194C56.9319 93.1844 58.3529 93.6365 59.8386 93.4858C61.3027 93.3351 62.67 92.56 63.5635 91.3758C65.1353 89.2873 
66.8578 87.2525 68.6556 85.304C68.957 84.9702 69.3661 84.798 69.8075 84.7872C70.2705 84.7872 70.6257 84.9379 70.9164 85.2286C75.8147 90.0624 81.1114 94.3686 86.6772 97.9966C88.8626 99.4176 89.4978 102.26 88.1306 104.477C86.9248 106.448 85.7729 108.493 84.7179 110.539C83.5014 112.918 83.2968 115.738 84.1688 118.257C84.9978 120.68 86.7095 122.585 88.981 123.64C90.2514 124.232 91.5971 124.534 92.9859 124.534C96.5062 124.534 99.682 122.596 101.286 119.452C102.729 116.61 104.419 113.8 106.281 111.131C107.369 109.559 109.36 108.838 111.255 109.322C115.26 110.355 120.643 111.421 124.454 112.143C128.308 112.864 132.119 111.023 133.96 107.578L134.143 107.233C135.521 104.628 135.531 101.506 134.164 98.8901C132.786 96.2526 130.181 94.4655 127.21 94.121C126.478 94.0349 125.778 93.9488 125.11 93.8626C124.97 93.8411 124.852 93.8196 124.744 93.798L123.356 93.4751L124.357 92.4523C124.432 92.377 124.529 92.2801 124.658 92.194C128.771 88.8028 132.571 85.1963 135.962 81.4714C141.668 75.1951 144.122 66.4965 142.518 58.1747H142.529H142.551Z'
|
||||
fill='currentColor'
|
||||
/>
|
||||
<path
|
||||
d='M56.6506 14.3371C65.5861 19.6338 77.4067 27.3743 82.9833 34.1674C83.64 34.9532 84.2967 35.7391 84.9534 36.4927C86.1591 37.8815 86.2991 39.8731 85.2979 41.4233C83.4892 44.2116 81.4115 46.9569 79.1399 49.5945C77.4713 51.5107 77.4067 54.3098 78.9785 56.2476L79.0431 56.323C79.2261 56.5598 79.4306 56.8074 79.6136 57.0442C81.2931 59.1758 83.0801 61.2213 84.9211 63.1375C85.9007 64.1603 87.2249 64.7309 88.6352 64.7309L88.7644 65.5275L88.7429 64.7309C90.207 64.6986 91.6173 64.0526 92.5969 62.933C94.8362 60.4031 96.9247 57.744 98.8302 55.0633C100.133 53.2224 102.63 52.8026 104.525 54.1052C106.463 55.4402 108.465 56.7105 110.457 57.8839C112.793 59.2511 115.614 59.5095 118.165 58.5621C120.749 57.604 122.762 55.5694 123.656 52.9533C125.055 48.9055 123.257 44.2547 119.382 41.9078C116.755 40.3145 114.15 38.5166 111.674 36.5788C110.382 35.5561 109.833 33.8767 110.296 32.2941C111.437 28.3001 112.481 23.1218 113.148 19.4831C113.837 15.7259 112.147 11.8826 108.939 9.94477L108.562 9.72944C105.871 8.12537 102.587 8.00696 99.7668 9.40649C96.9247 10.8168 95.03 13.5405 94.6855 16.6733L94.6639 16.867C94.6209 17.2546 94.384 17.5453 94.018 17.6637C93.652 17.7821 93.2859 17.6852 93.0168 17.4269C89.0012 13.1422 84.738 9.25576 80.3134 5.8646C74.3708 1.31075 66.7811 -0.583999 59.4928 0.675575L59.1805 0.729423C56.1124 1.2677 53.7547 3.60383 53.1949 6.68279C52.6351 9.72946 53.9915 12.7223 56.6722 14.3048H56.6614L56.6506 14.3371Z'
|
||||
fill='currentColor'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function GoogleDocsIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg
|
||||
|
||||
@@ -7,6 +7,7 @@ import {
|
||||
A2AIcon,
|
||||
AhrefsIcon,
|
||||
AirtableIcon,
|
||||
AirweaveIcon,
|
||||
ApifyIcon,
|
||||
ApolloIcon,
|
||||
ArxivIcon,
|
||||
@@ -141,6 +142,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
|
||||
a2a: A2AIcon,
|
||||
ahrefs: AhrefsIcon,
|
||||
airtable: AirtableIcon,
|
||||
airweave: AirweaveIcon,
|
||||
apify: ApifyIcon,
|
||||
apollo: ApolloIcon,
|
||||
arxiv: ArxivIcon,
|
||||
|
||||
@@ -56,7 +56,7 @@ Switch between modes using the mode selector at the bottom of the input area.
|
||||
Select your preferred AI model using the model selector at the bottom right of the input area.
|
||||
|
||||
**Available Models:**
|
||||
- Claude 4.5 Opus, Sonnet (default), Haiku
|
||||
- Claude 4.6 Opus (default), 4.5 Opus, Sonnet, Haiku
|
||||
- GPT 5.2 Codex, Pro
|
||||
- Gemini 3 Pro
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
"connections",
|
||||
"mcp",
|
||||
"copilot",
|
||||
"skills",
|
||||
"knowledgebase",
|
||||
"variables",
|
||||
"execution",
|
||||
|
||||
83
apps/docs/content/docs/en/skills/index.mdx
Normal file
83
apps/docs/content/docs/en/skills/index.mdx
Normal file
@@ -0,0 +1,83 @@
|
||||
---
|
||||
title: Agent Skills
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
|
||||
Agent Skills are reusable packages of instructions that give your AI agents specialized capabilities. Based on the open [Agent Skills](https://agentskills.io) format, skills let you capture domain expertise, workflows, and best practices that agents can load on demand.
|
||||
|
||||
## How Skills Work
|
||||
|
||||
Skills use **progressive disclosure** to keep agent context lean:
|
||||
|
||||
1. **Discovery** — Only skill names and descriptions are included in the agent's system prompt (~50-100 tokens each)
|
||||
2. **Activation** — When the agent decides a skill is relevant, it calls the `load_skill` tool to load the full instructions into context
|
||||
3. **Execution** — The agent follows the loaded instructions to complete the task
|
||||
|
||||
This means you can attach many skills to an agent without bloating its context window. The agent only loads what it needs.
|
||||
|
||||
## Creating Skills
|
||||
|
||||
Go to **Settings** (gear icon) and select **Skills** under the Tools section.
|
||||
|
||||
Click **Add** to create a new skill with three fields:
|
||||
|
||||
| Field | Description |
|
||||
|-------|-------------|
|
||||
| **Name** | A kebab-case identifier (e.g. `sql-expert`, `code-reviewer`). Max 64 characters. |
|
||||
| **Description** | A short explanation of what the skill does and when to use it. This is what the agent reads to decide whether to activate the skill. Max 1024 characters. |
|
||||
| **Content** | The full skill instructions in markdown. This is loaded when the agent activates the skill. |
|
||||
|
||||
<Callout type="info">
|
||||
The description is critical — it's the only thing the agent sees before deciding to load a skill. Be specific about when and why the skill should be used.
|
||||
</Callout>
|
||||
|
||||
### Writing Good Skill Content
|
||||
|
||||
Skill content follows the same conventions as [SKILL.md files](https://agentskills.io/specification):
|
||||
|
||||
```markdown
|
||||
# SQL Expert
|
||||
|
||||
## When to use this skill
|
||||
Use when the user asks you to write, optimize, or debug SQL queries.
|
||||
|
||||
## Instructions
|
||||
1. Always ask which database engine (PostgreSQL, MySQL, SQLite)
|
||||
2. Use CTEs over subqueries for readability
|
||||
3. Add index recommendations when relevant
|
||||
4. Explain query plans for optimization requests
|
||||
|
||||
## Common Patterns
|
||||
...
|
||||
```
|
||||
|
||||
## Adding Skills to an Agent
|
||||
|
||||
Open any **Agent** block and find the **Skills** dropdown below the tools section. Select the skills you want the agent to have access to.
|
||||
|
||||
Selected skills appear as chips that you can click to edit or remove.
|
||||
|
||||
### What Happens at Runtime
|
||||
|
||||
When the workflow runs:
|
||||
|
||||
1. The agent's system prompt includes an `<available_skills>` section listing each skill's name and description
|
||||
2. A `load_skill` tool is automatically added to the agent's available tools
|
||||
3. When the agent determines a skill is relevant to the current task, it calls `load_skill` with the skill name
|
||||
4. The full skill content is returned as a tool response, giving the agent detailed instructions
|
||||
|
||||
This works across all supported LLM providers — the `load_skill` tool uses standard tool-calling, so no provider-specific configuration is needed.
|
||||
|
||||
## Tips
|
||||
|
||||
- **Keep descriptions actionable** — Instead of "Helps with SQL", write "Write optimized SQL queries for PostgreSQL, MySQL, and SQLite, including index recommendations and query plan analysis"
|
||||
- **One skill per domain** — A focused `sql-expert` skill works better than a broad `database-everything` skill
|
||||
- **Use markdown structure** — Headers, lists, and code blocks help the agent parse and follow instructions
|
||||
- **Test iteratively** — Run your workflow and check if the agent activates the skill when expected
|
||||
|
||||
## Learn More
|
||||
|
||||
- [Agent Skills specification](https://agentskills.io) — The open format for portable agent skills
|
||||
- [Example skills](https://github.com/anthropics/skills) — Browse community skill examples
|
||||
- [Best practices](https://agentskills.io/what-are-skills) — Writing effective skills
|
||||
52
apps/docs/content/docs/en/tools/airweave.mdx
Normal file
52
apps/docs/content/docs/en/tools/airweave.mdx
Normal file
@@ -0,0 +1,52 @@
|
||||
---
|
||||
title: Airweave
|
||||
description: Search your synced data collections
|
||||
---
|
||||
|
||||
import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
|
||||
<BlockInfoCard
|
||||
type="airweave"
|
||||
color="#6366F1"
|
||||
/>
|
||||
|
||||
## Usage Instructions
|
||||
|
||||
Search across your synced data sources using Airweave. Supports semantic search with hybrid, neural, or keyword retrieval strategies. Optionally generate AI-powered answers from search results.
|
||||
|
||||
|
||||
|
||||
## Tools
|
||||
|
||||
### `airweave_search`
|
||||
|
||||
Search your synced data collections using Airweave. Supports semantic search with hybrid, neural, or keyword retrieval strategies. Optionally generate AI-powered answers from search results.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Airweave API Key for authentication |
|
||||
| `collectionId` | string | Yes | The readable ID of the collection to search |
|
||||
| `query` | string | Yes | The search query text |
|
||||
| `limit` | number | No | Maximum number of results to return \(default: 100\) |
|
||||
| `retrievalStrategy` | string | No | Retrieval strategy: hybrid \(default\), neural, or keyword |
|
||||
| `expandQuery` | boolean | No | Generate query variations to improve recall |
|
||||
| `rerank` | boolean | No | Reorder results for improved relevance using LLM |
|
||||
| `generateAnswer` | boolean | No | Generate a natural-language answer to the query |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | array | Search results with content, scores, and metadata from your synced data |
|
||||
| ↳ `entity_id` | string | Unique identifier for the search result entity |
|
||||
| ↳ `source_name` | string | Name of the data source \(e.g., "GitHub", "Slack"\) |
|
||||
| ↳ `md_content` | string | Markdown-formatted content of the result |
|
||||
| ↳ `score` | number | Relevance score from the search |
|
||||
| ↳ `metadata` | object | Additional metadata associated with the result |
|
||||
| ↳ `breadcrumbs` | array | Navigation path to the result within its source |
|
||||
| ↳ `url` | string | URL to the original content |
|
||||
| `completion` | string | AI-generated answer to the query \(when generateAnswer is enabled\) |
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
"a2a",
|
||||
"ahrefs",
|
||||
"airtable",
|
||||
"airweave",
|
||||
"apify",
|
||||
"apollo",
|
||||
"arxiv",
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery'
|
||||
|
||||
export async function GET(request: NextRequest): Promise<NextResponse> {
|
||||
return createMcpAuthorizationServerMetadataResponse(request)
|
||||
}
|
||||
@@ -0,0 +1,6 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery'
|
||||
|
||||
export async function GET(request: NextRequest): Promise<NextResponse> {
|
||||
return createMcpAuthorizationServerMetadataResponse(request)
|
||||
}
|
||||
@@ -0,0 +1,6 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery'
|
||||
|
||||
export async function GET(request: NextRequest): Promise<NextResponse> {
|
||||
return createMcpAuthorizationServerMetadataResponse(request)
|
||||
}
|
||||
@@ -0,0 +1,6 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { createMcpProtectedResourceMetadataResponse } from '@/lib/mcp/oauth-discovery'
|
||||
|
||||
export async function GET(request: NextRequest): Promise<NextResponse> {
|
||||
return createMcpProtectedResourceMetadataResponse(request)
|
||||
}
|
||||
@@ -0,0 +1,6 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { createMcpProtectedResourceMetadataResponse } from '@/lib/mcp/oauth-discovery'
|
||||
|
||||
export async function GET(request: NextRequest): Promise<NextResponse> {
|
||||
return createMcpProtectedResourceMetadataResponse(request)
|
||||
}
|
||||
@@ -18,6 +18,7 @@ const UpdateCostSchema = z.object({
|
||||
model: z.string().min(1, 'Model is required'),
|
||||
inputTokens: z.number().min(0).default(0),
|
||||
outputTokens: z.number().min(0).default(0),
|
||||
source: z.enum(['copilot', 'mcp_copilot']).default('copilot'),
|
||||
})
|
||||
|
||||
/**
|
||||
@@ -75,12 +76,14 @@ export async function POST(req: NextRequest) {
|
||||
)
|
||||
}
|
||||
|
||||
const { userId, cost, model, inputTokens, outputTokens } = validation.data
|
||||
const { userId, cost, model, inputTokens, outputTokens, source } = validation.data
|
||||
const isMcp = source === 'mcp_copilot'
|
||||
|
||||
logger.info(`[${requestId}] Processing cost update`, {
|
||||
userId,
|
||||
cost,
|
||||
model,
|
||||
source,
|
||||
})
|
||||
|
||||
// Check if user stats record exists (same as ExecutionLogger)
|
||||
@@ -96,7 +99,7 @@ export async function POST(req: NextRequest) {
|
||||
return NextResponse.json({ error: 'User stats record not found' }, { status: 500 })
|
||||
}
|
||||
|
||||
const updateFields = {
|
||||
const updateFields: Record<string, unknown> = {
|
||||
totalCost: sql`total_cost + ${cost}`,
|
||||
currentPeriodCost: sql`current_period_cost + ${cost}`,
|
||||
totalCopilotCost: sql`total_copilot_cost + ${cost}`,
|
||||
@@ -105,17 +108,24 @@ export async function POST(req: NextRequest) {
|
||||
lastActive: new Date(),
|
||||
}
|
||||
|
||||
// Also increment MCP-specific counters when source is mcp_copilot
|
||||
if (isMcp) {
|
||||
updateFields.totalMcpCopilotCost = sql`total_mcp_copilot_cost + ${cost}`
|
||||
updateFields.currentPeriodMcpCopilotCost = sql`current_period_mcp_copilot_cost + ${cost}`
|
||||
}
|
||||
|
||||
await db.update(userStats).set(updateFields).where(eq(userStats.userId, userId))
|
||||
|
||||
logger.info(`[${requestId}] Updated user stats record`, {
|
||||
userId,
|
||||
addedCost: cost,
|
||||
source,
|
||||
})
|
||||
|
||||
// Log usage for complete audit trail
|
||||
await logModelUsage({
|
||||
userId,
|
||||
source: 'copilot',
|
||||
source: isMcp ? 'mcp_copilot' : 'copilot',
|
||||
model,
|
||||
inputTokens,
|
||||
outputTokens,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { getSession } from '@/lib/auth'
|
||||
import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants'
|
||||
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
|
||||
const GenerateApiKeySchema = z.object({
|
||||
@@ -17,9 +17,6 @@ export async function POST(req: NextRequest) {
|
||||
|
||||
const userId = session.user.id
|
||||
|
||||
// Move environment variable access inside the function
|
||||
const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT
|
||||
|
||||
const body = await req.json().catch(() => ({}))
|
||||
const validationResult = GenerateApiKeySchema.safeParse(body)
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { getSession } from '@/lib/auth'
|
||||
import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants'
|
||||
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
|
||||
export async function GET(request: NextRequest) {
|
||||
@@ -12,8 +12,6 @@ export async function GET(request: NextRequest) {
|
||||
|
||||
const userId = session.user.id
|
||||
|
||||
const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT
|
||||
|
||||
const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key/get-api-keys`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
@@ -68,8 +66,6 @@ export async function DELETE(request: NextRequest) {
|
||||
return NextResponse.json({ error: 'id is required' }, { status: 400 })
|
||||
}
|
||||
|
||||
const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT
|
||||
|
||||
const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key/delete`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
130
apps/sim/app/api/copilot/chat/stream/route.ts
Normal file
130
apps/sim/app/api/copilot/chat/stream/route.ts
Normal file
@@ -0,0 +1,130 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import {
|
||||
getStreamMeta,
|
||||
readStreamEvents,
|
||||
type StreamMeta,
|
||||
} from '@/lib/copilot/orchestrator/stream-buffer'
|
||||
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
|
||||
import { SSE_HEADERS } from '@/lib/core/utils/sse'
|
||||
|
||||
const logger = createLogger('CopilotChatStreamAPI')
|
||||
const POLL_INTERVAL_MS = 250
|
||||
const MAX_STREAM_MS = 10 * 60 * 1000
|
||||
|
||||
function encodeEvent(event: Record<string, any>): Uint8Array {
|
||||
return new TextEncoder().encode(`data: ${JSON.stringify(event)}\n\n`)
|
||||
}
|
||||
|
||||
export async function GET(request: NextRequest) {
|
||||
const { userId: authenticatedUserId, isAuthenticated } =
|
||||
await authenticateCopilotRequestSessionOnly()
|
||||
|
||||
if (!isAuthenticated || !authenticatedUserId) {
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
const url = new URL(request.url)
|
||||
const streamId = url.searchParams.get('streamId') || ''
|
||||
const fromParam = url.searchParams.get('from') || '0'
|
||||
const fromEventId = Number(fromParam || 0)
|
||||
// If batch=true, return buffered events as JSON instead of SSE
|
||||
const batchMode = url.searchParams.get('batch') === 'true'
|
||||
const toParam = url.searchParams.get('to')
|
||||
const toEventId = toParam ? Number(toParam) : undefined
|
||||
|
||||
if (!streamId) {
|
||||
return NextResponse.json({ error: 'streamId is required' }, { status: 400 })
|
||||
}
|
||||
|
||||
const meta = (await getStreamMeta(streamId)) as StreamMeta | null
|
||||
logger.info('[Resume] Stream lookup', {
|
||||
streamId,
|
||||
fromEventId,
|
||||
toEventId,
|
||||
batchMode,
|
||||
hasMeta: !!meta,
|
||||
metaStatus: meta?.status,
|
||||
})
|
||||
if (!meta) {
|
||||
return NextResponse.json({ error: 'Stream not found' }, { status: 404 })
|
||||
}
|
||||
if (meta.userId && meta.userId !== authenticatedUserId) {
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 403 })
|
||||
}
|
||||
|
||||
// Batch mode: return all buffered events as JSON
|
||||
if (batchMode) {
|
||||
const events = await readStreamEvents(streamId, fromEventId)
|
||||
const filteredEvents = toEventId ? events.filter((e) => e.eventId <= toEventId) : events
|
||||
logger.info('[Resume] Batch response', {
|
||||
streamId,
|
||||
fromEventId,
|
||||
toEventId,
|
||||
eventCount: filteredEvents.length,
|
||||
})
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
events: filteredEvents,
|
||||
status: meta.status,
|
||||
})
|
||||
}
|
||||
|
||||
const startTime = Date.now()
|
||||
|
||||
const stream = new ReadableStream({
|
||||
async start(controller) {
|
||||
let lastEventId = Number.isFinite(fromEventId) ? fromEventId : 0
|
||||
|
||||
const flushEvents = async () => {
|
||||
const events = await readStreamEvents(streamId, lastEventId)
|
||||
if (events.length > 0) {
|
||||
logger.info('[Resume] Flushing events', {
|
||||
streamId,
|
||||
fromEventId: lastEventId,
|
||||
eventCount: events.length,
|
||||
})
|
||||
}
|
||||
for (const entry of events) {
|
||||
lastEventId = entry.eventId
|
||||
const payload = {
|
||||
...entry.event,
|
||||
eventId: entry.eventId,
|
||||
streamId: entry.streamId,
|
||||
}
|
||||
controller.enqueue(encodeEvent(payload))
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
await flushEvents()
|
||||
|
||||
while (Date.now() - startTime < MAX_STREAM_MS) {
|
||||
const currentMeta = await getStreamMeta(streamId)
|
||||
if (!currentMeta) break
|
||||
|
||||
await flushEvents()
|
||||
|
||||
if (currentMeta.status === 'complete' || currentMeta.status === 'error') {
|
||||
break
|
||||
}
|
||||
|
||||
if (request.signal.aborted) {
|
||||
break
|
||||
}
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS))
|
||||
}
|
||||
} catch (error) {
|
||||
logger.warn('Stream replay failed', {
|
||||
streamId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
} finally {
|
||||
controller.close()
|
||||
}
|
||||
},
|
||||
})
|
||||
|
||||
return new Response(stream, { headers: SSE_HEADERS })
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { REDIS_TOOL_CALL_PREFIX, REDIS_TOOL_CALL_TTL_SECONDS } from '@/lib/copilot/constants'
|
||||
import {
|
||||
authenticateCopilotRequestSessionOnly,
|
||||
createBadRequestResponse,
|
||||
@@ -23,7 +24,8 @@ const ConfirmationSchema = z.object({
|
||||
})
|
||||
|
||||
/**
|
||||
* Update tool call status in Redis
|
||||
* Write the user's tool decision to Redis. The server-side orchestrator's
|
||||
* waitForToolDecision() polls Redis for this value.
|
||||
*/
|
||||
async function updateToolCallStatus(
|
||||
toolCallId: string,
|
||||
@@ -32,57 +34,24 @@ async function updateToolCallStatus(
|
||||
): Promise<boolean> {
|
||||
const redis = getRedisClient()
|
||||
if (!redis) {
|
||||
logger.warn('updateToolCallStatus: Redis client not available')
|
||||
logger.warn('Redis client not available for tool confirmation')
|
||||
return false
|
||||
}
|
||||
|
||||
try {
|
||||
const key = `tool_call:${toolCallId}`
|
||||
const timeout = 600000 // 10 minutes timeout for user confirmation
|
||||
const pollInterval = 100 // Poll every 100ms
|
||||
const startTime = Date.now()
|
||||
|
||||
logger.info('Polling for tool call in Redis', { toolCallId, key, timeout })
|
||||
|
||||
// Poll until the key exists or timeout
|
||||
while (Date.now() - startTime < timeout) {
|
||||
const exists = await redis.exists(key)
|
||||
if (exists) {
|
||||
break
|
||||
}
|
||||
|
||||
// Wait before next poll
|
||||
await new Promise((resolve) => setTimeout(resolve, pollInterval))
|
||||
}
|
||||
|
||||
// Final check if key exists after polling
|
||||
const exists = await redis.exists(key)
|
||||
if (!exists) {
|
||||
logger.warn('Tool call not found in Redis after polling timeout', {
|
||||
toolCallId,
|
||||
key,
|
||||
timeout,
|
||||
pollDuration: Date.now() - startTime,
|
||||
})
|
||||
return false
|
||||
}
|
||||
|
||||
// Store both status and message as JSON
|
||||
const toolCallData = {
|
||||
const key = `${REDIS_TOOL_CALL_PREFIX}${toolCallId}`
|
||||
const payload = {
|
||||
status,
|
||||
message: message || null,
|
||||
timestamp: new Date().toISOString(),
|
||||
}
|
||||
|
||||
await redis.set(key, JSON.stringify(toolCallData), 'EX', 86400) // Keep 24 hour expiry
|
||||
|
||||
await redis.set(key, JSON.stringify(payload), 'EX', REDIS_TOOL_CALL_TTL_SECONDS)
|
||||
return true
|
||||
} catch (error) {
|
||||
logger.error('Failed to update tool call status in Redis', {
|
||||
logger.error('Failed to update tool call status', {
|
||||
toolCallId,
|
||||
status,
|
||||
message,
|
||||
error: error instanceof Error ? error.message : 'Unknown error',
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return false
|
||||
}
|
||||
|
||||
28
apps/sim/app/api/copilot/credentials/route.ts
Normal file
28
apps/sim/app/api/copilot/credentials/route.ts
Normal file
@@ -0,0 +1,28 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
|
||||
import { routeExecution } from '@/lib/copilot/tools/server/router'
|
||||
|
||||
/**
|
||||
* GET /api/copilot/credentials
|
||||
* Returns connected OAuth credentials for the authenticated user.
|
||||
* Used by the copilot store for credential masking.
|
||||
*/
|
||||
export async function GET(_req: NextRequest) {
|
||||
const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly()
|
||||
if (!isAuthenticated || !userId) {
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await routeExecution('get_credentials', {}, { userId })
|
||||
return NextResponse.json({ success: true, result })
|
||||
} catch (error) {
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : 'Failed to load credentials',
|
||||
},
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import {
|
||||
authenticateCopilotRequestSessionOnly,
|
||||
createBadRequestResponse,
|
||||
createInternalServerErrorResponse,
|
||||
createRequestTracker,
|
||||
createUnauthorizedResponse,
|
||||
} from '@/lib/copilot/request-helpers'
|
||||
import { routeExecution } from '@/lib/copilot/tools/server/router'
|
||||
|
||||
const logger = createLogger('ExecuteCopilotServerToolAPI')
|
||||
|
||||
const ExecuteSchema = z.object({
|
||||
toolName: z.string(),
|
||||
payload: z.unknown().optional(),
|
||||
})
|
||||
|
||||
export async function POST(req: NextRequest) {
|
||||
const tracker = createRequestTracker()
|
||||
try {
|
||||
const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly()
|
||||
if (!isAuthenticated || !userId) {
|
||||
return createUnauthorizedResponse()
|
||||
}
|
||||
|
||||
const body = await req.json()
|
||||
try {
|
||||
const preview = JSON.stringify(body).slice(0, 300)
|
||||
logger.debug(`[${tracker.requestId}] Incoming request body preview`, { preview })
|
||||
} catch {}
|
||||
|
||||
const { toolName, payload } = ExecuteSchema.parse(body)
|
||||
|
||||
logger.info(`[${tracker.requestId}] Executing server tool`, { toolName })
|
||||
const result = await routeExecution(toolName, payload, { userId })
|
||||
|
||||
try {
|
||||
const resultPreview = JSON.stringify(result).slice(0, 300)
|
||||
logger.debug(`[${tracker.requestId}] Server tool result preview`, { toolName, resultPreview })
|
||||
} catch {}
|
||||
|
||||
return NextResponse.json({ success: true, result })
|
||||
} catch (error) {
|
||||
if (error instanceof z.ZodError) {
|
||||
logger.debug(`[${tracker.requestId}] Zod validation error`, { issues: error.issues })
|
||||
return createBadRequestResponse('Invalid request body for execute-copilot-server-tool')
|
||||
}
|
||||
logger.error(`[${tracker.requestId}] Failed to execute server tool:`, error)
|
||||
const errorMessage = error instanceof Error ? error.message : 'Failed to execute server tool'
|
||||
return createInternalServerErrorResponse(errorMessage)
|
||||
}
|
||||
}
|
||||
@@ -1,247 +0,0 @@
|
||||
import { db } from '@sim/db'
|
||||
import { account, workflow } from '@sim/db/schema'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { and, eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { getSession } from '@/lib/auth'
|
||||
import {
|
||||
createBadRequestResponse,
|
||||
createInternalServerErrorResponse,
|
||||
createRequestTracker,
|
||||
createUnauthorizedResponse,
|
||||
} from '@/lib/copilot/request-helpers'
|
||||
import { generateRequestId } from '@/lib/core/utils/request'
|
||||
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
|
||||
import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils'
|
||||
import { resolveEnvVarReferences } from '@/executor/utils/reference-validation'
|
||||
import { executeTool } from '@/tools'
|
||||
import { getTool, resolveToolId } from '@/tools/utils'
|
||||
|
||||
const logger = createLogger('CopilotExecuteToolAPI')
|
||||
|
||||
const ExecuteToolSchema = z.object({
|
||||
toolCallId: z.string(),
|
||||
toolName: z.string(),
|
||||
arguments: z.record(z.any()).optional().default({}),
|
||||
workflowId: z.string().optional(),
|
||||
})
|
||||
|
||||
/**
 * POST handler that executes a registry tool on behalf of the copilot.
 *
 * Flow: session auth → Zod validation → tool lookup → {{ENV_VAR}} resolution
 * in arguments → optional OAuth token / API key resolution → execution via
 * executeTool. Each failure point returns a JSON error (401/400/404/500);
 * on success the response wraps the tool's own { success, output, error }.
 */
export async function POST(req: NextRequest) {
  const tracker = createRequestTracker()

  try {
    const session = await getSession()
    if (!session?.user?.id) {
      return createUnauthorizedResponse()
    }

    const userId = session.user.id
    const body = await req.json()

    // Best-effort diagnostics: body may not be serializable, so failures are swallowed.
    try {
      const preview = JSON.stringify(body).slice(0, 300)
      logger.debug(`[${tracker.requestId}] Incoming execute-tool request`, { preview })
    } catch {}

    const { toolCallId, toolName, arguments: toolArgs, workflowId } = ExecuteToolSchema.parse(body)

    // Map any alias/legacy name onto the canonical registry id.
    const resolvedToolName = resolveToolId(toolName)

    logger.info(`[${tracker.requestId}] Executing tool`, {
      toolCallId,
      toolName,
      resolvedToolName,
      workflowId,
      hasArgs: Object.keys(toolArgs).length > 0,
    })

    const toolConfig = getTool(resolvedToolName)
    if (!toolConfig) {
      // Find similar tool names to help debug
      const { tools: allTools } = await import('@/tools/registry')
      const allToolNames = Object.keys(allTools)
      const prefix = toolName.split('_').slice(0, 2).join('_')
      // Suggest up to 10 tools sharing the same first name segment (e.g. "slack_").
      const similarTools = allToolNames
        .filter((name) => name.startsWith(`${prefix.split('_')[0]}_`))
        .slice(0, 10)

      logger.warn(`[${tracker.requestId}] Tool not found in registry`, {
        toolName,
        prefix,
        similarTools,
        totalToolsInRegistry: allToolNames.length,
      })
      return NextResponse.json(
        {
          success: false,
          error: `Tool not found: ${toolName}. Similar tools: ${similarTools.join(', ')}`,
          toolCallId,
        },
        { status: 404 }
      )
    }

    // Get the workspaceId from the workflow (env vars are stored at workspace level)
    let workspaceId: string | undefined
    if (workflowId) {
      const workflowResult = await db
        .select({ workspaceId: workflow.workspaceId })
        .from(workflow)
        .where(eq(workflow.id, workflowId))
        .limit(1)
      workspaceId = workflowResult[0]?.workspaceId ?? undefined
    }

    // Get decrypted environment variables early so we can resolve all {{VAR}} references
    const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId)

    // NOTE(review): this logs the env-var key names (not values); confirm that is acceptable.
    logger.info(`[${tracker.requestId}] Fetched environment variables`, {
      workflowId,
      workspaceId,
      envVarCount: Object.keys(decryptedEnvVars).length,
      envVarKeys: Object.keys(decryptedEnvVars),
    })

    // Build execution params starting with LLM-provided arguments
    // Resolve all {{ENV_VAR}} references in the arguments (deep for nested objects)
    const executionParams: Record<string, any> = resolveEnvVarReferences(
      toolArgs,
      decryptedEnvVars,
      { deep: true }
    ) as Record<string, any>

    logger.info(`[${tracker.requestId}] Resolved env var references in arguments`, {
      toolName,
      originalArgKeys: Object.keys(toolArgs),
      resolvedArgKeys: Object.keys(executionParams),
    })

    // Resolve OAuth access token if required
    if (toolConfig.oauth?.required && toolConfig.oauth.provider) {
      const provider = toolConfig.oauth.provider
      logger.info(`[${tracker.requestId}] Resolving OAuth token`, { provider })

      try {
        // Find the account for this provider and user
        const accounts = await db
          .select()
          .from(account)
          .where(and(eq(account.providerId, provider), eq(account.userId, userId)))
          .limit(1)

        if (accounts.length > 0) {
          const acc = accounts[0]
          const requestId = generateRequestId()
          // refreshTokenIfNeeded presumably refreshes an expired token — TODO confirm behavior on revoked grants.
          const { accessToken } = await refreshTokenIfNeeded(requestId, acc as any, acc.id)

          if (accessToken) {
            executionParams.accessToken = accessToken
            logger.info(`[${tracker.requestId}] OAuth token resolved`, { provider })
          } else {
            logger.warn(`[${tracker.requestId}] No access token available`, { provider })
            return NextResponse.json(
              {
                success: false,
                error: `OAuth token not available for ${provider}. Please reconnect your account.`,
                toolCallId,
              },
              { status: 400 }
            )
          }
        } else {
          logger.warn(`[${tracker.requestId}] No account found for provider`, { provider })
          return NextResponse.json(
            {
              success: false,
              error: `No ${provider} account connected. Please connect your account first.`,
              toolCallId,
            },
            { status: 400 }
          )
        }
      } catch (error) {
        logger.error(`[${tracker.requestId}] Failed to resolve OAuth token`, {
          provider,
          error: error instanceof Error ? error.message : String(error),
        })
        return NextResponse.json(
          {
            success: false,
            error: `Failed to get OAuth token for ${provider}`,
            toolCallId,
          },
          { status: 500 }
        )
      }
    }

    // Check if tool requires an API key that wasn't resolved via {{ENV_VAR}} reference
    const needsApiKey = toolConfig.params?.apiKey?.required

    if (needsApiKey && !executionParams.apiKey) {
      logger.warn(`[${tracker.requestId}] No API key found for tool`, { toolName })
      return NextResponse.json(
        {
          success: false,
          error: `API key not provided for ${toolName}. Use {{YOUR_API_KEY_ENV_VAR}} to reference your environment variable.`,
          toolCallId,
        },
        { status: 400 }
      )
    }

    // Add execution context
    executionParams._context = {
      workflowId,
      userId,
    }

    // Special handling for function_execute - inject environment variables
    if (toolName === 'function_execute') {
      executionParams.envVars = decryptedEnvVars
      executionParams.workflowVariables = {} // No workflow variables in copilot context
      executionParams.blockData = {} // No block data in copilot context
      executionParams.blockNameMapping = {} // No block mapping in copilot context
      executionParams.language = executionParams.language || 'javascript'
      executionParams.timeout = executionParams.timeout || 30000

      logger.info(`[${tracker.requestId}] Injected env vars for function_execute`, {
        envVarCount: Object.keys(decryptedEnvVars).length,
      })
    }

    // Execute the tool
    logger.info(`[${tracker.requestId}] Executing tool with resolved credentials`, {
      toolName,
      hasAccessToken: !!executionParams.accessToken,
      hasApiKey: !!executionParams.apiKey,
    })

    const result = await executeTool(resolvedToolName, executionParams)

    logger.info(`[${tracker.requestId}] Tool execution complete`, {
      toolName,
      success: result.success,
      hasOutput: !!result.output,
    })

    // Outer success is true even when the tool itself failed; the tool's own
    // success/error is nested in `result` for the caller to inspect.
    return NextResponse.json({
      success: true,
      toolCallId,
      result: {
        success: result.success,
        output: result.output,
        error: result.error,
      },
    })
  } catch (error) {
    if (error instanceof z.ZodError) {
      logger.debug(`[${tracker.requestId}] Zod validation error`, { issues: error.issues })
      return createBadRequestResponse('Invalid request body for execute-tool')
    }
    logger.error(`[${tracker.requestId}] Failed to execute tool:`, error)
    const errorMessage = error instanceof Error ? error.message : 'Failed to execute tool'
    return createInternalServerErrorResponse(errorMessage)
  }
}
|
||||
@@ -1,6 +1,6 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants'
|
||||
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
|
||||
import {
|
||||
authenticateCopilotRequestSessionOnly,
|
||||
createBadRequestResponse,
|
||||
@@ -10,8 +10,6 @@ import {
|
||||
} from '@/lib/copilot/request-helpers'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
|
||||
const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT
|
||||
|
||||
const BodySchema = z.object({
|
||||
messageId: z.string(),
|
||||
diffCreated: z.boolean(),
|
||||
|
||||
@@ -1,123 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants'
|
||||
import {
|
||||
authenticateCopilotRequestSessionOnly,
|
||||
createBadRequestResponse,
|
||||
createInternalServerErrorResponse,
|
||||
createRequestTracker,
|
||||
createUnauthorizedResponse,
|
||||
} from '@/lib/copilot/request-helpers'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
|
||||
const logger = createLogger('CopilotMarkToolCompleteAPI')
|
||||
|
||||
const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT
|
||||
|
||||
const MarkCompleteSchema = z.object({
|
||||
id: z.string(),
|
||||
name: z.string(),
|
||||
status: z.number().int(),
|
||||
message: z.any().optional(),
|
||||
data: z.any().optional(),
|
||||
})
|
||||
|
||||
/**
|
||||
* POST /api/copilot/tools/mark-complete
|
||||
* Proxy to Sim Agent: POST /api/tools/mark-complete
|
||||
*/
|
||||
/**
 * POST handler that forwards a tool mark-complete notification to the Sim
 * Agent service, translating the agent's reply into a { success, error }
 * JSON response. 5xx agent errors map to 500; all other agent failures
 * map to 400.
 */
export async function POST(req: NextRequest) {
  const tracker = createRequestTracker()

  try {
    const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly()
    if (!isAuthenticated || !userId) {
      return createUnauthorizedResponse()
    }

    const body = await req.json()

    // Log raw body shape for diagnostics (avoid dumping huge payloads)
    try {
      const bodyPreview = JSON.stringify(body).slice(0, 300)
      logger.debug(`[${tracker.requestId}] Incoming mark-complete raw body preview`, {
        preview: `${bodyPreview}${bodyPreview.length === 300 ? '...' : ''}`,
      })
    } catch {}

    const parsed = MarkCompleteSchema.parse(body)

    // Truncated preview of the (possibly non-string) message for logging only.
    const messagePreview = (() => {
      try {
        const s =
          typeof parsed.message === 'string' ? parsed.message : JSON.stringify(parsed.message)
        return s ? `${s.slice(0, 200)}${s.length > 200 ? '...' : ''}` : undefined
      } catch {
        return undefined
      }
    })()

    logger.info(`[${tracker.requestId}] Forwarding tool mark-complete`, {
      userId,
      toolCallId: parsed.id,
      toolName: parsed.name,
      status: parsed.status,
      hasMessage: parsed.message !== undefined,
      hasData: parsed.data !== undefined,
      messagePreview,
      agentUrl: `${SIM_AGENT_API_URL}/api/tools/mark-complete`,
    })

    // The x-api-key header is only attached when a copilot API key is configured.
    const agentRes = await fetch(`${SIM_AGENT_API_URL}/api/tools/mark-complete`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}),
      },
      body: JSON.stringify(parsed),
    })

    // Attempt to parse agent response JSON
    // Falls back to raw text when the agent does not return JSON.
    let agentJson: any = null
    let agentText: string | null = null
    try {
      agentJson = await agentRes.json()
    } catch (_) {
      try {
        agentText = await agentRes.text()
      } catch {}
    }

    logger.info(`[${tracker.requestId}] Agent responded to mark-complete`, {
      status: agentRes.status,
      ok: agentRes.ok,
      responseJsonPreview: agentJson ? JSON.stringify(agentJson).slice(0, 300) : undefined,
      responseTextPreview: agentText ? agentText.slice(0, 300) : undefined,
    })

    if (agentRes.ok) {
      return NextResponse.json({ success: true })
    }

    // Prefer the agent's structured error, then its raw text, then a generic message.
    const errorMessage =
      agentJson?.error || agentText || `Agent responded with status ${agentRes.status}`
    const status = agentRes.status >= 500 ? 500 : 400

    logger.warn(`[${tracker.requestId}] Mark-complete failed`, {
      status,
      error: errorMessage,
    })

    return NextResponse.json({ success: false, error: errorMessage }, { status })
  } catch (error) {
    if (error instanceof z.ZodError) {
      logger.warn(`[${tracker.requestId}] Invalid mark-complete request body`, {
        issues: error.issues,
      })
      return createBadRequestResponse('Invalid request body for mark-complete')
    }
    logger.error(`[${tracker.requestId}] Failed to proxy mark-complete:`, error)
    return createInternalServerErrorResponse('Failed to mark tool as complete')
  }
}
|
||||
@@ -28,6 +28,7 @@ const DEFAULT_ENABLED_MODELS: Record<CopilotModelId, boolean> = {
|
||||
'claude-4-sonnet': false,
|
||||
'claude-4.5-haiku': true,
|
||||
'claude-4.5-sonnet': true,
|
||||
'claude-4.6-opus': true,
|
||||
'claude-4.5-opus': true,
|
||||
'claude-4.1-opus': false,
|
||||
'gemini-3-pro': true,
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery'
|
||||
|
||||
/** GET handler delegating to the MCP OAuth authorization-server metadata builder. */
export async function GET(request: NextRequest): Promise<NextResponse> {
  return createMcpAuthorizationServerMetadataResponse(request)
}
|
||||
@@ -0,0 +1,6 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { createMcpProtectedResourceMetadataResponse } from '@/lib/mcp/oauth-discovery'
|
||||
|
||||
/** GET handler delegating to the MCP OAuth protected-resource metadata builder. */
export async function GET(request: NextRequest): Promise<NextResponse> {
  return createMcpProtectedResourceMetadataResponse(request)
}
|
||||
776
apps/sim/app/api/mcp/copilot/route.ts
Normal file
776
apps/sim/app/api/mcp/copilot/route.ts
Normal file
@@ -0,0 +1,776 @@
|
||||
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
|
||||
import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js'
|
||||
import {
|
||||
CallToolRequestSchema,
|
||||
type CallToolResult,
|
||||
ErrorCode,
|
||||
type JSONRPCError,
|
||||
type ListToolsResult,
|
||||
ListToolsRequestSchema,
|
||||
McpError,
|
||||
type RequestId,
|
||||
} from '@modelcontextprotocol/sdk/types.js'
|
||||
import { db } from '@sim/db'
|
||||
import { userStats } from '@sim/db/schema'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { randomUUID } from 'node:crypto'
|
||||
import { eq, sql } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
|
||||
import { getCopilotModel } from '@/lib/copilot/config'
|
||||
import { SIM_AGENT_API_URL, SIM_AGENT_VERSION } from '@/lib/copilot/constants'
|
||||
import { RateLimiter } from '@/lib/core/rate-limiter'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
|
||||
import { orchestrateSubagentStream } from '@/lib/copilot/orchestrator/subagent'
|
||||
import {
|
||||
executeToolServerSide,
|
||||
prepareExecutionContext,
|
||||
} from '@/lib/copilot/orchestrator/tool-executor'
|
||||
import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/definitions'
|
||||
import { resolveWorkflowIdForUser } from '@/lib/workflows/utils'
|
||||
|
||||
const logger = createLogger('CopilotMcpAPI')
|
||||
const mcpRateLimiter = new RateLimiter()
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
export const runtime = 'nodejs'
|
||||
|
||||
/** Outcome of validating a copilot API key against the copilot service. */
interface CopilotKeyAuthResult {
  // True only when the key was accepted and a userId was returned.
  success: boolean
  // Owner of the key; present only on success.
  userId?: string
  // Human-readable failure reason; present only on failure.
  error?: string
}
|
||||
|
||||
/**
|
||||
* Validates a copilot API key by forwarding it to the Go copilot service's
|
||||
* `/api/validate-key` endpoint. Returns the associated userId on success.
|
||||
*/
|
||||
async function authenticateCopilotApiKey(apiKey: string): Promise<CopilotKeyAuthResult> {
  try {
    // The validation call itself is authenticated with the internal shared secret.
    const internalSecret = env.INTERNAL_API_SECRET
    if (!internalSecret) {
      logger.error('INTERNAL_API_SECRET not configured')
      return { success: false, error: 'Server configuration error' }
    }

    const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'x-api-key': internalSecret,
      },
      body: JSON.stringify({ targetApiKey: apiKey }),
      // Bound the upstream call so a hung agent can't stall MCP requests.
      signal: AbortSignal.timeout(10_000),
    })

    if (!res.ok) {
      // Upstream error body is optional JSON; tolerate non-JSON replies.
      const body = await res.json().catch(() => null)
      const upstream = (body as Record<string, unknown>)?.message
      const status = res.status

      // 401/403 → the key itself is bad.
      if (status === 401 || status === 403) {
        return {
          success: false,
          error: `Invalid Copilot API key. Generate a new key in Settings → Copilot and set it in the x-api-key header.`,
        }
      }
      // 402 → the key is valid but its usage quota is exhausted.
      if (status === 402) {
        return {
          success: false,
          error: `Usage limit exceeded for this Copilot API key. Upgrade your plan or wait for your quota to reset.`,
        }
      }

      return { success: false, error: String(upstream ?? 'Copilot API key validation failed') }
    }

    const data = (await res.json()) as { ok?: boolean; userId?: string }
    if (!data.ok || !data.userId) {
      return {
        success: false,
        error: 'Invalid Copilot API key. Generate a new key in Settings → Copilot.',
      }
    }

    return { success: true, userId: data.userId }
  } catch (error) {
    // Network/timeout failure — distinct wording so callers don't blame the key.
    logger.error('Copilot API key validation failed', { error })
    return {
      success: false,
      error: 'Could not validate Copilot API key — the authentication service is temporarily unreachable. This is NOT a problem with the API key itself; please retry shortly.',
    }
  }
}
|
||||
|
||||
/**
|
||||
* MCP Server instructions that guide LLMs on how to use the Sim copilot tools.
|
||||
* This is included in the initialize response to help external LLMs understand
|
||||
* the workflow lifecycle and best practices.
|
||||
*/
|
||||
const MCP_SERVER_INSTRUCTIONS = `
|
||||
## Sim Workflow Copilot
|
||||
|
||||
Sim is a workflow automation platform. Workflows are visual pipelines of connected blocks (Agent, Function, Condition, API, integrations, etc.). The Agent block is the core — an LLM with tools, memory, structured output, and knowledge bases.
|
||||
|
||||
### Workflow Lifecycle (Happy Path)
|
||||
|
||||
1. \`list_workspaces\` → know where to work
|
||||
2. \`create_workflow(name, workspaceId)\` → get a workflowId
|
||||
3. \`sim_build(request, workflowId)\` → plan and build in one pass
|
||||
4. \`sim_test(request, workflowId)\` → verify it works
|
||||
5. \`sim_deploy("deploy as api", workflowId)\` → make it accessible externally (optional)
|
||||
|
||||
For fine-grained control, use \`sim_plan\` → \`sim_edit\` instead of \`sim_build\`. Pass the plan object from sim_plan EXACTLY as-is to sim_edit's context.plan field.
|
||||
|
||||
### Working with Existing Workflows
|
||||
|
||||
When the user refers to a workflow by name or description ("the email one", "my Slack bot"):
|
||||
1. Use \`sim_discovery\` to find it by functionality
|
||||
2. Or use \`list_workflows\` and match by name
|
||||
3. Then pass the workflowId to other tools
|
||||
|
||||
### Organization
|
||||
|
||||
- \`rename_workflow\` — rename a workflow
|
||||
- \`move_workflow\` — move a workflow into a folder (or root with null)
|
||||
- \`move_folder\` — nest a folder inside another (or root with null)
|
||||
- \`create_folder(name, parentId)\` — create nested folder hierarchies
|
||||
|
||||
### Key Rules
|
||||
|
||||
- You can test workflows immediately after building — deployment is only needed for external access (API, chat, MCP).
|
||||
- All copilot tools (build, plan, edit, deploy, test, debug) require workflowId.
|
||||
- If the user reports errors → use \`sim_debug\` first, don't guess.
|
||||
- Variable syntax: \`<blockname.field>\` for block outputs, \`{{ENV_VAR}}\` for env vars.
|
||||
`
|
||||
|
||||
type HeaderMap = Record<string, string | string[] | undefined>
|
||||
|
||||
function createError(id: RequestId, code: ErrorCode | number, message: string): JSONRPCError {
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id,
|
||||
error: { code, message },
|
||||
}
|
||||
}
|
||||
|
||||
function normalizeRequestHeaders(request: NextRequest): HeaderMap {
|
||||
const headers: HeaderMap = {}
|
||||
|
||||
request.headers.forEach((value, key) => {
|
||||
headers[key.toLowerCase()] = value
|
||||
})
|
||||
|
||||
return headers
|
||||
}
|
||||
|
||||
function readHeader(headers: HeaderMap | undefined, name: string): string | undefined {
|
||||
if (!headers) return undefined
|
||||
const value = headers[name.toLowerCase()]
|
||||
if (Array.isArray(value)) {
|
||||
return value[0]
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
/**
 * Node-style response shim that captures writes from the MCP SDK's
 * StreamableHTTPServerTransport (which expects an http.ServerResponse-like
 * object) and exposes them as a web ReadableStream for a NextResponse.
 *
 * Chunks written before the stream's controller exists are buffered in
 * _pendingChunks and flushed on stream start. Two internal promises let
 * callers await the first header write and the end of the response.
 */
class NextResponseCapture {
  private _status = 200
  private _headers = new Headers()
  private _controller: ReadableStreamDefaultController<Uint8Array> | null = null
  // Writes that arrived before the ReadableStream started.
  private _pendingChunks: Uint8Array[] = []
  private _closeHandlers: Array<() => void> = []
  private _errorHandlers: Array<(error: Error) => void> = []
  private _headersWritten = false
  private _ended = false
  private _headersPromise: Promise<void>
  private _resolveHeaders: (() => void) | null = null
  private _endedPromise: Promise<void>
  private _resolveEnded: (() => void) | null = null
  readonly readable: ReadableStream<Uint8Array>

  constructor() {
    this._headersPromise = new Promise<void>((resolve) => {
      this._resolveHeaders = resolve
    })

    this._endedPromise = new Promise<void>((resolve) => {
      this._resolveEnded = resolve
    })

    this.readable = new ReadableStream<Uint8Array>({
      start: (controller) => {
        this._controller = controller
        // Flush anything written before the stream was started.
        if (this._pendingChunks.length > 0) {
          for (const chunk of this._pendingChunks) {
            controller.enqueue(chunk)
          }
          this._pendingChunks = []
        }
      },
      cancel: () => {
        // Consumer went away: treat as end-of-response.
        this._ended = true
        this._resolveEnded?.()
        this.triggerCloseHandlers()
      },
    })
  }

  // Idempotently marks headers as flushed and releases waitForHeaders().
  private markHeadersWritten(): void {
    if (this._headersWritten) return
    this._headersWritten = true
    this._resolveHeaders?.()
  }

  // Runs close handlers; a throwing handler is routed to the error handlers.
  private triggerCloseHandlers(): void {
    for (const handler of this._closeHandlers) {
      try {
        handler()
      } catch (error) {
        this.triggerErrorHandlers(error instanceof Error ? error : new Error(String(error)))
      }
    }
  }

  private triggerErrorHandlers(error: Error): void {
    for (const errorHandler of this._errorHandlers) {
      errorHandler(error)
    }
  }

  // Coerces arbitrary write() input to bytes; null/undefined → nothing to write.
  private normalizeChunk(chunk: unknown): Uint8Array | null {
    if (typeof chunk === 'string') {
      return new TextEncoder().encode(chunk)
    }

    if (chunk instanceof Uint8Array) {
      return chunk
    }

    if (chunk === undefined || chunk === null) {
      return null
    }

    return new TextEncoder().encode(String(chunk))
  }

  // http.ServerResponse#writeHead equivalent: records status and headers.
  writeHead(status: number, headers?: Record<string, string | number | string[]>): this {
    this._status = status

    if (headers) {
      Object.entries(headers).forEach(([key, value]) => {
        if (Array.isArray(value)) {
          // Multi-valued header → single comma-joined value.
          this._headers.set(key, value.join(', '))
        } else {
          this._headers.set(key, String(value))
        }
      })
    }

    this.markHeadersWritten()
    return this
  }

  flushHeaders(): this {
    this.markHeadersWritten()
    return this
  }

  // http.ServerResponse#write equivalent. Always reports backpressure-free (true).
  write(chunk: unknown): boolean {
    const normalized = this.normalizeChunk(chunk)
    if (!normalized) return true

    this.markHeadersWritten()

    if (this._controller) {
      try {
        this._controller.enqueue(normalized)
      } catch (error) {
        this.triggerErrorHandlers(error instanceof Error ? error : new Error(String(error)))
      }
    } else {
      // Stream not started yet — buffer until start() runs.
      this._pendingChunks.push(normalized)
    }

    return true
  }

  // http.ServerResponse#end equivalent: optional final chunk, then close stream.
  end(chunk?: unknown): this {
    if (chunk !== undefined) this.write(chunk)
    this.markHeadersWritten()
    if (this._ended) return this

    this._ended = true
    this._resolveEnded?.()

    if (this._controller) {
      try {
        this._controller.close()
      } catch (error) {
        this.triggerErrorHandlers(error instanceof Error ? error : new Error(String(error)))
      }
    }

    this.triggerCloseHandlers()

    return this
  }

  // Resolves once headers are written, or after timeoutMs (whichever first).
  async waitForHeaders(timeoutMs = 30000): Promise<void> {
    if (this._headersWritten) return

    await Promise.race([
      this._headersPromise,
      new Promise<void>((resolve) => {
        setTimeout(resolve, timeoutMs)
      }),
    ])
  }

  // Resolves once the response has ended, or after timeoutMs (whichever first).
  async waitForEnd(timeoutMs = 30000): Promise<void> {
    if (this._ended) return

    await Promise.race([
      this._endedPromise,
      new Promise<void>((resolve) => {
        setTimeout(resolve, timeoutMs)
      }),
    ])
  }

  // Minimal EventEmitter surface: only 'close' and 'error' are supported.
  on(event: 'close' | 'error', handler: (() => void) | ((error: Error) => void)): this {
    if (event === 'close') {
      this._closeHandlers.push(handler as () => void)
    }

    if (event === 'error') {
      this._errorHandlers.push(handler as (error: Error) => void)
    }

    return this
  }

  // Packages captured status/headers/stream into a NextResponse.
  toNextResponse(): NextResponse {
    return new NextResponse(this.readable, {
      status: this._status,
      headers: this._headers,
    })
  }
}
|
||||
|
||||
/**
 * Constructs the MCP server exposing Sim copilot tools.
 *
 * Registers two handlers: tools/list (direct + subagent tool definitions)
 * and tools/call (per-call API-key auth, rate limiting, dispatch via
 * handleToolsCall, then fire-and-forget usage tracking). Auth and
 * rate-limit failures are returned as isError tool results rather than
 * protocol errors so the calling LLM sees actionable text.
 */
function buildMcpServer(): Server {
  const server = new Server(
    {
      name: 'sim-copilot',
      version: '1.0.0',
    },
    {
      capabilities: { tools: {} },
      instructions: MCP_SERVER_INSTRUCTIONS,
    }
  )

  server.setRequestHandler(ListToolsRequestSchema, async () => {
    const directTools = DIRECT_TOOL_DEFS.map((tool) => ({
      name: tool.name,
      description: tool.description,
      inputSchema: tool.inputSchema,
    }))

    const subagentTools = SUBAGENT_TOOL_DEFS.map((tool) => ({
      name: tool.name,
      description: tool.description,
      inputSchema: tool.inputSchema,
    }))

    const result: ListToolsResult = {
      tools: [...directTools, ...subagentTools],
    }

    return result
  })

  server.setRequestHandler(CallToolRequestSchema, async (request, extra) => {
    // Auth is per tool call: the key travels in the HTTP x-api-key header.
    const headers = (extra.requestInfo?.headers || {}) as HeaderMap
    const apiKeyHeader = readHeader(headers, 'x-api-key')

    if (!apiKeyHeader) {
      return {
        content: [
          {
            type: 'text' as const,
            text: 'AUTHENTICATION ERROR: No Copilot API key provided. The user must set their Copilot API key in the x-api-key header. They can generate one in the Sim app under Settings → Copilot. Do NOT retry — this will fail until the key is configured.',
          },
        ],
        isError: true,
      }
    }

    const authResult = await authenticateCopilotApiKey(apiKeyHeader)
    if (!authResult.success || !authResult.userId) {
      logger.warn('MCP copilot key auth failed', { method: request.method })
      return {
        content: [
          {
            type: 'text' as const,
            text: `AUTHENTICATION ERROR: ${authResult.error} Do NOT retry — this will fail until the user fixes their Copilot API key.`,
          },
        ],
        isError: true,
      }
    }

    // Rate-limit by the key owner's subscription tier.
    const rateLimitResult = await mcpRateLimiter.checkRateLimitWithSubscription(
      authResult.userId,
      await getHighestPrioritySubscription(authResult.userId),
      'api-endpoint',
      false
    )

    if (!rateLimitResult.allowed) {
      return {
        content: [
          {
            type: 'text' as const,
            text: `RATE LIMIT: Too many requests. Please wait and retry after ${rateLimitResult.resetAt.toISOString()}.`,
          },
        ],
        isError: true,
      }
    }

    const params = request.params as { name?: string; arguments?: Record<string, unknown> } | undefined
    if (!params?.name) {
      throw new McpError(ErrorCode.InvalidParams, 'Tool name required')
    }

    const result = await handleToolsCall(
      {
        name: params.name,
        arguments: params.arguments,
      },
      authResult.userId
    )

    // Usage tracking is fire-and-forget; it must not delay the response.
    trackMcpCopilotCall(authResult.userId)

    return result
  })

  return server
}
|
||||
|
||||
/**
 * Bridges a Next.js request into the MCP SDK's streamable-HTTP transport.
 *
 * A fresh server + stateless transport pair is created per request
 * (sessionIdGenerator: undefined), the transport writes into a
 * NextResponseCapture shim, and once headers and body have been produced
 * the capture is converted into a NextResponse. Server and transport are
 * always closed afterwards, swallowing close errors.
 */
async function handleMcpRequestWithSdk(
  request: NextRequest,
  parsedBody: unknown
): Promise<NextResponse> {
  const server = buildMcpServer()
  const transport = new StreamableHTTPServerTransport({
    sessionIdGenerator: undefined,
    enableJsonResponse: true,
  })

  const responseCapture = new NextResponseCapture()
  // Minimal IncomingMessage-like shape the transport reads from.
  const requestAdapter = {
    method: request.method,
    headers: normalizeRequestHeaders(request),
  }

  await server.connect(transport)

  try {
    await transport.handleRequest(requestAdapter as any, responseCapture as any, parsedBody)
    await responseCapture.waitForHeaders()
    await responseCapture.waitForEnd()
    return responseCapture.toNextResponse()
  } finally {
    // Best-effort cleanup; close failures must not mask the response/error.
    await server.close().catch(() => {})
    await transport.close().catch(() => {})
  }
}
|
||||
|
||||
/** GET handler: SSE notification streams are unsupported on this endpoint. */
export async function GET() {
  // Return 405 to signal that server-initiated SSE notifications are not
  // supported. Without this, clients like mcp-remote will repeatedly
  // reconnect trying to open an SSE stream, flooding the logs with GETs.
  return new NextResponse(null, { status: 405 })
}
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
try {
|
||||
let parsedBody: unknown
|
||||
|
||||
try {
|
||||
parsedBody = await request.json()
|
||||
} catch {
|
||||
return NextResponse.json(createError(0, ErrorCode.ParseError, 'Invalid JSON body'), {
|
||||
status: 400,
|
||||
})
|
||||
}
|
||||
|
||||
return await handleMcpRequestWithSdk(request, parsedBody)
|
||||
} catch (error) {
|
||||
logger.error('Error handling MCP request', { error })
|
||||
return NextResponse.json(createError(0, ErrorCode.InternalError, 'Internal error'), {
|
||||
status: 500,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
export async function DELETE(request: NextRequest) {
|
||||
void request
|
||||
return NextResponse.json(createError(0, -32000, 'Method not allowed.'), { status: 405 })
|
||||
}
|
||||
|
||||
/**
|
||||
* Increment MCP copilot call counter in userStats (fire-and-forget).
|
||||
*/
|
||||
function trackMcpCopilotCall(userId: string): void {
|
||||
db.update(userStats)
|
||||
.set({
|
||||
totalMcpCopilotCalls: sql`total_mcp_copilot_calls + 1`,
|
||||
lastActive: new Date(),
|
||||
})
|
||||
.where(eq(userStats.userId, userId))
|
||||
.then(() => {})
|
||||
.catch((error) => {
|
||||
logger.error('Failed to track MCP copilot call', { error, userId })
|
||||
})
|
||||
}
|
||||
|
||||
async function handleToolsCall(
|
||||
params: { name: string; arguments?: Record<string, unknown> },
|
||||
userId: string
|
||||
): Promise<CallToolResult> {
|
||||
const args = params.arguments || {}
|
||||
|
||||
const directTool = DIRECT_TOOL_DEFS.find((tool) => tool.name === params.name)
|
||||
if (directTool) {
|
||||
return handleDirectToolCall(directTool, args, userId)
|
||||
}
|
||||
|
||||
const subagentTool = SUBAGENT_TOOL_DEFS.find((tool) => tool.name === params.name)
|
||||
if (subagentTool) {
|
||||
return handleSubagentToolCall(subagentTool, args, userId)
|
||||
}
|
||||
|
||||
throw new McpError(ErrorCode.MethodNotFound, `Tool not found: ${params.name}`)
|
||||
}
|
||||
|
||||
async function handleDirectToolCall(
|
||||
toolDef: (typeof DIRECT_TOOL_DEFS)[number],
|
||||
args: Record<string, unknown>,
|
||||
userId: string
|
||||
): Promise<CallToolResult> {
|
||||
try {
|
||||
const execContext = await prepareExecutionContext(userId, (args.workflowId as string) || '')
|
||||
|
||||
const toolCall = {
|
||||
id: randomUUID(),
|
||||
name: toolDef.toolId,
|
||||
status: 'pending' as const,
|
||||
params: args as Record<string, any>,
|
||||
startTime: Date.now(),
|
||||
}
|
||||
|
||||
const result = await executeToolServerSide(toolCall, execContext)
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(result.output ?? result, null, 2),
|
||||
},
|
||||
],
|
||||
isError: !result.success,
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Direct tool execution failed', { tool: toolDef.name, error })
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: `Tool execution failed: ${error instanceof Error ? error.message : String(error)}`,
|
||||
},
|
||||
],
|
||||
isError: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build mode uses the main chat orchestrator with the 'fast' command instead of
|
||||
* the subagent endpoint. In Go, 'build' is not a registered subagent — it's a mode
|
||||
* (ModeFast) on the main chat processor that bypasses subagent orchestration and
|
||||
* executes all tools directly.
|
||||
*/
|
||||
async function handleBuildToolCall(
|
||||
args: Record<string, unknown>,
|
||||
userId: string
|
||||
): Promise<CallToolResult> {
|
||||
try {
|
||||
const requestText = (args.request as string) || JSON.stringify(args)
|
||||
const { model } = getCopilotModel('chat')
|
||||
const workflowId = args.workflowId as string | undefined
|
||||
|
||||
const resolved = workflowId ? { workflowId } : await resolveWorkflowIdForUser(userId)
|
||||
|
||||
if (!resolved?.workflowId) {
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(
|
||||
{
|
||||
success: false,
|
||||
error: 'workflowId is required for build. Call create_workflow first.',
|
||||
},
|
||||
null,
|
||||
2
|
||||
),
|
||||
},
|
||||
],
|
||||
isError: true,
|
||||
}
|
||||
}
|
||||
|
||||
const chatId = randomUUID()
|
||||
|
||||
const requestPayload = {
|
||||
message: requestText,
|
||||
workflowId: resolved.workflowId,
|
||||
userId,
|
||||
model,
|
||||
mode: 'agent',
|
||||
commands: ['fast'],
|
||||
messageId: randomUUID(),
|
||||
version: SIM_AGENT_VERSION,
|
||||
headless: true,
|
||||
chatId,
|
||||
source: 'mcp',
|
||||
}
|
||||
|
||||
const result = await orchestrateCopilotStream(requestPayload, {
|
||||
userId,
|
||||
workflowId: resolved.workflowId,
|
||||
chatId,
|
||||
autoExecuteTools: true,
|
||||
timeout: 300000,
|
||||
interactive: false,
|
||||
})
|
||||
|
||||
const responseData = {
|
||||
success: result.success,
|
||||
content: result.content,
|
||||
toolCalls: result.toolCalls,
|
||||
error: result.error,
|
||||
}
|
||||
|
||||
return {
|
||||
content: [{ type: 'text', text: JSON.stringify(responseData, null, 2) }],
|
||||
isError: !result.success,
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Build tool call failed', { error })
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: `Build failed: ${error instanceof Error ? error.message : String(error)}`,
|
||||
},
|
||||
],
|
||||
isError: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function handleSubagentToolCall(
|
||||
toolDef: (typeof SUBAGENT_TOOL_DEFS)[number],
|
||||
args: Record<string, unknown>,
|
||||
userId: string
|
||||
): Promise<CallToolResult> {
|
||||
if (toolDef.agentId === 'build') {
|
||||
return handleBuildToolCall(args, userId)
|
||||
}
|
||||
|
||||
try {
|
||||
const requestText =
|
||||
(args.request as string) ||
|
||||
(args.message as string) ||
|
||||
(args.error as string) ||
|
||||
JSON.stringify(args)
|
||||
|
||||
const context = (args.context as Record<string, unknown>) || {}
|
||||
if (args.plan && !context.plan) {
|
||||
context.plan = args.plan
|
||||
}
|
||||
|
||||
const { model } = getCopilotModel('chat')
|
||||
|
||||
const result = await orchestrateSubagentStream(
|
||||
toolDef.agentId,
|
||||
{
|
||||
message: requestText,
|
||||
workflowId: args.workflowId,
|
||||
workspaceId: args.workspaceId,
|
||||
context,
|
||||
model,
|
||||
headless: true,
|
||||
source: 'mcp',
|
||||
},
|
||||
{
|
||||
userId,
|
||||
workflowId: args.workflowId as string | undefined,
|
||||
workspaceId: args.workspaceId as string | undefined,
|
||||
}
|
||||
)
|
||||
|
||||
let responseData: unknown
|
||||
|
||||
if (result.structuredResult) {
|
||||
responseData = {
|
||||
success: result.structuredResult.success ?? result.success,
|
||||
type: result.structuredResult.type,
|
||||
summary: result.structuredResult.summary,
|
||||
data: result.structuredResult.data,
|
||||
}
|
||||
} else if (result.error) {
|
||||
responseData = {
|
||||
success: false,
|
||||
error: result.error,
|
||||
errors: result.errors,
|
||||
}
|
||||
} else {
|
||||
responseData = {
|
||||
success: result.success,
|
||||
content: result.content,
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(responseData, null, 2),
|
||||
},
|
||||
],
|
||||
isError: !result.success,
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Subagent tool call failed', {
|
||||
tool: toolDef.name,
|
||||
agentId: toolDef.agentId,
|
||||
error,
|
||||
})
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: `Subagent call failed: ${error instanceof Error ? error.message : String(error)}`,
|
||||
},
|
||||
],
|
||||
isError: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -24,6 +24,7 @@ const configSchema = z.object({
|
||||
hideFilesTab: z.boolean().optional(),
|
||||
disableMcpTools: z.boolean().optional(),
|
||||
disableCustomTools: z.boolean().optional(),
|
||||
disableSkills: z.boolean().optional(),
|
||||
hideTemplates: z.boolean().optional(),
|
||||
disableInvitations: z.boolean().optional(),
|
||||
hideDeployApi: z.boolean().optional(),
|
||||
|
||||
@@ -25,6 +25,7 @@ const configSchema = z.object({
|
||||
hideFilesTab: z.boolean().optional(),
|
||||
disableMcpTools: z.boolean().optional(),
|
||||
disableCustomTools: z.boolean().optional(),
|
||||
disableSkills: z.boolean().optional(),
|
||||
hideTemplates: z.boolean().optional(),
|
||||
disableInvitations: z.boolean().optional(),
|
||||
hideDeployApi: z.boolean().optional(),
|
||||
|
||||
182
apps/sim/app/api/skills/route.ts
Normal file
182
apps/sim/app/api/skills/route.ts
Normal file
@@ -0,0 +1,182 @@
|
||||
import { db } from '@sim/db'
|
||||
import { skill } from '@sim/db/schema'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { and, desc, eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid'
|
||||
import { generateRequestId } from '@/lib/core/utils/request'
|
||||
import { upsertSkills } from '@/lib/workflows/skills/operations'
|
||||
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
|
||||
|
||||
const logger = createLogger('SkillsAPI')
|
||||
|
||||
const SkillSchema = z.object({
|
||||
skills: z.array(
|
||||
z.object({
|
||||
id: z.string().optional(),
|
||||
name: z
|
||||
.string()
|
||||
.min(1, 'Skill name is required')
|
||||
.max(64)
|
||||
.regex(/^[a-z0-9]+(-[a-z0-9]+)*$/, 'Name must be kebab-case (e.g. my-skill)'),
|
||||
description: z.string().min(1, 'Description is required').max(1024),
|
||||
content: z.string().min(1, 'Content is required').max(50000, 'Content is too large'),
|
||||
})
|
||||
),
|
||||
workspaceId: z.string().optional(),
|
||||
})
|
||||
|
||||
/** GET - Fetch all skills for a workspace */
|
||||
export async function GET(request: NextRequest) {
|
||||
const requestId = generateRequestId()
|
||||
const searchParams = request.nextUrl.searchParams
|
||||
const workspaceId = searchParams.get('workspaceId')
|
||||
|
||||
try {
|
||||
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false })
|
||||
if (!authResult.success || !authResult.userId) {
|
||||
logger.warn(`[${requestId}] Unauthorized skills access attempt`)
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
const userId = authResult.userId
|
||||
|
||||
if (!workspaceId) {
|
||||
logger.warn(`[${requestId}] Missing workspaceId`)
|
||||
return NextResponse.json({ error: 'workspaceId is required' }, { status: 400 })
|
||||
}
|
||||
|
||||
const userPermission = await getUserEntityPermissions(userId, 'workspace', workspaceId)
|
||||
if (!userPermission) {
|
||||
logger.warn(`[${requestId}] User ${userId} does not have access to workspace ${workspaceId}`)
|
||||
return NextResponse.json({ error: 'Access denied' }, { status: 403 })
|
||||
}
|
||||
|
||||
const result = await db
|
||||
.select()
|
||||
.from(skill)
|
||||
.where(eq(skill.workspaceId, workspaceId))
|
||||
.orderBy(desc(skill.createdAt))
|
||||
|
||||
return NextResponse.json({ data: result }, { status: 200 })
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Error fetching skills:`, error)
|
||||
return NextResponse.json({ error: 'Failed to fetch skills' }, { status: 500 })
|
||||
}
|
||||
}
|
||||
|
||||
/** POST - Create or update skills */
|
||||
export async function POST(req: NextRequest) {
|
||||
const requestId = generateRequestId()
|
||||
|
||||
try {
|
||||
const authResult = await checkSessionOrInternalAuth(req, { requireWorkflowId: false })
|
||||
if (!authResult.success || !authResult.userId) {
|
||||
logger.warn(`[${requestId}] Unauthorized skills update attempt`)
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
const userId = authResult.userId
|
||||
const body = await req.json()
|
||||
|
||||
try {
|
||||
const { skills, workspaceId } = SkillSchema.parse(body)
|
||||
|
||||
if (!workspaceId) {
|
||||
logger.warn(`[${requestId}] Missing workspaceId in request body`)
|
||||
return NextResponse.json({ error: 'workspaceId is required' }, { status: 400 })
|
||||
}
|
||||
|
||||
const userPermission = await getUserEntityPermissions(userId, 'workspace', workspaceId)
|
||||
if (!userPermission || (userPermission !== 'admin' && userPermission !== 'write')) {
|
||||
logger.warn(
|
||||
`[${requestId}] User ${userId} does not have write permission for workspace ${workspaceId}`
|
||||
)
|
||||
return NextResponse.json({ error: 'Write permission required' }, { status: 403 })
|
||||
}
|
||||
|
||||
const resultSkills = await upsertSkills({
|
||||
skills,
|
||||
workspaceId,
|
||||
userId,
|
||||
requestId,
|
||||
})
|
||||
|
||||
return NextResponse.json({ success: true, data: resultSkills })
|
||||
} catch (validationError) {
|
||||
if (validationError instanceof z.ZodError) {
|
||||
logger.warn(`[${requestId}] Invalid skills data`, {
|
||||
errors: validationError.errors,
|
||||
})
|
||||
return NextResponse.json(
|
||||
{ error: 'Invalid request data', details: validationError.errors },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
if (validationError instanceof Error && validationError.message.includes('already exists')) {
|
||||
return NextResponse.json({ error: validationError.message }, { status: 409 })
|
||||
}
|
||||
throw validationError
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Error updating skills`, error)
|
||||
return NextResponse.json({ error: 'Failed to update skills' }, { status: 500 })
|
||||
}
|
||||
}
|
||||
|
||||
/** DELETE - Delete a skill by ID */
|
||||
export async function DELETE(request: NextRequest) {
|
||||
const requestId = generateRequestId()
|
||||
const searchParams = request.nextUrl.searchParams
|
||||
const skillId = searchParams.get('id')
|
||||
const workspaceId = searchParams.get('workspaceId')
|
||||
|
||||
try {
|
||||
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false })
|
||||
if (!authResult.success || !authResult.userId) {
|
||||
logger.warn(`[${requestId}] Unauthorized skill deletion attempt`)
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
const userId = authResult.userId
|
||||
|
||||
if (!skillId) {
|
||||
logger.warn(`[${requestId}] Missing skill ID for deletion`)
|
||||
return NextResponse.json({ error: 'Skill ID is required' }, { status: 400 })
|
||||
}
|
||||
|
||||
if (!workspaceId) {
|
||||
logger.warn(`[${requestId}] Missing workspaceId for deletion`)
|
||||
return NextResponse.json({ error: 'workspaceId is required' }, { status: 400 })
|
||||
}
|
||||
|
||||
const userPermission = await getUserEntityPermissions(userId, 'workspace', workspaceId)
|
||||
if (!userPermission || (userPermission !== 'admin' && userPermission !== 'write')) {
|
||||
logger.warn(
|
||||
`[${requestId}] User ${userId} does not have write permission for workspace ${workspaceId}`
|
||||
)
|
||||
return NextResponse.json({ error: 'Write permission required' }, { status: 403 })
|
||||
}
|
||||
|
||||
const existingSkill = await db.select().from(skill).where(eq(skill.id, skillId)).limit(1)
|
||||
|
||||
if (existingSkill.length === 0) {
|
||||
logger.warn(`[${requestId}] Skill not found: ${skillId}`)
|
||||
return NextResponse.json({ error: 'Skill not found' }, { status: 404 })
|
||||
}
|
||||
|
||||
if (existingSkill[0].workspaceId !== workspaceId) {
|
||||
logger.warn(`[${requestId}] Skill ${skillId} does not belong to workspace ${workspaceId}`)
|
||||
return NextResponse.json({ error: 'Skill not found' }, { status: 404 })
|
||||
}
|
||||
|
||||
await db.delete(skill).where(and(eq(skill.id, skillId), eq(skill.workspaceId, workspaceId)))
|
||||
|
||||
logger.info(`[${requestId}] Deleted skill: ${skillId}`)
|
||||
return NextResponse.json({ success: true })
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Error deleting skill:`, error)
|
||||
return NextResponse.json({ error: 'Failed to delete skill' }, { status: 500 })
|
||||
}
|
||||
}
|
||||
114
apps/sim/app/api/v1/copilot/chat/route.ts
Normal file
114
apps/sim/app/api/v1/copilot/chat/route.ts
Normal file
@@ -0,0 +1,114 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { getCopilotModel } from '@/lib/copilot/config'
|
||||
import { SIM_AGENT_VERSION } from '@/lib/copilot/constants'
|
||||
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/models'
|
||||
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
|
||||
import { resolveWorkflowIdForUser } from '@/lib/workflows/utils'
|
||||
import { authenticateV1Request } from '@/app/api/v1/auth'
|
||||
|
||||
const logger = createLogger('CopilotHeadlessAPI')
|
||||
|
||||
const RequestSchema = z.object({
|
||||
message: z.string().min(1, 'message is required'),
|
||||
workflowId: z.string().optional(),
|
||||
workflowName: z.string().optional(),
|
||||
chatId: z.string().optional(),
|
||||
mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'),
|
||||
model: z.string().optional(),
|
||||
autoExecuteTools: z.boolean().optional().default(true),
|
||||
timeout: z.number().optional().default(300000),
|
||||
})
|
||||
|
||||
/**
|
||||
* POST /api/v1/copilot/chat
|
||||
* Headless copilot endpoint for server-side orchestration.
|
||||
*
|
||||
* workflowId is optional - if not provided:
|
||||
* - If workflowName is provided, finds that workflow
|
||||
* - Otherwise uses the user's first workflow as context
|
||||
* - The copilot can still operate on any workflow using list_user_workflows
|
||||
*/
|
||||
export async function POST(req: NextRequest) {
|
||||
const auth = await authenticateV1Request(req)
|
||||
if (!auth.authenticated || !auth.userId) {
|
||||
return NextResponse.json(
|
||||
{ success: false, error: auth.error || 'Unauthorized' },
|
||||
{ status: 401 }
|
||||
)
|
||||
}
|
||||
|
||||
try {
|
||||
const body = await req.json()
|
||||
const parsed = RequestSchema.parse(body)
|
||||
const defaults = getCopilotModel('chat')
|
||||
const selectedModel = parsed.model || defaults.model
|
||||
|
||||
// Resolve workflow ID
|
||||
const resolved = await resolveWorkflowIdForUser(
|
||||
auth.userId,
|
||||
parsed.workflowId,
|
||||
parsed.workflowName
|
||||
)
|
||||
if (!resolved) {
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: 'No workflows found. Create a workflow first or provide a valid workflowId.',
|
||||
},
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
// Transform mode to transport mode (same as client API)
|
||||
// build and agent both map to 'agent' on the backend
|
||||
const effectiveMode = parsed.mode === 'agent' ? 'build' : parsed.mode
|
||||
const transportMode = effectiveMode === 'build' ? 'agent' : effectiveMode
|
||||
|
||||
// Always generate a chatId - required for artifacts system to work with subagents
|
||||
const chatId = parsed.chatId || crypto.randomUUID()
|
||||
|
||||
const requestPayload = {
|
||||
message: parsed.message,
|
||||
workflowId: resolved.workflowId,
|
||||
userId: auth.userId,
|
||||
model: selectedModel,
|
||||
mode: transportMode,
|
||||
messageId: crypto.randomUUID(),
|
||||
version: SIM_AGENT_VERSION,
|
||||
headless: true,
|
||||
chatId,
|
||||
}
|
||||
|
||||
const result = await orchestrateCopilotStream(requestPayload, {
|
||||
userId: auth.userId,
|
||||
workflowId: resolved.workflowId,
|
||||
chatId,
|
||||
autoExecuteTools: parsed.autoExecuteTools,
|
||||
timeout: parsed.timeout,
|
||||
interactive: false,
|
||||
})
|
||||
|
||||
return NextResponse.json({
|
||||
success: result.success,
|
||||
content: result.content,
|
||||
toolCalls: result.toolCalls,
|
||||
chatId: result.chatId || chatId, // Return the chatId for conversation continuity
|
||||
conversationId: result.conversationId,
|
||||
error: result.error,
|
||||
})
|
||||
} catch (error) {
|
||||
if (error instanceof z.ZodError) {
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'Invalid request', details: error.errors },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
logger.error('Headless copilot request failed', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return NextResponse.json({ success: false, error: 'Internal server error' }, { status: 500 })
|
||||
}
|
||||
}
|
||||
@@ -211,7 +211,7 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
|
||||
if (block.type === 'text') {
|
||||
const isLastTextBlock =
|
||||
index === message.contentBlocks!.length - 1 && block.type === 'text'
|
||||
const parsed = parseSpecialTags(block.content)
|
||||
const parsed = parseSpecialTags(block.content ?? '')
|
||||
// Mask credential IDs in the displayed content
|
||||
const cleanBlockContent = maskCredentialValue(
|
||||
parsed.cleanContent.replace(/\n{3,}/g, '\n\n')
|
||||
@@ -243,7 +243,7 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
|
||||
return (
|
||||
<div key={blockKey} className='w-full'>
|
||||
<ThinkingBlock
|
||||
content={maskCredentialValue(block.content)}
|
||||
content={maskCredentialValue(block.content ?? '')}
|
||||
isStreaming={isActivelyStreaming}
|
||||
hasFollowingContent={hasFollowingContent}
|
||||
hasSpecialTags={hasSpecialTags}
|
||||
@@ -251,7 +251,7 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
|
||||
</div>
|
||||
)
|
||||
}
|
||||
if (block.type === 'tool_call') {
|
||||
if (block.type === 'tool_call' && block.toolCall) {
|
||||
const blockKey = `tool-${block.toolCall.id}`
|
||||
|
||||
return (
|
||||
|
||||
@@ -1,20 +1,15 @@
|
||||
'use client'
|
||||
|
||||
import { memo, useEffect, useMemo, useRef, useState } from 'react'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import clsx from 'clsx'
|
||||
import { ChevronUp, LayoutList } from 'lucide-react'
|
||||
import Editor from 'react-simple-code-editor'
|
||||
import { Button, Code, getCodeEditorProps, highlight, languages } from '@/components/emcn'
|
||||
import { ClientToolCallState } from '@/lib/copilot/tools/client/base-tool'
|
||||
import { getClientTool } from '@/lib/copilot/tools/client/manager'
|
||||
import { getRegisteredTools } from '@/lib/copilot/tools/client/registry'
|
||||
import '@/lib/copilot/tools/client/init-tool-configs'
|
||||
import {
|
||||
getSubagentLabels as getSubagentLabelsFromConfig,
|
||||
getToolUIConfig,
|
||||
hasInterrupt as hasInterruptFromConfig,
|
||||
isSpecialTool as isSpecialToolFromConfig,
|
||||
} from '@/lib/copilot/tools/client/ui-config'
|
||||
ClientToolCallState,
|
||||
TOOL_DISPLAY_REGISTRY,
|
||||
} from '@/lib/copilot/tools/client/tool-display-registry'
|
||||
import { formatDuration } from '@/lib/core/utils/formatting'
|
||||
import { CopilotMarkdownRenderer } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/markdown-renderer'
|
||||
import { SmoothStreamingText } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/smooth-streaming'
|
||||
@@ -25,7 +20,6 @@ import { getDisplayValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/co
|
||||
import { getBlock } from '@/blocks/registry'
|
||||
import type { CopilotToolCall } from '@/stores/panel'
|
||||
import { useCopilotStore } from '@/stores/panel'
|
||||
import { CLASS_TOOL_METADATA } from '@/stores/panel/copilot/store'
|
||||
import type { SubAgentContentBlock } from '@/stores/panel/copilot/types'
|
||||
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
|
||||
|
||||
@@ -710,8 +704,8 @@ const ShimmerOverlayText = memo(function ShimmerOverlayText({
|
||||
* @returns The completion label from UI config, defaults to 'Thought'
|
||||
*/
|
||||
function getSubagentCompletionLabel(toolName: string): string {
|
||||
const labels = getSubagentLabelsFromConfig(toolName, false)
|
||||
return labels?.completed ?? 'Thought'
|
||||
const labels = TOOL_DISPLAY_REGISTRY[toolName]?.uiConfig?.subagentLabels
|
||||
return labels?.completed || 'Thought'
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -943,7 +937,7 @@ const SubagentContentRenderer = memo(function SubagentContentRenderer({
|
||||
* Determines if a tool call should display with special gradient styling.
|
||||
*/
|
||||
function isSpecialToolCall(toolCall: CopilotToolCall): boolean {
|
||||
return isSpecialToolFromConfig(toolCall.name)
|
||||
return TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.isSpecial === true
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1223,28 +1217,11 @@ const WorkflowEditSummary = memo(function WorkflowEditSummary({
|
||||
|
||||
/** Checks if a tool is server-side executed (not a client tool) */
|
||||
function isIntegrationTool(toolName: string): boolean {
|
||||
return !CLASS_TOOL_METADATA[toolName]
|
||||
return !TOOL_DISPLAY_REGISTRY[toolName]
|
||||
}
|
||||
|
||||
function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean {
|
||||
if (hasInterruptFromConfig(toolCall.name) && toolCall.state === 'pending') {
|
||||
return true
|
||||
}
|
||||
|
||||
const instance = getClientTool(toolCall.id)
|
||||
let hasInterrupt = !!instance?.getInterruptDisplays?.()
|
||||
if (!hasInterrupt) {
|
||||
try {
|
||||
const def = getRegisteredTools()[toolCall.name]
|
||||
if (def) {
|
||||
hasInterrupt =
|
||||
typeof def.hasInterrupt === 'function'
|
||||
? !!def.hasInterrupt(toolCall.params || {})
|
||||
: !!def.hasInterrupt
|
||||
}
|
||||
} catch {}
|
||||
}
|
||||
|
||||
const hasInterrupt = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.interrupt === true
|
||||
if (hasInterrupt && toolCall.state === 'pending') {
|
||||
return true
|
||||
}
|
||||
@@ -1257,109 +1234,50 @@ function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean {
|
||||
return false
|
||||
}
|
||||
|
||||
const toolCallLogger = createLogger('CopilotToolCall')
|
||||
|
||||
async function sendToolDecision(
|
||||
toolCallId: string,
|
||||
status: 'accepted' | 'rejected' | 'background'
|
||||
) {
|
||||
try {
|
||||
await fetch('/api/copilot/confirm', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ toolCallId, status }),
|
||||
})
|
||||
} catch (error) {
|
||||
toolCallLogger.warn('Failed to send tool decision', {
|
||||
toolCallId,
|
||||
status,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async function handleRun(
|
||||
toolCall: CopilotToolCall,
|
||||
setToolCallState: any,
|
||||
onStateChange?: any,
|
||||
editedParams?: any
|
||||
) {
|
||||
const instance = getClientTool(toolCall.id)
|
||||
|
||||
if (!instance && isIntegrationTool(toolCall.name)) {
|
||||
onStateChange?.('executing')
|
||||
try {
|
||||
await useCopilotStore.getState().executeIntegrationTool(toolCall.id)
|
||||
} catch (e) {
|
||||
setToolCallState(toolCall, 'error', { error: e instanceof Error ? e.message : String(e) })
|
||||
onStateChange?.('error')
|
||||
try {
|
||||
await fetch('/api/copilot/tools/mark-complete', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
id: toolCall.id,
|
||||
name: toolCall.name,
|
||||
status: 500,
|
||||
message: e instanceof Error ? e.message : 'Tool execution failed',
|
||||
data: { error: e instanceof Error ? e.message : String(e) },
|
||||
}),
|
||||
})
|
||||
} catch {
|
||||
console.error('[handleRun] Failed to notify backend of tool error:', toolCall.id)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if (!instance) return
|
||||
try {
|
||||
const mergedParams =
|
||||
editedParams ||
|
||||
(toolCall as any).params ||
|
||||
(toolCall as any).parameters ||
|
||||
(toolCall as any).input ||
|
||||
{}
|
||||
await instance.handleAccept?.(mergedParams)
|
||||
onStateChange?.('executing')
|
||||
} catch (e) {
|
||||
setToolCallState(toolCall, 'error', { error: e instanceof Error ? e.message : String(e) })
|
||||
}
|
||||
setToolCallState(toolCall, 'executing', editedParams ? { params: editedParams } : undefined)
|
||||
onStateChange?.('executing')
|
||||
await sendToolDecision(toolCall.id, 'accepted')
|
||||
}
|
||||
|
||||
async function handleSkip(toolCall: CopilotToolCall, setToolCallState: any, onStateChange?: any) {
|
||||
const instance = getClientTool(toolCall.id)
|
||||
|
||||
if (!instance && isIntegrationTool(toolCall.name)) {
|
||||
setToolCallState(toolCall, 'rejected')
|
||||
onStateChange?.('rejected')
|
||||
|
||||
let notified = false
|
||||
for (let attempt = 0; attempt < 3 && !notified; attempt++) {
|
||||
try {
|
||||
const res = await fetch('/api/copilot/tools/mark-complete', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
id: toolCall.id,
|
||||
name: toolCall.name,
|
||||
status: 400,
|
||||
message: 'Tool execution skipped by user',
|
||||
data: { skipped: true, reason: 'user_skipped' },
|
||||
}),
|
||||
})
|
||||
if (res.ok) {
|
||||
notified = true
|
||||
}
|
||||
} catch (e) {
|
||||
if (attempt < 2) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!notified) {
|
||||
console.error('[handleSkip] Failed to notify backend after 3 attempts:', toolCall.id)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if (instance) {
|
||||
try {
|
||||
await instance.handleReject?.()
|
||||
} catch {}
|
||||
}
|
||||
setToolCallState(toolCall, 'rejected')
|
||||
onStateChange?.('rejected')
|
||||
await sendToolDecision(toolCall.id, 'rejected')
|
||||
}
|
||||
|
||||
function getDisplayName(toolCall: CopilotToolCall): string {
|
||||
const fromStore = (toolCall as any).display?.text
|
||||
if (fromStore) return fromStore
|
||||
try {
|
||||
const def = getRegisteredTools()[toolCall.name] as any
|
||||
const byState = def?.metadata?.displayNames?.[toolCall.state]
|
||||
if (byState?.text) return byState.text
|
||||
} catch {}
|
||||
const registryEntry = TOOL_DISPLAY_REGISTRY[toolCall.name]
|
||||
const byState = registryEntry?.displayNames?.[toolCall.state as ClientToolCallState]
|
||||
if (byState?.text) return byState.text
|
||||
|
||||
const stateVerb = getStateVerb(toolCall.state)
|
||||
const formattedName = formatToolName(toolCall.name)
|
||||
@@ -1509,7 +1427,7 @@ export function ToolCall({
|
||||
// Check if this integration tool is auto-allowed
|
||||
// Subscribe to autoAllowedTools so we re-render when it changes
|
||||
const autoAllowedTools = useCopilotStore((s) => s.autoAllowedTools)
|
||||
const { removeAutoAllowedTool } = useCopilotStore()
|
||||
const { removeAutoAllowedTool, setToolCallState } = useCopilotStore()
|
||||
const isAutoAllowed = isIntegrationTool(toolCall.name) && autoAllowedTools.includes(toolCall.name)
|
||||
|
||||
// Update edited params when toolCall params change (deep comparison to avoid resetting user edits on ref change)
|
||||
@@ -1537,23 +1455,7 @@ export function ToolCall({
|
||||
return null
|
||||
|
||||
// Special rendering for subagent tools - show as thinking text with tool calls at top level
|
||||
const SUBAGENT_TOOLS = [
|
||||
'plan',
|
||||
'edit',
|
||||
'debug',
|
||||
'test',
|
||||
'deploy',
|
||||
'evaluate',
|
||||
'auth',
|
||||
'research',
|
||||
'knowledge',
|
||||
'custom_tool',
|
||||
'tour',
|
||||
'info',
|
||||
'workflow',
|
||||
'superagent',
|
||||
]
|
||||
const isSubagentTool = SUBAGENT_TOOLS.includes(toolCall.name)
|
||||
const isSubagentTool = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.subagent === true
|
||||
|
||||
// For ALL subagent tools, don't show anything until we have blocks with content
|
||||
if (isSubagentTool) {
|
||||
@@ -1593,17 +1495,18 @@ export function ToolCall({
|
||||
stateStr === 'aborted'
|
||||
|
||||
// Allow rendering if:
|
||||
// 1. Tool is in CLASS_TOOL_METADATA (client tools), OR
|
||||
// 1. Tool is in TOOL_DISPLAY_REGISTRY (client tools), OR
|
||||
// 2. We're in build mode (integration tools are executed server-side), OR
|
||||
// 3. Tool call is already completed (historical - should always render)
|
||||
const isClientTool = !!CLASS_TOOL_METADATA[toolCall.name]
|
||||
const isClientTool = !!TOOL_DISPLAY_REGISTRY[toolCall.name]
|
||||
const isIntegrationToolInBuildMode = mode === 'build' && !isClientTool
|
||||
|
||||
if (!isClientTool && !isIntegrationToolInBuildMode && !isCompletedToolCall) {
|
||||
return null
|
||||
}
|
||||
const toolUIConfig = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig
|
||||
// Check if tool has params table config (meaning it's expandable)
|
||||
const hasParamsTable = !!getToolUIConfig(toolCall.name)?.paramsTable
|
||||
const hasParamsTable = !!toolUIConfig?.paramsTable
|
||||
const isRunWorkflow = toolCall.name === 'run_workflow'
|
||||
const isExpandableTool =
|
||||
hasParamsTable ||
|
||||
@@ -1613,7 +1516,6 @@ export function ToolCall({
|
||||
const showButtons = isCurrentMessage && shouldShowRunSkipButtons(toolCall)
|
||||
|
||||
// Check UI config for secondary action - only show for current message tool calls
|
||||
const toolUIConfig = getToolUIConfig(toolCall.name)
|
||||
const secondaryAction = toolUIConfig?.secondaryAction
|
||||
const showSecondaryAction = secondaryAction?.showInStates.includes(
|
||||
toolCall.state as ClientToolCallState
|
||||
@@ -2211,16 +2113,9 @@ export function ToolCall({
|
||||
<div className='mt-[10px]'>
|
||||
<Button
|
||||
onClick={async () => {
|
||||
try {
|
||||
const instance = getClientTool(toolCall.id)
|
||||
instance?.setState?.((ClientToolCallState as any).background)
|
||||
await instance?.markToolComplete?.(
|
||||
200,
|
||||
'The user has chosen to move the workflow execution to the background. Check back with them later to know when the workflow execution is complete'
|
||||
)
|
||||
forceUpdate({})
|
||||
onStateChange?.('background')
|
||||
} catch {}
|
||||
setToolCallState(toolCall, ClientToolCallState.background)
|
||||
onStateChange?.('background')
|
||||
await sendToolDecision(toolCall.id, 'background')
|
||||
}}
|
||||
variant='tertiary'
|
||||
title='Move to Background'
|
||||
@@ -2232,21 +2127,9 @@ export function ToolCall({
|
||||
<div className='mt-[10px]'>
|
||||
<Button
|
||||
onClick={async () => {
|
||||
try {
|
||||
const instance = getClientTool(toolCall.id)
|
||||
const elapsedSeconds = instance?.getElapsedSeconds?.() || 0
|
||||
instance?.setState?.((ClientToolCallState as any).background, {
|
||||
result: { _elapsedSeconds: elapsedSeconds },
|
||||
})
|
||||
const { updateToolCallParams } = useCopilotStore.getState()
|
||||
updateToolCallParams?.(toolCall.id, { _elapsedSeconds: Math.round(elapsedSeconds) })
|
||||
await instance?.markToolComplete?.(
|
||||
200,
|
||||
`User woke you up after ${Math.round(elapsedSeconds)} seconds`
|
||||
)
|
||||
forceUpdate({})
|
||||
onStateChange?.('background')
|
||||
} catch {}
|
||||
setToolCallState(toolCall, ClientToolCallState.background)
|
||||
onStateChange?.('background')
|
||||
await sendToolDecision(toolCall.id, 'background')
|
||||
}}
|
||||
variant='tertiary'
|
||||
title='Wake'
|
||||
|
||||
@@ -246,6 +246,7 @@ export function getCommandDisplayLabel(commandId: string): string {
|
||||
* Model configuration options
|
||||
*/
|
||||
export const MODEL_OPTIONS = [
|
||||
{ value: 'claude-4.6-opus', label: 'Claude 4.6 Opus' },
|
||||
{ value: 'claude-4.5-opus', label: 'Claude 4.5 Opus' },
|
||||
{ value: 'claude-4.5-sonnet', label: 'Claude 4.5 Sonnet' },
|
||||
{ value: 'claude-4.5-haiku', label: 'Claude 4.5 Haiku' },
|
||||
|
||||
@@ -107,13 +107,13 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
|
||||
currentChat,
|
||||
selectChat,
|
||||
deleteChat,
|
||||
areChatsFresh,
|
||||
workflowId: copilotWorkflowId,
|
||||
setPlanTodos,
|
||||
closePlanTodos,
|
||||
clearPlanArtifact,
|
||||
savePlanArtifact,
|
||||
loadAutoAllowedTools,
|
||||
resumeActiveStream,
|
||||
} = useCopilotStore()
|
||||
|
||||
// Initialize copilot
|
||||
@@ -126,6 +126,7 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
|
||||
loadAutoAllowedTools,
|
||||
currentChat,
|
||||
isSendingMessage,
|
||||
resumeActiveStream,
|
||||
})
|
||||
|
||||
// Handle scroll management (80px stickiness for copilot)
|
||||
@@ -140,7 +141,6 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
|
||||
activeWorkflowId,
|
||||
copilotWorkflowId,
|
||||
loadChats,
|
||||
areChatsFresh,
|
||||
isSendingMessage,
|
||||
}
|
||||
)
|
||||
@@ -421,8 +421,8 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Show loading state until fully initialized */}
|
||||
{!isInitialized ? (
|
||||
{/* Show loading state until fully initialized, but skip if actively streaming (resume case) */}
|
||||
{!isInitialized && !isSendingMessage ? (
|
||||
<div className='flex h-full w-full items-center justify-center'>
|
||||
<div className='flex flex-col items-center gap-3'>
|
||||
<p className='text-muted-foreground text-sm'>Loading copilot</p>
|
||||
|
||||
@@ -10,7 +10,6 @@ interface UseChatHistoryProps {
|
||||
activeWorkflowId: string | null
|
||||
copilotWorkflowId: string | null
|
||||
loadChats: (forceRefresh: boolean) => Promise<void>
|
||||
areChatsFresh: (workflowId: string) => boolean
|
||||
isSendingMessage: boolean
|
||||
}
|
||||
|
||||
@@ -21,8 +20,7 @@ interface UseChatHistoryProps {
|
||||
* @returns Chat history utilities
|
||||
*/
|
||||
export function useChatHistory(props: UseChatHistoryProps) {
|
||||
const { chats, activeWorkflowId, copilotWorkflowId, loadChats, areChatsFresh, isSendingMessage } =
|
||||
props
|
||||
const { chats, activeWorkflowId, copilotWorkflowId, loadChats, isSendingMessage } = props
|
||||
|
||||
/** Groups chats by time period (Today, Yesterday, This Week, etc.) */
|
||||
const groupedChats = useMemo(() => {
|
||||
@@ -80,7 +78,7 @@ export function useChatHistory(props: UseChatHistoryProps) {
|
||||
/** Handles history dropdown opening and loads chats if needed (non-blocking) */
|
||||
const handleHistoryDropdownOpen = useCallback(
|
||||
(open: boolean) => {
|
||||
if (open && activeWorkflowId && !isSendingMessage && !areChatsFresh(activeWorkflowId)) {
|
||||
if (open && activeWorkflowId && !isSendingMessage) {
|
||||
loadChats(false).catch((error) => {
|
||||
logger.error('Failed to load chat history:', error)
|
||||
})
|
||||
@@ -90,7 +88,7 @@ export function useChatHistory(props: UseChatHistoryProps) {
|
||||
logger.info('Chat history opened during stream - showing cached data only')
|
||||
}
|
||||
},
|
||||
[activeWorkflowId, areChatsFresh, isSendingMessage, loadChats]
|
||||
[activeWorkflowId, isSendingMessage, loadChats]
|
||||
)
|
||||
|
||||
return {
|
||||
|
||||
@@ -14,6 +14,7 @@ interface UseCopilotInitializationProps {
|
||||
loadAutoAllowedTools: () => Promise<void>
|
||||
currentChat: any
|
||||
isSendingMessage: boolean
|
||||
resumeActiveStream: () => Promise<boolean>
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -32,11 +33,13 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) {
|
||||
loadAutoAllowedTools,
|
||||
currentChat,
|
||||
isSendingMessage,
|
||||
resumeActiveStream,
|
||||
} = props
|
||||
|
||||
const [isInitialized, setIsInitialized] = useState(false)
|
||||
const lastWorkflowIdRef = useRef<string | null>(null)
|
||||
const hasMountedRef = useRef(false)
|
||||
const hasResumedRef = useRef(false)
|
||||
|
||||
/** Initialize on mount - loads chats if needed. Never loads during streaming */
|
||||
useEffect(() => {
|
||||
@@ -105,6 +108,16 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) {
|
||||
isSendingMessage,
|
||||
])
|
||||
|
||||
/** Try to resume active stream on mount - runs early, before waiting for chats */
|
||||
useEffect(() => {
|
||||
if (hasResumedRef.current || isSendingMessage) return
|
||||
hasResumedRef.current = true
|
||||
// Resume immediately on mount - don't wait for isInitialized
|
||||
resumeActiveStream().catch((err) => {
|
||||
logger.warn('[Copilot] Failed to resume active stream', err)
|
||||
})
|
||||
}, [isSendingMessage, resumeActiveStream])
|
||||
|
||||
/** Load auto-allowed tools once on mount - runs immediately, independent of workflow */
|
||||
const hasLoadedAutoAllowedToolsRef = useRef(false)
|
||||
useEffect(() => {
|
||||
|
||||
@@ -24,6 +24,7 @@ export { ResponseFormat } from './response/response-format'
|
||||
export { ScheduleInfo } from './schedule-info/schedule-info'
|
||||
export { SheetSelectorInput } from './sheet-selector/sheet-selector-input'
|
||||
export { ShortInput } from './short-input/short-input'
|
||||
export { SkillInput } from './skill-input/skill-input'
|
||||
export { SlackSelectorInput } from './slack-selector/slack-selector-input'
|
||||
export { SliderInput } from './slider-input/slider-input'
|
||||
export { InputFormat } from './starter/input-format'
|
||||
|
||||
@@ -0,0 +1,181 @@
|
||||
'use client'
|
||||
|
||||
import { useCallback, useMemo, useState } from 'react'
|
||||
import { Plus, XIcon } from 'lucide-react'
|
||||
import { useParams } from 'next/navigation'
|
||||
import { Combobox, type ComboboxOptionGroup } from '@/components/emcn'
|
||||
import { AgentSkillsIcon } from '@/components/icons'
|
||||
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/hooks/use-sub-block-value'
|
||||
import { SkillModal } from '@/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/skills/components/skill-modal'
|
||||
import type { SkillDefinition } from '@/hooks/queries/skills'
|
||||
import { useSkills } from '@/hooks/queries/skills'
|
||||
import { usePermissionConfig } from '@/hooks/use-permission-config'
|
||||
|
||||
interface StoredSkill {
|
||||
skillId: string
|
||||
name?: string
|
||||
}
|
||||
|
||||
interface SkillInputProps {
|
||||
blockId: string
|
||||
subBlockId: string
|
||||
isPreview?: boolean
|
||||
previewValue?: unknown
|
||||
disabled?: boolean
|
||||
}
|
||||
|
||||
export function SkillInput({
|
||||
blockId,
|
||||
subBlockId,
|
||||
isPreview,
|
||||
previewValue,
|
||||
disabled,
|
||||
}: SkillInputProps) {
|
||||
const params = useParams()
|
||||
const workspaceId = params.workspaceId as string
|
||||
|
||||
const { config: permissionConfig } = usePermissionConfig()
|
||||
const { data: workspaceSkills = [] } = useSkills(workspaceId)
|
||||
const [value, setValue] = useSubBlockValue<StoredSkill[]>(blockId, subBlockId)
|
||||
const [showCreateModal, setShowCreateModal] = useState(false)
|
||||
const [editingSkill, setEditingSkill] = useState<SkillDefinition | null>(null)
|
||||
const [open, setOpen] = useState(false)
|
||||
|
||||
const selectedSkills: StoredSkill[] = useMemo(() => {
|
||||
if (isPreview && previewValue) {
|
||||
return Array.isArray(previewValue) ? previewValue : []
|
||||
}
|
||||
return Array.isArray(value) ? value : []
|
||||
}, [isPreview, previewValue, value])
|
||||
|
||||
const selectedIds = useMemo(() => new Set(selectedSkills.map((s) => s.skillId)), [selectedSkills])
|
||||
|
||||
const skillsDisabled = permissionConfig.disableSkills
|
||||
|
||||
const skillGroups = useMemo((): ComboboxOptionGroup[] => {
|
||||
const groups: ComboboxOptionGroup[] = []
|
||||
|
||||
if (!skillsDisabled) {
|
||||
groups.push({
|
||||
items: [
|
||||
{
|
||||
label: 'Create Skill',
|
||||
value: 'action-create-skill',
|
||||
icon: Plus,
|
||||
onSelect: () => {
|
||||
setShowCreateModal(true)
|
||||
setOpen(false)
|
||||
},
|
||||
disabled: isPreview,
|
||||
},
|
||||
],
|
||||
})
|
||||
}
|
||||
|
||||
const availableSkills = workspaceSkills.filter((s) => !selectedIds.has(s.id))
|
||||
if (!skillsDisabled && availableSkills.length > 0) {
|
||||
groups.push({
|
||||
section: 'Skills',
|
||||
items: availableSkills.map((s) => {
|
||||
return {
|
||||
label: s.name,
|
||||
value: `skill-${s.id}`,
|
||||
icon: AgentSkillsIcon,
|
||||
onSelect: () => {
|
||||
const newSkills: StoredSkill[] = [...selectedSkills, { skillId: s.id, name: s.name }]
|
||||
setValue(newSkills)
|
||||
setOpen(false)
|
||||
},
|
||||
}
|
||||
}),
|
||||
})
|
||||
}
|
||||
|
||||
return groups
|
||||
}, [workspaceSkills, selectedIds, selectedSkills, setValue, isPreview, skillsDisabled])
|
||||
|
||||
const handleRemove = useCallback(
|
||||
(skillId: string) => {
|
||||
const newSkills = selectedSkills.filter((s) => s.skillId !== skillId)
|
||||
setValue(newSkills)
|
||||
},
|
||||
[selectedSkills, setValue]
|
||||
)
|
||||
|
||||
const handleSkillSaved = useCallback(() => {
|
||||
setShowCreateModal(false)
|
||||
setEditingSkill(null)
|
||||
}, [])
|
||||
|
||||
const resolveSkillName = useCallback(
|
||||
(stored: StoredSkill): string => {
|
||||
const found = workspaceSkills.find((s) => s.id === stored.skillId)
|
||||
return found?.name ?? stored.name ?? stored.skillId
|
||||
},
|
||||
[workspaceSkills]
|
||||
)
|
||||
|
||||
return (
|
||||
<>
|
||||
<div className='w-full space-y-[8px]'>
|
||||
<Combobox
|
||||
options={[]}
|
||||
groups={skillGroups}
|
||||
placeholder='Add skill...'
|
||||
disabled={disabled}
|
||||
searchable
|
||||
searchPlaceholder='Search skills...'
|
||||
maxHeight={240}
|
||||
emptyMessage='No skills found'
|
||||
onOpenChange={setOpen}
|
||||
/>
|
||||
|
||||
{selectedSkills.length > 0 && (
|
||||
<div className='flex flex-wrap gap-[4px]'>
|
||||
{selectedSkills.map((stored) => {
|
||||
const fullSkill = workspaceSkills.find((s) => s.id === stored.skillId)
|
||||
return (
|
||||
<div
|
||||
key={stored.skillId}
|
||||
className='flex cursor-pointer items-center gap-[4px] rounded-[4px] border border-[var(--border-1)] bg-[var(--surface-5)] px-[6px] py-[2px] font-medium text-[12px] text-[var(--text-secondary)] hover:bg-[var(--surface-6)]'
|
||||
onClick={() => {
|
||||
if (fullSkill && !disabled && !isPreview) {
|
||||
setEditingSkill(fullSkill)
|
||||
}
|
||||
}}
|
||||
>
|
||||
<AgentSkillsIcon className='h-[10px] w-[10px] text-[var(--text-tertiary)]' />
|
||||
<span className='max-w-[140px] truncate'>{resolveSkillName(stored)}</span>
|
||||
{!disabled && !isPreview && (
|
||||
<button
|
||||
type='button'
|
||||
onClick={(e) => {
|
||||
e.stopPropagation()
|
||||
handleRemove(stored.skillId)
|
||||
}}
|
||||
className='ml-[2px] rounded-[2px] p-[1px] text-[var(--text-tertiary)] hover:bg-[var(--surface-7)] hover:text-[var(--text-secondary)]'
|
||||
>
|
||||
<XIcon className='h-[10px] w-[10px]' />
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
})}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<SkillModal
|
||||
open={showCreateModal || !!editingSkill}
|
||||
onOpenChange={(isOpen) => {
|
||||
if (!isOpen) {
|
||||
setShowCreateModal(false)
|
||||
setEditingSkill(null)
|
||||
}
|
||||
}}
|
||||
onSave={handleSkillSaved}
|
||||
initialValues={editingSkill ?? undefined}
|
||||
/>
|
||||
</>
|
||||
)
|
||||
}
|
||||
@@ -32,6 +32,7 @@ import {
|
||||
ScheduleInfo,
|
||||
SheetSelectorInput,
|
||||
ShortInput,
|
||||
SkillInput,
|
||||
SlackSelectorInput,
|
||||
SliderInput,
|
||||
Switch,
|
||||
@@ -687,6 +688,17 @@ function SubBlockComponent({
|
||||
/>
|
||||
)
|
||||
|
||||
case 'skill-input':
|
||||
return (
|
||||
<SkillInput
|
||||
blockId={blockId}
|
||||
subBlockId={config.id}
|
||||
isPreview={isPreview}
|
||||
previewValue={previewValue}
|
||||
disabled={isDisabled}
|
||||
/>
|
||||
)
|
||||
|
||||
case 'checkbox-list':
|
||||
return (
|
||||
<CheckboxList
|
||||
|
||||
@@ -18,7 +18,7 @@ import 'reactflow/dist/style.css'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { useShallow } from 'zustand/react/shallow'
|
||||
import { useSession } from '@/lib/auth/auth-client'
|
||||
import type { OAuthConnectEventDetail } from '@/lib/copilot/tools/client/other/oauth-request-access'
|
||||
import type { OAuthConnectEventDetail } from '@/lib/copilot/tools/client/base-tool'
|
||||
import type { OAuthProvider } from '@/lib/oauth'
|
||||
import { BLOCK_DIMENSIONS, CONTAINER_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions'
|
||||
import { TriggerUtils } from '@/lib/workflows/triggers/triggers'
|
||||
|
||||
@@ -9,6 +9,7 @@ export { Files as FileUploads } from './files/files'
|
||||
export { General } from './general/general'
|
||||
export { Integrations } from './integrations/integrations'
|
||||
export { MCP } from './mcp/mcp'
|
||||
export { Skills } from './skills/skills'
|
||||
export { Subscription } from './subscription/subscription'
|
||||
export { TeamManagement } from './team-management/team-management'
|
||||
export { WorkflowMcpServers } from './workflow-mcp-servers/workflow-mcp-servers'
|
||||
|
||||
@@ -0,0 +1,201 @@
|
||||
'use client'
|
||||
|
||||
import type { ChangeEvent } from 'react'
|
||||
import { useEffect, useMemo, useState } from 'react'
|
||||
import { useParams } from 'next/navigation'
|
||||
import {
|
||||
Button,
|
||||
Input,
|
||||
Label,
|
||||
Modal,
|
||||
ModalBody,
|
||||
ModalContent,
|
||||
ModalFooter,
|
||||
ModalHeader,
|
||||
Textarea,
|
||||
} from '@/components/emcn'
|
||||
import type { SkillDefinition } from '@/hooks/queries/skills'
|
||||
import { useCreateSkill, useUpdateSkill } from '@/hooks/queries/skills'
|
||||
|
||||
interface SkillModalProps {
|
||||
open: boolean
|
||||
onOpenChange: (open: boolean) => void
|
||||
onSave: () => void
|
||||
onDelete?: (skillId: string) => void
|
||||
initialValues?: SkillDefinition
|
||||
}
|
||||
|
||||
const KEBAB_CASE_REGEX = /^[a-z0-9]+(-[a-z0-9]+)*$/
|
||||
|
||||
export function SkillModal({
|
||||
open,
|
||||
onOpenChange,
|
||||
onSave,
|
||||
onDelete,
|
||||
initialValues,
|
||||
}: SkillModalProps) {
|
||||
const params = useParams()
|
||||
const workspaceId = params.workspaceId as string
|
||||
|
||||
const createSkill = useCreateSkill()
|
||||
const updateSkill = useUpdateSkill()
|
||||
|
||||
const [name, setName] = useState('')
|
||||
const [description, setDescription] = useState('')
|
||||
const [content, setContent] = useState('')
|
||||
const [formError, setFormError] = useState('')
|
||||
const [saving, setSaving] = useState(false)
|
||||
|
||||
useEffect(() => {
|
||||
if (open) {
|
||||
if (initialValues) {
|
||||
setName(initialValues.name)
|
||||
setDescription(initialValues.description)
|
||||
setContent(initialValues.content)
|
||||
} else {
|
||||
setName('')
|
||||
setDescription('')
|
||||
setContent('')
|
||||
}
|
||||
setFormError('')
|
||||
}
|
||||
}, [open, initialValues])
|
||||
|
||||
const hasChanges = useMemo(() => {
|
||||
if (!initialValues) return true
|
||||
return (
|
||||
name !== initialValues.name ||
|
||||
description !== initialValues.description ||
|
||||
content !== initialValues.content
|
||||
)
|
||||
}, [name, description, content, initialValues])
|
||||
|
||||
const handleSave = async () => {
|
||||
if (!name.trim()) {
|
||||
setFormError('Name is required')
|
||||
return
|
||||
}
|
||||
if (name.length > 64) {
|
||||
setFormError('Name must be 64 characters or less')
|
||||
return
|
||||
}
|
||||
if (!KEBAB_CASE_REGEX.test(name)) {
|
||||
setFormError('Name must be kebab-case (e.g. my-skill)')
|
||||
return
|
||||
}
|
||||
if (!description.trim()) {
|
||||
setFormError('Description is required')
|
||||
return
|
||||
}
|
||||
if (!content.trim()) {
|
||||
setFormError('Content is required')
|
||||
return
|
||||
}
|
||||
|
||||
setSaving(true)
|
||||
|
||||
try {
|
||||
if (initialValues) {
|
||||
await updateSkill.mutateAsync({
|
||||
workspaceId,
|
||||
skillId: initialValues.id,
|
||||
updates: { name, description, content },
|
||||
})
|
||||
} else {
|
||||
await createSkill.mutateAsync({
|
||||
workspaceId,
|
||||
skill: { name, description, content },
|
||||
})
|
||||
}
|
||||
onSave()
|
||||
} catch (error) {
|
||||
const message =
|
||||
error instanceof Error && error.message.includes('already exists')
|
||||
? error.message
|
||||
: 'Failed to save skill. Please try again.'
|
||||
setFormError(message)
|
||||
} finally {
|
||||
setSaving(false)
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<Modal open={open} onOpenChange={onOpenChange}>
|
||||
<ModalContent size='xl'>
|
||||
<ModalHeader>{initialValues ? 'Edit Skill' : 'Create Skill'}</ModalHeader>
|
||||
<ModalBody>
|
||||
<div className='flex flex-col gap-[16px]'>
|
||||
<div className='flex flex-col gap-[4px]'>
|
||||
<Label htmlFor='skill-name' className='font-medium text-[13px]'>
|
||||
Name
|
||||
</Label>
|
||||
<Input
|
||||
id='skill-name'
|
||||
placeholder='my-skill-name'
|
||||
value={name}
|
||||
onChange={(e) => {
|
||||
setName(e.target.value)
|
||||
if (formError) setFormError('')
|
||||
}}
|
||||
/>
|
||||
<span className='text-[11px] text-[var(--text-muted)]'>
|
||||
Lowercase letters, numbers, and hyphens (e.g. my-skill)
|
||||
</span>
|
||||
</div>
|
||||
|
||||
<div className='flex flex-col gap-[4px]'>
|
||||
<Label htmlFor='skill-description' className='font-medium text-[13px]'>
|
||||
Description
|
||||
</Label>
|
||||
<Input
|
||||
id='skill-description'
|
||||
placeholder='What this skill does and when to use it...'
|
||||
value={description}
|
||||
onChange={(e) => {
|
||||
setDescription(e.target.value)
|
||||
if (formError) setFormError('')
|
||||
}}
|
||||
maxLength={1024}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className='flex flex-col gap-[4px]'>
|
||||
<Label htmlFor='skill-content' className='font-medium text-[13px]'>
|
||||
Content
|
||||
</Label>
|
||||
<Textarea
|
||||
id='skill-content'
|
||||
placeholder='Skill instructions in markdown...'
|
||||
value={content}
|
||||
onChange={(e: ChangeEvent<HTMLTextAreaElement>) => {
|
||||
setContent(e.target.value)
|
||||
if (formError) setFormError('')
|
||||
}}
|
||||
className='min-h-[200px] resize-y font-mono text-[13px]'
|
||||
/>
|
||||
</div>
|
||||
|
||||
{formError && <span className='text-[11px] text-[var(--text-error)]'>{formError}</span>}
|
||||
</div>
|
||||
</ModalBody>
|
||||
<ModalFooter className='items-center justify-between'>
|
||||
{initialValues && onDelete ? (
|
||||
<Button variant='destructive' onClick={() => onDelete(initialValues.id)}>
|
||||
Delete
|
||||
</Button>
|
||||
) : (
|
||||
<div />
|
||||
)}
|
||||
<div className='flex gap-2'>
|
||||
<Button variant='default' onClick={() => onOpenChange(false)}>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button variant='tertiary' onClick={handleSave} disabled={saving || !hasChanges}>
|
||||
{saving ? 'Saving...' : initialValues ? 'Update' : 'Create'}
|
||||
</Button>
|
||||
</div>
|
||||
</ModalFooter>
|
||||
</ModalContent>
|
||||
</Modal>
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,219 @@
|
||||
'use client'
|
||||
|
||||
import { useState } from 'react'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { Plus, Search } from 'lucide-react'
|
||||
import { useParams } from 'next/navigation'
|
||||
import {
|
||||
Button,
|
||||
Input,
|
||||
Modal,
|
||||
ModalBody,
|
||||
ModalContent,
|
||||
ModalFooter,
|
||||
ModalHeader,
|
||||
} from '@/components/emcn'
|
||||
import { Skeleton } from '@/components/ui'
|
||||
import { cn } from '@/lib/core/utils/cn'
|
||||
import { SkillModal } from '@/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/skills/components/skill-modal'
|
||||
import type { SkillDefinition } from '@/hooks/queries/skills'
|
||||
import { useDeleteSkill, useSkills } from '@/hooks/queries/skills'
|
||||
|
||||
const logger = createLogger('SkillsSettings')
|
||||
|
||||
function SkillSkeleton() {
|
||||
return (
|
||||
<div className='flex items-center justify-between gap-[12px]'>
|
||||
<div className='flex min-w-0 flex-col justify-center gap-[1px]'>
|
||||
<Skeleton className='h-[14px] w-[100px]' />
|
||||
<Skeleton className='h-[13px] w-[200px]' />
|
||||
</div>
|
||||
<div className='flex flex-shrink-0 items-center gap-[8px]'>
|
||||
<Skeleton className='h-[30px] w-[40px] rounded-[4px]' />
|
||||
<Skeleton className='h-[30px] w-[54px] rounded-[4px]' />
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
export function Skills() {
|
||||
const params = useParams()
|
||||
const workspaceId = params.workspaceId as string
|
||||
|
||||
const { data: skills = [], isLoading, error, refetch: refetchSkills } = useSkills(workspaceId)
|
||||
const deleteSkillMutation = useDeleteSkill()
|
||||
|
||||
const [searchTerm, setSearchTerm] = useState('')
|
||||
const [deletingSkills, setDeletingSkills] = useState<Set<string>>(new Set())
|
||||
const [editingSkill, setEditingSkill] = useState<SkillDefinition | null>(null)
|
||||
const [showAddForm, setShowAddForm] = useState(false)
|
||||
const [skillToDelete, setSkillToDelete] = useState<{ id: string; name: string } | null>(null)
|
||||
const [showDeleteDialog, setShowDeleteDialog] = useState(false)
|
||||
|
||||
const filteredSkills = skills.filter((s) => {
|
||||
if (!searchTerm.trim()) return true
|
||||
const searchLower = searchTerm.toLowerCase()
|
||||
return (
|
||||
s.name.toLowerCase().includes(searchLower) ||
|
||||
s.description.toLowerCase().includes(searchLower)
|
||||
)
|
||||
})
|
||||
|
||||
const handleDeleteClick = (skillId: string) => {
|
||||
const s = skills.find((sk) => sk.id === skillId)
|
||||
if (!s) return
|
||||
|
||||
setSkillToDelete({ id: skillId, name: s.name })
|
||||
setShowDeleteDialog(true)
|
||||
}
|
||||
|
||||
const handleDeleteSkill = async () => {
|
||||
if (!skillToDelete) return
|
||||
|
||||
setDeletingSkills((prev) => new Set(prev).add(skillToDelete.id))
|
||||
setShowDeleteDialog(false)
|
||||
|
||||
try {
|
||||
await deleteSkillMutation.mutateAsync({
|
||||
workspaceId,
|
||||
skillId: skillToDelete.id,
|
||||
})
|
||||
logger.info(`Deleted skill: ${skillToDelete.id}`)
|
||||
} catch (error) {
|
||||
logger.error('Error deleting skill:', error)
|
||||
} finally {
|
||||
setDeletingSkills((prev) => {
|
||||
const next = new Set(prev)
|
||||
next.delete(skillToDelete.id)
|
||||
return next
|
||||
})
|
||||
setSkillToDelete(null)
|
||||
}
|
||||
}
|
||||
|
||||
const handleSkillSaved = () => {
|
||||
setShowAddForm(false)
|
||||
setEditingSkill(null)
|
||||
refetchSkills()
|
||||
}
|
||||
|
||||
const hasSkills = skills && skills.length > 0
|
||||
const showEmptyState = !hasSkills && !showAddForm && !editingSkill
|
||||
const showNoResults = searchTerm.trim() && filteredSkills.length === 0 && skills.length > 0
|
||||
|
||||
return (
|
||||
<>
|
||||
<div className='flex h-full flex-col gap-[16px]'>
|
||||
<div className='flex items-center gap-[8px]'>
|
||||
<div
|
||||
className={cn(
|
||||
'flex flex-1 items-center gap-[8px] rounded-[8px] border border-[var(--border)] bg-transparent px-[8px] py-[5px] transition-colors duration-100 dark:bg-[var(--surface-4)] dark:hover:border-[var(--border-1)] dark:hover:bg-[var(--surface-5)]',
|
||||
isLoading && 'opacity-50'
|
||||
)}
|
||||
>
|
||||
<Search
|
||||
className='h-[14px] w-[14px] flex-shrink-0 text-[var(--text-tertiary)]'
|
||||
strokeWidth={2}
|
||||
/>
|
||||
<Input
|
||||
placeholder='Search skills...'
|
||||
value={searchTerm}
|
||||
onChange={(e) => setSearchTerm(e.target.value)}
|
||||
disabled={isLoading}
|
||||
className='h-auto flex-1 border-0 bg-transparent p-0 font-base leading-none placeholder:text-[var(--text-tertiary)] focus-visible:ring-0 focus-visible:ring-offset-0 disabled:cursor-not-allowed disabled:opacity-100'
|
||||
/>
|
||||
</div>
|
||||
<Button onClick={() => setShowAddForm(true)} disabled={isLoading} variant='tertiary'>
|
||||
<Plus className='mr-[6px] h-[13px] w-[13px]' />
|
||||
Add
|
||||
</Button>
|
||||
</div>
|
||||
|
||||
<div className='min-h-0 flex-1 overflow-y-auto'>
|
||||
{error ? (
|
||||
<div className='flex h-full flex-col items-center justify-center gap-[8px]'>
|
||||
<p className='text-[#DC2626] text-[11px] leading-tight dark:text-[#F87171]'>
|
||||
{error instanceof Error ? error.message : 'Failed to load skills'}
|
||||
</p>
|
||||
</div>
|
||||
) : isLoading ? (
|
||||
<div className='flex flex-col gap-[8px]'>
|
||||
<SkillSkeleton />
|
||||
<SkillSkeleton />
|
||||
<SkillSkeleton />
|
||||
</div>
|
||||
) : showEmptyState ? (
|
||||
<div className='flex h-full items-center justify-center text-[13px] text-[var(--text-muted)]'>
|
||||
Click "Add" above to get started
|
||||
</div>
|
||||
) : (
|
||||
<div className='flex flex-col gap-[8px]'>
|
||||
{filteredSkills.map((s) => (
|
||||
<div key={s.id} className='flex items-center justify-between gap-[12px]'>
|
||||
<div className='flex min-w-0 flex-col justify-center gap-[1px]'>
|
||||
<span className='truncate font-medium text-[14px]'>{s.name}</span>
|
||||
<p className='truncate text-[13px] text-[var(--text-muted)]'>{s.description}</p>
|
||||
</div>
|
||||
<div className='flex flex-shrink-0 items-center gap-[8px]'>
|
||||
<Button variant='default' onClick={() => setEditingSkill(s)}>
|
||||
Edit
|
||||
</Button>
|
||||
<Button
|
||||
variant='ghost'
|
||||
onClick={() => handleDeleteClick(s.id)}
|
||||
disabled={deletingSkills.has(s.id)}
|
||||
>
|
||||
{deletingSkills.has(s.id) ? 'Deleting...' : 'Delete'}
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
))}
|
||||
{showNoResults && (
|
||||
<div className='py-[16px] text-center text-[13px] text-[var(--text-muted)]'>
|
||||
No skills found matching "{searchTerm}"
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<SkillModal
|
||||
open={showAddForm || !!editingSkill}
|
||||
onOpenChange={(open) => {
|
||||
if (!open) {
|
||||
setShowAddForm(false)
|
||||
setEditingSkill(null)
|
||||
}
|
||||
}}
|
||||
onSave={handleSkillSaved}
|
||||
onDelete={(skillId) => {
|
||||
setEditingSkill(null)
|
||||
handleDeleteClick(skillId)
|
||||
}}
|
||||
initialValues={editingSkill ?? undefined}
|
||||
/>
|
||||
|
||||
<Modal open={showDeleteDialog} onOpenChange={setShowDeleteDialog}>
|
||||
<ModalContent size='sm'>
|
||||
<ModalHeader>Delete Skill</ModalHeader>
|
||||
<ModalBody>
|
||||
<p className='text-[12px] text-[var(--text-secondary)]'>
|
||||
Are you sure you want to delete{' '}
|
||||
<span className='font-medium text-[var(--text-primary)]'>{skillToDelete?.name}</span>?{' '}
|
||||
<span className='text-[var(--text-error)]'>This action cannot be undone.</span>
|
||||
</p>
|
||||
</ModalBody>
|
||||
<ModalFooter>
|
||||
<Button variant='default' onClick={() => setShowDeleteDialog(false)}>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button variant='destructive' onClick={handleDeleteSkill}>
|
||||
Delete
|
||||
</Button>
|
||||
</ModalFooter>
|
||||
</ModalContent>
|
||||
</Modal>
|
||||
</>
|
||||
)
|
||||
}
|
||||
@@ -34,7 +34,7 @@ import {
|
||||
SModalSidebarSection,
|
||||
SModalSidebarSectionTitle,
|
||||
} from '@/components/emcn'
|
||||
import { McpIcon } from '@/components/icons'
|
||||
import { AgentSkillsIcon, McpIcon } from '@/components/icons'
|
||||
import { useSession } from '@/lib/auth/auth-client'
|
||||
import { getSubscriptionStatus } from '@/lib/billing/client'
|
||||
import { getEnv, isTruthy } from '@/lib/core/config/env'
|
||||
@@ -52,6 +52,7 @@ import {
|
||||
General,
|
||||
Integrations,
|
||||
MCP,
|
||||
Skills,
|
||||
Subscription,
|
||||
TeamManagement,
|
||||
WorkflowMcpServers,
|
||||
@@ -93,6 +94,7 @@ type SettingsSection =
|
||||
| 'copilot'
|
||||
| 'mcp'
|
||||
| 'custom-tools'
|
||||
| 'skills'
|
||||
| 'workflow-mcp-servers'
|
||||
| 'debug'
|
||||
|
||||
@@ -156,6 +158,7 @@ const allNavigationItems: NavigationItem[] = [
|
||||
},
|
||||
{ id: 'integrations', label: 'Integrations', icon: Connections, section: 'tools' },
|
||||
{ id: 'custom-tools', label: 'Custom Tools', icon: Wrench, section: 'tools' },
|
||||
{ id: 'skills', label: 'Skills', icon: AgentSkillsIcon, section: 'tools' },
|
||||
{ id: 'mcp', label: 'MCP Tools', icon: McpIcon, section: 'tools' },
|
||||
{ id: 'environment', label: 'Environment', icon: FolderCode, section: 'system' },
|
||||
{ id: 'apikeys', label: 'API Keys', icon: Key, section: 'system' },
|
||||
@@ -265,6 +268,9 @@ export function SettingsModal({ open, onOpenChange }: SettingsModalProps) {
|
||||
if (item.id === 'custom-tools' && permissionConfig.disableCustomTools) {
|
||||
return false
|
||||
}
|
||||
if (item.id === 'skills' && permissionConfig.disableSkills) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Self-hosted override allows showing the item when not on hosted
|
||||
if (item.selfHostedOverride && !isHosted) {
|
||||
@@ -556,6 +562,7 @@ export function SettingsModal({ open, onOpenChange }: SettingsModalProps) {
|
||||
{effectiveActiveSection === 'copilot' && <Copilot />}
|
||||
{effectiveActiveSection === 'mcp' && <MCP initialServerId={pendingMcpServerId} />}
|
||||
{effectiveActiveSection === 'custom-tools' && <CustomTools />}
|
||||
{effectiveActiveSection === 'skills' && <Skills />}
|
||||
{effectiveActiveSection === 'workflow-mcp-servers' && <WorkflowMcpServers />}
|
||||
{effectiveActiveSection === 'debug' && <Debug />}
|
||||
</SModalMainBody>
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
|
||||
// Use the real registry module, not the global mock from vitest.setup.ts
|
||||
vi.unmock('@/blocks/registry')
|
||||
|
||||
import { generateRouterPrompt } from '@/blocks/blocks/router'
|
||||
@@ -15,7 +14,7 @@ import {
|
||||
} from '@/blocks/registry'
|
||||
import { AuthMode } from '@/blocks/types'
|
||||
|
||||
describe('Blocks Module', () => {
|
||||
describe.concurrent('Blocks Module', () => {
|
||||
describe('Registry', () => {
|
||||
it('should have a non-empty registry of blocks', () => {
|
||||
expect(Object.keys(registry).length).toBeGreaterThan(0)
|
||||
@@ -409,6 +408,7 @@ describe('Blocks Module', () => {
|
||||
'workflow-input-mapper',
|
||||
'text',
|
||||
'router-input',
|
||||
'skill-input',
|
||||
]
|
||||
|
||||
const blocks = getAllBlocks()
|
||||
|
||||
@@ -407,6 +407,12 @@ Return ONLY the JSON array.`,
|
||||
type: 'tool-input',
|
||||
defaultValue: [],
|
||||
},
|
||||
{
|
||||
id: 'skills',
|
||||
title: 'Skills',
|
||||
type: 'skill-input',
|
||||
defaultValue: [],
|
||||
},
|
||||
{
|
||||
id: 'apiKey',
|
||||
title: 'API Key',
|
||||
@@ -769,6 +775,7 @@ Example 3 (Array Input):
|
||||
description: 'Thinking level for models with extended thinking (Anthropic Claude, Gemini 3)',
|
||||
},
|
||||
tools: { type: 'json', description: 'Available tools configuration' },
|
||||
skills: { type: 'json', description: 'Selected skills configuration' },
|
||||
},
|
||||
outputs: {
|
||||
content: { type: 'string', description: 'Generated response content' },
|
||||
|
||||
102
apps/sim/blocks/blocks/airweave.ts
Normal file
102
apps/sim/blocks/blocks/airweave.ts
Normal file
@@ -0,0 +1,102 @@
|
||||
import { AirweaveIcon } from '@/components/icons'
|
||||
import type { BlockConfig } from '@/blocks/types'
|
||||
import { AuthMode } from '@/blocks/types'
|
||||
import type { AirweaveSearchResponse } from '@/tools/airweave/types'
|
||||
|
||||
export const AirweaveBlock: BlockConfig<AirweaveSearchResponse> = {
|
||||
type: 'airweave',
|
||||
name: 'Airweave',
|
||||
description: 'Search your synced data collections',
|
||||
authMode: AuthMode.ApiKey,
|
||||
longDescription:
|
||||
'Search across your synced data sources using Airweave. Supports semantic search with hybrid, neural, or keyword retrieval strategies. Optionally generate AI-powered answers from search results.',
|
||||
docsLink: 'https://docs.airweave.ai',
|
||||
category: 'tools',
|
||||
bgColor: '#6366F1',
|
||||
icon: AirweaveIcon,
|
||||
subBlocks: [
|
||||
{
|
||||
id: 'collectionId',
|
||||
title: 'Collection ID',
|
||||
type: 'short-input',
|
||||
placeholder: 'Enter your collection readable ID...',
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
id: 'query',
|
||||
title: 'Search Query',
|
||||
type: 'long-input',
|
||||
placeholder: 'Enter your search query...',
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
id: 'limit',
|
||||
title: 'Max Results',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: '10', id: '10' },
|
||||
{ label: '25', id: '25' },
|
||||
{ label: '50', id: '50' },
|
||||
{ label: '100', id: '100' },
|
||||
],
|
||||
value: () => '25',
|
||||
},
|
||||
{
|
||||
id: 'retrievalStrategy',
|
||||
title: 'Retrieval Strategy',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'Hybrid (Default)', id: 'hybrid' },
|
||||
{ label: 'Neural', id: 'neural' },
|
||||
{ label: 'Keyword', id: 'keyword' },
|
||||
],
|
||||
value: () => 'hybrid',
|
||||
},
|
||||
{
|
||||
id: 'expandQuery',
|
||||
title: 'Expand Query',
|
||||
type: 'switch',
|
||||
description: 'Generate query variations to improve recall',
|
||||
},
|
||||
{
|
||||
id: 'rerank',
|
||||
title: 'Rerank Results',
|
||||
type: 'switch',
|
||||
description: 'Reorder results for improved relevance using LLM',
|
||||
},
|
||||
{
|
||||
id: 'generateAnswer',
|
||||
title: 'Generate Answer',
|
||||
type: 'switch',
|
||||
description: 'Generate a natural-language answer from results',
|
||||
},
|
||||
{
|
||||
id: 'apiKey',
|
||||
title: 'API Key',
|
||||
type: 'short-input',
|
||||
placeholder: 'Enter your Airweave API key',
|
||||
password: true,
|
||||
required: true,
|
||||
},
|
||||
],
|
||||
tools: {
|
||||
access: ['airweave_search'],
|
||||
},
|
||||
inputs: {
|
||||
collectionId: { type: 'string', description: 'Airweave collection readable ID' },
|
||||
query: { type: 'string', description: 'Search query text' },
|
||||
apiKey: { type: 'string', description: 'Airweave API key' },
|
||||
limit: { type: 'number', description: 'Maximum number of results' },
|
||||
retrievalStrategy: {
|
||||
type: 'string',
|
||||
description: 'Retrieval strategy (hybrid/neural/keyword)',
|
||||
},
|
||||
expandQuery: { type: 'boolean', description: 'Generate query variations' },
|
||||
rerank: { type: 'boolean', description: 'Rerank results with LLM' },
|
||||
generateAnswer: { type: 'boolean', description: 'Generate AI answer' },
|
||||
},
|
||||
outputs: {
|
||||
results: { type: 'json', description: 'Search results with content and metadata' },
|
||||
completion: { type: 'string', description: 'AI-generated answer (when enabled)' },
|
||||
},
|
||||
}
|
||||
@@ -2,6 +2,7 @@ import { A2ABlock } from '@/blocks/blocks/a2a'
|
||||
import { AgentBlock } from '@/blocks/blocks/agent'
|
||||
import { AhrefsBlock } from '@/blocks/blocks/ahrefs'
|
||||
import { AirtableBlock } from '@/blocks/blocks/airtable'
|
||||
import { AirweaveBlock } from '@/blocks/blocks/airweave'
|
||||
import { ApiBlock } from '@/blocks/blocks/api'
|
||||
import { ApiTriggerBlock } from '@/blocks/blocks/api_trigger'
|
||||
import { ApifyBlock } from '@/blocks/blocks/apify'
|
||||
@@ -167,6 +168,7 @@ export const registry: Record<string, BlockConfig> = {
|
||||
agent: AgentBlock,
|
||||
ahrefs: AhrefsBlock,
|
||||
airtable: AirtableBlock,
|
||||
airweave: AirweaveBlock,
|
||||
api: ApiBlock,
|
||||
api_trigger: ApiTriggerBlock,
|
||||
apify: ApifyBlock,
|
||||
|
||||
@@ -51,6 +51,7 @@ export type SubBlockType =
|
||||
| 'code' // Code editor
|
||||
| 'switch' // Toggle button
|
||||
| 'tool-input' // Tool configuration
|
||||
| 'skill-input' // Skill selection for agent blocks
|
||||
| 'checkbox-list' // Multiple selection
|
||||
| 'grouped-checkbox-list' // Grouped, scrollable checkbox list with select all
|
||||
| 'condition-input' // Conditional logic
|
||||
|
||||
@@ -1131,6 +1131,32 @@ export function AirtableIcon(props: SVGProps<SVGSVGElement>) {
|
||||
)
|
||||
}
|
||||
|
||||
export function AirweaveIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg
|
||||
{...props}
|
||||
width='143'
|
||||
height='143'
|
||||
viewBox='0 0 143 143'
|
||||
fill='none'
|
||||
xmlns='http://www.w3.org/2000/svg'
|
||||
>
|
||||
<path
|
||||
d='M89.8854 128.872C79.9165 123.339 66.7502 115.146 60.5707 107.642L60.0432 107.018C58.7836 105.5 57.481 104.014 56.1676 102.593C51.9152 97.9641 47.3614 93.7978 42.646 90.2021C40.7405 88.7487 38.7704 87.3492 36.8111 86.0789C35.7991 85.4222 34.8302 84.8193 33.9151 84.2703C31.6221 82.903 28.8338 82.5263 26.2716 83.2476C23.8385 83.9366 21.89 85.5406 20.7596 87.7476C18.5634 92.0323 20.0814 97.3289 24.2046 99.805C27.5204 101.786 30.7608 104.111 33.8398 106.717C34.2381 107.05 34.3996 107.578 34.2596 108.062C33.1292 112.185 31.9989 118.957 31.5682 121.67C30.6424 127.429 33.4737 133.081 38.5982 135.751L38.7812 135.848C41.0204 137 43.6472 136.946 45.8219 135.697C47.9858 134.459 49.353 132.231 49.4822 129.733C49.536 128.657 49.6006 127.58 49.676 126.59C49.719 126.062 50.042 125.632 50.5264 125.459C50.6772 125.406 50.8494 125.373 51.0001 125.373C51.3554 125.373 51.6784 125.513 51.9475 125.782C56.243 130.185 60.8829 134.169 65.7167 137.625C70.3674 140.951 75.8686 142.706 81.639 142.706C83.7383 142.706 85.8376 142.469 87.8938 141.995L88.1199 141.942C90.9943 141.274 93.029 139.024 93.4488 136.085C93.8687 133.146 92.4476 130.315 89.8747 128.883H89.8639L89.8854 128.872Z'
|
||||
fill='currentColor'
|
||||
/>
|
||||
<path
|
||||
d='M142.551 58.1747L142.529 58.0563C142.045 55.591 140.118 53.7069 137.598 53.2548C135.112 52.8134 132.754 53.8577 131.484 55.9893L131.408 56.1077C126.704 64.1604 120.061 71.6101 111.653 78.2956C109.446 80.0504 107.293 81.902 105.226 83.8075C103.644 85.2717 101.265 85.53 99.4452 84.4212C97.6474 83.3339 95.8495 82.1389 94.1055 80.8686C90.3268 78.1233 86.6772 74.9475 83.2753 71.4271C81.4989 69.597 79.798 67.6915 78.1939 65.7321C76.0408 63.1161 73.7477 60.5539 71.3685 58.1316C66.3195 52.9857 56.6089 45.9127 53.7453 43.878C53.3792 43.6304 53.1639 43.2428 53.0993 42.8014C53.0455 42.3601 53.1639 41.9509 53.4546 41.6064C55.274 39.4318 56.9965 37.1818 58.5683 34.921C60.2369 32.5311 60.786 29.6028 60.0862 26.8899C59.408 24.2523 57.6424 22.11 55.134 20.8827C50.9139 18.7942 45.8972 20.0968 43.2273 23.9293C40.8373 27.3636 38.0167 30.7332 34.8732 33.9306C34.5718 34.232 34.1304 34.3397 33.7213 34.1889C30.5239 33.1447 27.2296 32.2942 23.9461 31.659C23.7093 31.616 23.354 31.5514 22.9126 31.4975C16.4102 30.5286 10.1123 33.7798 7.21639 39.5717L7.1195 39.7548C6.18289 41.628 6.26902 43.8349 7.32405 45.6651C8.40061 47.5167 10.3277 48.701 12.4592 48.8194C13.4604 48.8732 14.4401 48.9378 15.3659 49.0024C15.7966 49.0347 16.1411 49.2823 16.3025 49.6914C16.4533 50.1112 16.3671 50.5419 16.0657 50.8541C12.147 54.8804 8.60515 59.1974 5.5262 63.6867C1.1446 70.0814 -0.481008 78.2095 1.08 85.9822L1.10154 86.1006C1.70441 89.0719 4.05131 91.2035 7.07644 91.5264C9.98315 91.8386 12.6099 90.3208 13.7619 87.6724L13.8265 87.5109C18.6925 75.8625 26.7559 65.5168 37.7907 56.7536C38.3182 56.3445 39.0072 56.28 39.567 56.5922C45.3373 59.768 50.8601 63.902 55.9738 68.8864C56.5982 69.4893 56.6089 70.5013 56.0168 71.1257C53.4761 73.8063 51.0862 76.6054 48.9115 79.469C47.2106 81.7083 47.5335 84.8949 49.6221 86.7358L53.3254 89.9977L53.2824 90.0409C53.8637 90.5576 54.445 91.0744 55.0264 91.5911L55.8123 92.194C56.9319 93.1844 58.3529 93.6365 59.8386 93.4858C61.3027 93.3351 62.67 92.56 63.5635 91.3758C65.1353 89.2873 
66.8578 87.2525 68.6556 85.304C68.957 84.9702 69.3661 84.798 69.8075 84.7872C70.2705 84.7872 70.6257 84.9379 70.9164 85.2286C75.8147 90.0624 81.1114 94.3686 86.6772 97.9966C88.8626 99.4176 89.4978 102.26 88.1306 104.477C86.9248 106.448 85.7729 108.493 84.7179 110.539C83.5014 112.918 83.2968 115.738 84.1688 118.257C84.9978 120.68 86.7095 122.585 88.981 123.64C90.2514 124.232 91.5971 124.534 92.9859 124.534C96.5062 124.534 99.682 122.596 101.286 119.452C102.729 116.61 104.419 113.8 106.281 111.131C107.369 109.559 109.36 108.838 111.255 109.322C115.26 110.355 120.643 111.421 124.454 112.143C128.308 112.864 132.119 111.023 133.96 107.578L134.143 107.233C135.521 104.628 135.531 101.506 134.164 98.8901C132.786 96.2526 130.181 94.4655 127.21 94.121C126.478 94.0349 125.778 93.9488 125.11 93.8626C124.97 93.8411 124.852 93.8196 124.744 93.798L123.356 93.4751L124.357 92.4523C124.432 92.377 124.529 92.2801 124.658 92.194C128.771 88.8028 132.571 85.1963 135.962 81.4714C141.668 75.1951 144.122 66.4965 142.518 58.1747H142.529H142.551Z'
|
||||
fill='currentColor'
|
||||
/>
|
||||
<path
|
||||
d='M56.6506 14.3371C65.5861 19.6338 77.4067 27.3743 82.9833 34.1674C83.64 34.9532 84.2967 35.7391 84.9534 36.4927C86.1591 37.8815 86.2991 39.8731 85.2979 41.4233C83.4892 44.2116 81.4115 46.9569 79.1399 49.5945C77.4713 51.5107 77.4067 54.3098 78.9785 56.2476L79.0431 56.323C79.2261 56.5598 79.4306 56.8074 79.6136 57.0442C81.2931 59.1758 83.0801 61.2213 84.9211 63.1375C85.9007 64.1603 87.2249 64.7309 88.6352 64.7309L88.7644 65.5275L88.7429 64.7309C90.207 64.6986 91.6173 64.0526 92.5969 62.933C94.8362 60.4031 96.9247 57.744 98.8302 55.0633C100.133 53.2224 102.63 52.8026 104.525 54.1052C106.463 55.4402 108.465 56.7105 110.457 57.8839C112.793 59.2511 115.614 59.5095 118.165 58.5621C120.749 57.604 122.762 55.5694 123.656 52.9533C125.055 48.9055 123.257 44.2547 119.382 41.9078C116.755 40.3145 114.15 38.5166 111.674 36.5788C110.382 35.5561 109.833 33.8767 110.296 32.2941C111.437 28.3001 112.481 23.1218 113.148 19.4831C113.837 15.7259 112.147 11.8826 108.939 9.94477L108.562 9.72944C105.871 8.12537 102.587 8.00696 99.7668 9.40649C96.9247 10.8168 95.03 13.5405 94.6855 16.6733L94.6639 16.867C94.6209 17.2546 94.384 17.5453 94.018 17.6637C93.652 17.7821 93.2859 17.6852 93.0168 17.4269C89.0012 13.1422 84.738 9.25576 80.3134 5.8646C74.3708 1.31075 66.7811 -0.583999 59.4928 0.675575L59.1805 0.729423C56.1124 1.2677 53.7547 3.60383 53.1949 6.68279C52.6351 9.72946 53.9915 12.7223 56.6722 14.3048H56.6614L56.6506 14.3371Z'
|
||||
fill='currentColor'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function GoogleDocsIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg
|
||||
@@ -5436,3 +5462,24 @@ export function EnrichSoIcon(props: SVGProps<SVGSVGElement>) {
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function AgentSkillsIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg
|
||||
{...props}
|
||||
xmlns='http://www.w3.org/2000/svg'
|
||||
width='24'
|
||||
height='24'
|
||||
viewBox='0 0 32 32'
|
||||
fill='none'
|
||||
>
|
||||
<path d='M16 0.5L29.4234 8.25V23.75L16 31.5L2.57661 23.75V8.25L16 0.5Z' fill='currentColor' />
|
||||
<path
|
||||
d='M16 6L24.6603 11V21L16 26L7.33975 21V11L16 6Z'
|
||||
fill='currentColor'
|
||||
stroke='var(--background, white)'
|
||||
strokeWidth='3'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -5,10 +5,43 @@ import { CheckCircle, ChevronDown, ChevronRight, Loader2, Settings, XCircle } fr
|
||||
import { Badge } from '@/components/emcn'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { Collapsible, CollapsibleContent, CollapsibleTrigger } from '@/components/ui/collapsible'
|
||||
import type { ToolCallGroup, ToolCallState } from '@/lib/copilot/types'
|
||||
import { cn } from '@/lib/core/utils/cn'
|
||||
import { formatDuration } from '@/lib/core/utils/formatting'
|
||||
|
||||
interface ToolCallState {
|
||||
id: string
|
||||
name: string
|
||||
displayName?: string
|
||||
parameters?: Record<string, unknown>
|
||||
state:
|
||||
| 'detecting'
|
||||
| 'pending'
|
||||
| 'executing'
|
||||
| 'completed'
|
||||
| 'error'
|
||||
| 'rejected'
|
||||
| 'applied'
|
||||
| 'ready_for_review'
|
||||
| 'aborted'
|
||||
| 'skipped'
|
||||
| 'background'
|
||||
startTime?: number
|
||||
endTime?: number
|
||||
duration?: number
|
||||
result?: unknown
|
||||
error?: string
|
||||
progress?: string
|
||||
}
|
||||
|
||||
interface ToolCallGroup {
|
||||
id: string
|
||||
toolCalls: ToolCallState[]
|
||||
status: 'pending' | 'in_progress' | 'completed' | 'error'
|
||||
startTime?: number
|
||||
endTime?: number
|
||||
summary?: string
|
||||
}
|
||||
|
||||
interface ToolCallProps {
|
||||
toolCall: ToolCallState
|
||||
isCompact?: boolean
|
||||
|
||||
@@ -367,6 +367,12 @@ export function AccessControl() {
|
||||
category: 'Tools',
|
||||
configKey: 'disableCustomTools' as const,
|
||||
},
|
||||
{
|
||||
id: 'disable-skills',
|
||||
label: 'Skills',
|
||||
category: 'Tools',
|
||||
configKey: 'disableSkills' as const,
|
||||
},
|
||||
{
|
||||
id: 'hide-trace-spans',
|
||||
label: 'Trace Spans',
|
||||
@@ -950,6 +956,7 @@ export function AccessControl() {
|
||||
!editingConfig?.hideFilesTab &&
|
||||
!editingConfig?.disableMcpTools &&
|
||||
!editingConfig?.disableCustomTools &&
|
||||
!editingConfig?.disableSkills &&
|
||||
!editingConfig?.hideTraceSpans &&
|
||||
!editingConfig?.disableInvitations &&
|
||||
!editingConfig?.hideDeployApi &&
|
||||
@@ -969,6 +976,7 @@ export function AccessControl() {
|
||||
hideFilesTab: allVisible,
|
||||
disableMcpTools: allVisible,
|
||||
disableCustomTools: allVisible,
|
||||
disableSkills: allVisible,
|
||||
hideTraceSpans: allVisible,
|
||||
disableInvitations: allVisible,
|
||||
hideDeployApi: allVisible,
|
||||
@@ -989,6 +997,7 @@ export function AccessControl() {
|
||||
!editingConfig?.hideFilesTab &&
|
||||
!editingConfig?.disableMcpTools &&
|
||||
!editingConfig?.disableCustomTools &&
|
||||
!editingConfig?.disableSkills &&
|
||||
!editingConfig?.hideTraceSpans &&
|
||||
!editingConfig?.disableInvitations &&
|
||||
!editingConfig?.hideDeployApi &&
|
||||
|
||||
@@ -43,6 +43,13 @@ export class CustomToolsNotAllowedError extends Error {
|
||||
}
|
||||
}
|
||||
|
||||
export class SkillsNotAllowedError extends Error {
|
||||
constructor() {
|
||||
super('Skills are not allowed based on your permission group settings')
|
||||
this.name = 'SkillsNotAllowedError'
|
||||
}
|
||||
}
|
||||
|
||||
export class InvitationsNotAllowedError extends Error {
|
||||
constructor() {
|
||||
super('Invitations are not allowed based on your permission group settings')
|
||||
@@ -201,6 +208,26 @@ export async function validateCustomToolsAllowed(
|
||||
}
|
||||
}
|
||||
|
||||
export async function validateSkillsAllowed(
|
||||
userId: string | undefined,
|
||||
ctx?: ExecutionContext
|
||||
): Promise<void> {
|
||||
if (!userId) {
|
||||
return
|
||||
}
|
||||
|
||||
const config = await getPermissionConfig(userId, ctx)
|
||||
|
||||
if (!config) {
|
||||
return
|
||||
}
|
||||
|
||||
if (config.disableSkills) {
|
||||
logger.warn('Skills blocked by permission group', { userId })
|
||||
throw new SkillsNotAllowedError()
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates if the user is allowed to send invitations.
|
||||
* Also checks the global feature flag.
|
||||
|
||||
@@ -4,6 +4,7 @@ import { BlockType } from '@/executor/constants'
|
||||
import type { DAG } from '@/executor/dag/builder'
|
||||
import type { EdgeManager } from '@/executor/execution/edge-manager'
|
||||
import { serializePauseSnapshot } from '@/executor/execution/snapshot-serializer'
|
||||
import type { SerializableExecutionState } from '@/executor/execution/types'
|
||||
import type { NodeExecutionOrchestrator } from '@/executor/orchestrators/node'
|
||||
import type {
|
||||
ExecutionContext,
|
||||
@@ -135,6 +136,7 @@ export class ExecutionEngine {
|
||||
success: false,
|
||||
output: this.finalOutput,
|
||||
logs: this.context.blockLogs,
|
||||
executionState: this.getSerializableExecutionState(),
|
||||
metadata: this.context.metadata,
|
||||
status: 'cancelled',
|
||||
}
|
||||
@@ -144,6 +146,7 @@ export class ExecutionEngine {
|
||||
success: true,
|
||||
output: this.finalOutput,
|
||||
logs: this.context.blockLogs,
|
||||
executionState: this.getSerializableExecutionState(),
|
||||
metadata: this.context.metadata,
|
||||
}
|
||||
} catch (error) {
|
||||
@@ -157,6 +160,7 @@ export class ExecutionEngine {
|
||||
success: false,
|
||||
output: this.finalOutput,
|
||||
logs: this.context.blockLogs,
|
||||
executionState: this.getSerializableExecutionState(),
|
||||
metadata: this.context.metadata,
|
||||
status: 'cancelled',
|
||||
}
|
||||
@@ -459,6 +463,7 @@ export class ExecutionEngine {
|
||||
success: true,
|
||||
output: this.collectPauseResponses(),
|
||||
logs: this.context.blockLogs,
|
||||
executionState: this.getSerializableExecutionState(snapshotSeed),
|
||||
metadata: this.context.metadata,
|
||||
status: 'paused',
|
||||
pausePoints,
|
||||
@@ -466,6 +471,24 @@ export class ExecutionEngine {
|
||||
}
|
||||
}
|
||||
|
||||
private getSerializableExecutionState(snapshotSeed?: {
|
||||
snapshot: string
|
||||
}): SerializableExecutionState | undefined {
|
||||
try {
|
||||
const serializedSnapshot =
|
||||
snapshotSeed?.snapshot ?? serializePauseSnapshot(this.context, [], this.dag).snapshot
|
||||
const parsedSnapshot = JSON.parse(serializedSnapshot) as {
|
||||
state?: SerializableExecutionState
|
||||
}
|
||||
return parsedSnapshot.state
|
||||
} catch (error) {
|
||||
logger.warn('Failed to serialize execution state', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
|
||||
private collectPauseResponses(): NormalizedBlockOutput {
|
||||
const responses = Array.from(this.pausedBlocks.values()).map((pause) => pause.response)
|
||||
|
||||
|
||||
@@ -11,9 +11,15 @@ import {
|
||||
validateCustomToolsAllowed,
|
||||
validateMcpToolsAllowed,
|
||||
validateModelProvider,
|
||||
validateSkillsAllowed,
|
||||
} from '@/ee/access-control/utils/permission-check'
|
||||
import { AGENT, BlockType, DEFAULTS, REFERENCE, stripCustomToolPrefix } from '@/executor/constants'
|
||||
import { memoryService } from '@/executor/handlers/agent/memory'
|
||||
import {
|
||||
buildLoadSkillTool,
|
||||
buildSkillsSystemPromptSection,
|
||||
resolveSkillMetadata,
|
||||
} from '@/executor/handlers/agent/skills-resolver'
|
||||
import type {
|
||||
AgentInputs,
|
||||
Message,
|
||||
@@ -57,8 +63,21 @@ export class AgentBlockHandler implements BlockHandler {
|
||||
|
||||
const providerId = getProviderFromModel(model)
|
||||
const formattedTools = await this.formatTools(ctx, filteredInputs.tools || [])
|
||||
|
||||
// Resolve skill metadata for progressive disclosure
|
||||
const skillInputs = filteredInputs.skills ?? []
|
||||
let skillMetadata: Array<{ name: string; description: string }> = []
|
||||
if (skillInputs.length > 0 && ctx.workspaceId) {
|
||||
await validateSkillsAllowed(ctx.userId, ctx)
|
||||
skillMetadata = await resolveSkillMetadata(skillInputs, ctx.workspaceId)
|
||||
if (skillMetadata.length > 0) {
|
||||
const skillNames = skillMetadata.map((s) => s.name)
|
||||
formattedTools.push(buildLoadSkillTool(skillNames))
|
||||
}
|
||||
}
|
||||
|
||||
const streamingConfig = this.getStreamingConfig(ctx, block)
|
||||
const messages = await this.buildMessages(ctx, filteredInputs)
|
||||
const messages = await this.buildMessages(ctx, filteredInputs, skillMetadata)
|
||||
|
||||
const providerRequest = this.buildProviderRequest({
|
||||
ctx,
|
||||
@@ -723,7 +742,8 @@ export class AgentBlockHandler implements BlockHandler {
|
||||
|
||||
private async buildMessages(
|
||||
ctx: ExecutionContext,
|
||||
inputs: AgentInputs
|
||||
inputs: AgentInputs,
|
||||
skillMetadata: Array<{ name: string; description: string }> = []
|
||||
): Promise<Message[] | undefined> {
|
||||
const messages: Message[] = []
|
||||
const memoryEnabled = inputs.memoryType && inputs.memoryType !== 'none'
|
||||
@@ -803,6 +823,20 @@ export class AgentBlockHandler implements BlockHandler {
|
||||
messages.unshift(...systemMessages)
|
||||
}
|
||||
|
||||
// 8. Inject skill metadata into the system message (progressive disclosure)
|
||||
if (skillMetadata.length > 0) {
|
||||
const skillSection = buildSkillsSystemPromptSection(skillMetadata)
|
||||
const systemIdx = messages.findIndex((m) => m.role === 'system')
|
||||
if (systemIdx >= 0) {
|
||||
messages[systemIdx] = {
|
||||
...messages[systemIdx],
|
||||
content: messages[systemIdx].content + skillSection,
|
||||
}
|
||||
} else {
|
||||
messages.unshift({ role: 'system', content: skillSection.trim() })
|
||||
}
|
||||
}
|
||||
|
||||
return messages.length > 0 ? messages : undefined
|
||||
}
|
||||
|
||||
|
||||
122
apps/sim/executor/handlers/agent/skills-resolver.ts
Normal file
122
apps/sim/executor/handlers/agent/skills-resolver.ts
Normal file
@@ -0,0 +1,122 @@
|
||||
import { db } from '@sim/db'
|
||||
import { skill } from '@sim/db/schema'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { and, eq, inArray } from 'drizzle-orm'
|
||||
import type { SkillInput } from '@/executor/handlers/agent/types'
|
||||
|
||||
const logger = createLogger('SkillsResolver')
|
||||
|
||||
function escapeXml(str: string): string {
|
||||
return str
|
||||
.replace(/&/g, '&')
|
||||
.replace(/</g, '<')
|
||||
.replace(/>/g, '>')
|
||||
.replace(/"/g, '"')
|
||||
}
|
||||
|
||||
interface SkillMetadata {
|
||||
name: string
|
||||
description: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch skill metadata (name + description) for system prompt injection.
|
||||
* Only returns lightweight data so the LLM knows what skills are available.
|
||||
*/
|
||||
export async function resolveSkillMetadata(
|
||||
skillInputs: SkillInput[],
|
||||
workspaceId: string
|
||||
): Promise<SkillMetadata[]> {
|
||||
if (!skillInputs.length || !workspaceId) return []
|
||||
|
||||
const skillIds = skillInputs.map((s) => s.skillId)
|
||||
|
||||
try {
|
||||
const rows = await db
|
||||
.select({ name: skill.name, description: skill.description })
|
||||
.from(skill)
|
||||
.where(and(eq(skill.workspaceId, workspaceId), inArray(skill.id, skillIds)))
|
||||
|
||||
return rows
|
||||
} catch (error) {
|
||||
logger.error('Failed to resolve skill metadata', { error, skillIds, workspaceId })
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch full skill content for a load_skill tool response.
|
||||
* Called when the LLM decides a skill is relevant and invokes load_skill.
|
||||
*/
|
||||
export async function resolveSkillContent(
|
||||
skillName: string,
|
||||
workspaceId: string
|
||||
): Promise<string | null> {
|
||||
if (!skillName || !workspaceId) return null
|
||||
|
||||
try {
|
||||
const rows = await db
|
||||
.select({ content: skill.content, name: skill.name })
|
||||
.from(skill)
|
||||
.where(and(eq(skill.workspaceId, workspaceId), eq(skill.name, skillName)))
|
||||
.limit(1)
|
||||
|
||||
if (rows.length === 0) {
|
||||
logger.warn('Skill not found', { skillName, workspaceId })
|
||||
return null
|
||||
}
|
||||
|
||||
return rows[0].content
|
||||
} catch (error) {
|
||||
logger.error('Failed to resolve skill content', { error, skillName, workspaceId })
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the system prompt section that lists available skills.
|
||||
* Uses XML format per the agentskills.io integration guide.
|
||||
*/
|
||||
export function buildSkillsSystemPromptSection(skills: SkillMetadata[]): string {
|
||||
if (!skills.length) return ''
|
||||
|
||||
const skillEntries = skills
|
||||
.map(
|
||||
(s) =>
|
||||
` <skill name="${escapeXml(s.name)}">\n <description>${escapeXml(s.description)}</description>\n </skill>`
|
||||
)
|
||||
.join('\n')
|
||||
|
||||
return [
|
||||
'',
|
||||
'You have access to the following skills. Use the load_skill tool to activate a skill when relevant.',
|
||||
'',
|
||||
'<available_skills>',
|
||||
skillEntries,
|
||||
'</available_skills>',
|
||||
].join('\n')
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the load_skill tool definition for injection into the tools array.
|
||||
* Returns a ProviderToolConfig-compatible object so all providers can process it.
|
||||
*/
|
||||
export function buildLoadSkillTool(skillNames: string[]) {
|
||||
return {
|
||||
id: 'load_skill',
|
||||
name: 'load_skill',
|
||||
description: `Load a skill to get specialized instructions. Available skills: ${skillNames.join(', ')}`,
|
||||
params: {},
|
||||
parameters: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
skill_name: {
|
||||
type: 'string',
|
||||
description: 'Name of the skill to load',
|
||||
enum: skillNames,
|
||||
},
|
||||
},
|
||||
required: ['skill_name'],
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,14 @@
|
||||
export interface SkillInput {
|
||||
skillId: string
|
||||
name?: string
|
||||
description?: string
|
||||
}
|
||||
|
||||
export interface AgentInputs {
|
||||
model?: string
|
||||
responseFormat?: string | object
|
||||
tools?: ToolInput[]
|
||||
skills?: SkillInput[]
|
||||
// Legacy inputs (backward compatible)
|
||||
systemPrompt?: string
|
||||
userPrompt?: string | object
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import type { TraceSpan } from '@/lib/logs/types'
|
||||
import type { PermissionGroupConfig } from '@/lib/permission-groups/types'
|
||||
import type { BlockOutput } from '@/blocks/types'
|
||||
import type { SerializableExecutionState } from '@/executor/execution/types'
|
||||
import type { RunFromBlockContext } from '@/executor/utils/run-from-block'
|
||||
import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types'
|
||||
|
||||
@@ -302,6 +303,7 @@ export interface ExecutionResult {
|
||||
output: NormalizedBlockOutput
|
||||
error?: string
|
||||
logs?: BlockLog[]
|
||||
executionState?: SerializableExecutionState
|
||||
metadata?: ExecutionMetadata
|
||||
status?: 'completed' | 'paused' | 'cancelled'
|
||||
pausePoints?: PausePoint[]
|
||||
|
||||
263
apps/sim/hooks/queries/skills.ts
Normal file
263
apps/sim/hooks/queries/skills.ts
Normal file
@@ -0,0 +1,263 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { keepPreviousData, useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
|
||||
|
||||
const logger = createLogger('SkillsQueries')
|
||||
const API_ENDPOINT = '/api/skills'
|
||||
|
||||
export interface SkillDefinition {
|
||||
id: string
|
||||
workspaceId: string | null
|
||||
userId: string | null
|
||||
name: string
|
||||
description: string
|
||||
content: string
|
||||
createdAt: string
|
||||
updatedAt?: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Query key factories for skills queries
|
||||
*/
|
||||
export const skillsKeys = {
|
||||
all: ['skills'] as const,
|
||||
lists: () => [...skillsKeys.all, 'list'] as const,
|
||||
list: (workspaceId: string) => [...skillsKeys.lists(), workspaceId] as const,
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch skills for a workspace
|
||||
*/
|
||||
async function fetchSkills(workspaceId: string): Promise<SkillDefinition[]> {
|
||||
const response = await fetch(`${API_ENDPOINT}?workspaceId=${workspaceId}`)
|
||||
|
||||
if (!response.ok) {
|
||||
const errorData = await response.json().catch(() => ({}))
|
||||
throw new Error(errorData.error || `Failed to fetch skills: ${response.statusText}`)
|
||||
}
|
||||
|
||||
const { data } = await response.json()
|
||||
|
||||
if (!Array.isArray(data)) {
|
||||
throw new Error('Invalid response format')
|
||||
}
|
||||
|
||||
return data.map((s: Record<string, unknown>) => ({
|
||||
id: s.id as string,
|
||||
workspaceId: (s.workspaceId as string) ?? null,
|
||||
userId: (s.userId as string) ?? null,
|
||||
name: s.name as string,
|
||||
description: s.description as string,
|
||||
content: s.content as string,
|
||||
createdAt: (s.createdAt as string) ?? new Date().toISOString(),
|
||||
updatedAt: s.updatedAt as string | undefined,
|
||||
}))
|
||||
}
|
||||
|
||||
/**
|
||||
* Hook to fetch skills for a workspace
|
||||
*/
|
||||
export function useSkills(workspaceId: string) {
|
||||
return useQuery<SkillDefinition[]>({
|
||||
queryKey: skillsKeys.list(workspaceId),
|
||||
queryFn: () => fetchSkills(workspaceId),
|
||||
enabled: !!workspaceId,
|
||||
staleTime: 60 * 1000,
|
||||
placeholderData: keepPreviousData,
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Create skill mutation
|
||||
*/
|
||||
interface CreateSkillParams {
|
||||
workspaceId: string
|
||||
skill: {
|
||||
name: string
|
||||
description: string
|
||||
content: string
|
||||
}
|
||||
}
|
||||
|
||||
export function useCreateSkill() {
|
||||
const queryClient = useQueryClient()
|
||||
|
||||
return useMutation({
|
||||
mutationFn: async ({ workspaceId, skill: s }: CreateSkillParams) => {
|
||||
logger.info(`Creating skill: ${s.name} in workspace ${workspaceId}`)
|
||||
|
||||
const response = await fetch(API_ENDPOINT, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
skills: [{ name: s.name, description: s.description, content: s.content }],
|
||||
workspaceId,
|
||||
}),
|
||||
})
|
||||
|
||||
const data = await response.json()
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(data.error || 'Failed to create skill')
|
||||
}
|
||||
|
||||
if (!data.data || !Array.isArray(data.data)) {
|
||||
throw new Error('Invalid API response: missing skills data')
|
||||
}
|
||||
|
||||
logger.info(`Created skill: ${s.name}`)
|
||||
return data.data
|
||||
},
|
||||
onSuccess: (_data, variables) => {
|
||||
queryClient.invalidateQueries({ queryKey: skillsKeys.list(variables.workspaceId) })
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
/**
 * Update skill mutation.
 */
interface UpdateSkillParams {
  workspaceId: string
  // Id of the existing skill to modify.
  skillId: string
  // Partial edit; omitted fields keep their current values.
  updates: {
    name?: string
    description?: string
    content?: string
  }
}
|
||||
|
||||
/**
 * Mutation hook that updates a skill with an optimistic cache update.
 *
 * The cached list is patched immediately in onMutate, rolled back in onError,
 * and re-synced with the server in onSettled regardless of outcome.
 */
export function useUpdateSkill() {
  const queryClient = useQueryClient()

  return useMutation({
    mutationFn: async ({ workspaceId, skillId, updates }: UpdateSkillParams) => {
      logger.info(`Updating skill: ${skillId} in workspace ${workspaceId}`)

      // Read the cached skill to fill in fields the caller did not change —
      // the POST sends the complete skill (same endpoint as create; presumably
      // upserts by id — confirm against the API route).
      const currentSkills = queryClient.getQueryData<SkillDefinition[]>(
        skillsKeys.list(workspaceId)
      )
      const currentSkill = currentSkills?.find((s) => s.id === skillId)

      // NOTE(review): this requires the list to be cached already; an update
      // issued before the first fetch completes will fail here.
      if (!currentSkill) {
        throw new Error('Skill not found')
      }

      const response = await fetch(API_ENDPOINT, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          skills: [
            {
              id: skillId,
              name: updates.name ?? currentSkill.name,
              description: updates.description ?? currentSkill.description,
              content: updates.content ?? currentSkill.content,
            },
          ],
          workspaceId,
        }),
      })

      const data = await response.json()

      if (!response.ok) {
        throw new Error(data.error || 'Failed to update skill')
      }

      if (!data.data || !Array.isArray(data.data)) {
        throw new Error('Invalid API response: missing skills data')
      }

      logger.info(`Updated skill: ${skillId}`)
      return data.data
    },
    // Optimistically patch the cached list; returns the previous list so
    // onError can roll back.
    onMutate: async ({ workspaceId, skillId, updates }) => {
      // Cancel in-flight fetches so they don't overwrite the optimistic state.
      await queryClient.cancelQueries({ queryKey: skillsKeys.list(workspaceId) })

      const previousSkills = queryClient.getQueryData<SkillDefinition[]>(
        skillsKeys.list(workspaceId)
      )

      if (previousSkills) {
        queryClient.setQueryData<SkillDefinition[]>(
          skillsKeys.list(workspaceId),
          previousSkills.map((s) =>
            s.id === skillId
              ? {
                  ...s,
                  name: updates.name ?? s.name,
                  description: updates.description ?? s.description,
                  content: updates.content ?? s.content,
                }
              : s
          )
        )
      }

      return { previousSkills }
    },
    // Roll the cache back to the snapshot captured in onMutate.
    onError: (_err, variables, context) => {
      if (context?.previousSkills) {
        queryClient.setQueryData(skillsKeys.list(variables.workspaceId), context.previousSkills)
      }
    },
    // Always re-sync with the server, whether the mutation succeeded or not.
    onSettled: (_data, _error, variables) => {
      queryClient.invalidateQueries({ queryKey: skillsKeys.list(variables.workspaceId) })
    },
  })
}
|
||||
|
||||
/**
 * Delete skill mutation.
 */
interface DeleteSkillParams {
  workspaceId: string
  // Id of the skill to delete.
  skillId: string
}
|
||||
|
||||
export function useDeleteSkill() {
|
||||
const queryClient = useQueryClient()
|
||||
|
||||
return useMutation({
|
||||
mutationFn: async ({ workspaceId, skillId }: DeleteSkillParams) => {
|
||||
logger.info(`Deleting skill: ${skillId}`)
|
||||
|
||||
const response = await fetch(`${API_ENDPOINT}?id=${skillId}&workspaceId=${workspaceId}`, {
|
||||
method: 'DELETE',
|
||||
})
|
||||
|
||||
const data = await response.json()
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(data.error || 'Failed to delete skill')
|
||||
}
|
||||
|
||||
logger.info(`Deleted skill: ${skillId}`)
|
||||
return data
|
||||
},
|
||||
onMutate: async ({ workspaceId, skillId }) => {
|
||||
await queryClient.cancelQueries({ queryKey: skillsKeys.list(workspaceId) })
|
||||
|
||||
const previousSkills = queryClient.getQueryData<SkillDefinition[]>(
|
||||
skillsKeys.list(workspaceId)
|
||||
)
|
||||
|
||||
if (previousSkills) {
|
||||
queryClient.setQueryData<SkillDefinition[]>(
|
||||
skillsKeys.list(workspaceId),
|
||||
previousSkills.filter((s) => s.id !== skillId)
|
||||
)
|
||||
}
|
||||
|
||||
return { previousSkills }
|
||||
},
|
||||
onError: (_err, variables, context) => {
|
||||
if (context?.previousSkills) {
|
||||
queryClient.setQueryData(skillsKeys.list(variables.workspaceId), context.previousSkills)
|
||||
}
|
||||
},
|
||||
onSettled: (_data, _error, variables) => {
|
||||
queryClient.invalidateQueries({ queryKey: skillsKeys.list(variables.workspaceId) })
|
||||
},
|
||||
})
|
||||
}
|
||||
@@ -1,5 +1,12 @@
|
||||
import { useCallback } from 'react'
|
||||
import { createLogger } from '@sim/logger'
|
||||
|
||||
declare global {
|
||||
interface Window {
|
||||
__skipDiffRecording?: boolean
|
||||
}
|
||||
}
|
||||
|
||||
import type { Edge } from 'reactflow'
|
||||
import { useSession } from '@/lib/auth/auth-client'
|
||||
import { enqueueReplaceWorkflowState } from '@/lib/workflows/operations/socket-operations'
|
||||
@@ -908,7 +915,7 @@ export function useUndoRedo() {
|
||||
|
||||
// Set flag to skip recording during this operation
|
||||
|
||||
;(window as any).__skipDiffRecording = true
|
||||
window.__skipDiffRecording = true
|
||||
try {
|
||||
// Restore baseline state and broadcast to everyone
|
||||
if (baselineSnapshot && activeWorkflowId) {
|
||||
@@ -945,7 +952,7 @@ export function useUndoRedo() {
|
||||
logger.info('Clearing diff UI state')
|
||||
useWorkflowDiffStore.getState().clearDiff({ restoreBaseline: false })
|
||||
} finally {
|
||||
;(window as any).__skipDiffRecording = false
|
||||
window.__skipDiffRecording = false
|
||||
}
|
||||
|
||||
logger.info('Undid apply-diff operation successfully')
|
||||
@@ -965,7 +972,7 @@ export function useUndoRedo() {
|
||||
|
||||
// Set flag to skip recording during this operation
|
||||
|
||||
;(window as any).__skipDiffRecording = true
|
||||
window.__skipDiffRecording = true
|
||||
try {
|
||||
// Apply the before-accept state (with markers for this user)
|
||||
useWorkflowStore.getState().replaceWorkflowState(beforeAccept)
|
||||
@@ -1004,7 +1011,7 @@ export function useUndoRedo() {
|
||||
diffAnalysis: diffAnalysis,
|
||||
})
|
||||
} finally {
|
||||
;(window as any).__skipDiffRecording = false
|
||||
window.__skipDiffRecording = false
|
||||
}
|
||||
|
||||
logger.info('Undid accept-diff operation - restored diff view')
|
||||
@@ -1018,7 +1025,7 @@ export function useUndoRedo() {
|
||||
const { useWorkflowStore } = await import('@/stores/workflows/workflow/store')
|
||||
const { useSubBlockStore } = await import('@/stores/workflows/subblock/store')
|
||||
|
||||
;(window as any).__skipDiffRecording = true
|
||||
window.__skipDiffRecording = true
|
||||
try {
|
||||
// Apply the before-reject state (with markers for this user)
|
||||
useWorkflowStore.getState().replaceWorkflowState(beforeReject)
|
||||
@@ -1055,7 +1062,7 @@ export function useUndoRedo() {
|
||||
diffAnalysis: diffAnalysis,
|
||||
})
|
||||
} finally {
|
||||
;(window as any).__skipDiffRecording = false
|
||||
window.__skipDiffRecording = false
|
||||
}
|
||||
|
||||
logger.info('Undid reject-diff operation - restored diff view')
|
||||
@@ -1526,7 +1533,7 @@ export function useUndoRedo() {
|
||||
|
||||
// Set flag to skip recording during this operation
|
||||
|
||||
;(window as any).__skipDiffRecording = true
|
||||
window.__skipDiffRecording = true
|
||||
try {
|
||||
// Manually apply the proposed state and set up diff store (similar to setProposedChanges but with original baseline)
|
||||
const diffStore = useWorkflowDiffStore.getState()
|
||||
@@ -1567,7 +1574,7 @@ export function useUndoRedo() {
|
||||
diffAnalysis: diffAnalysis,
|
||||
})
|
||||
} finally {
|
||||
;(window as any).__skipDiffRecording = false
|
||||
window.__skipDiffRecording = false
|
||||
}
|
||||
|
||||
logger.info('Redid apply-diff operation')
|
||||
@@ -1583,7 +1590,7 @@ export function useUndoRedo() {
|
||||
|
||||
// Set flag to skip recording during this operation
|
||||
|
||||
;(window as any).__skipDiffRecording = true
|
||||
window.__skipDiffRecording = true
|
||||
try {
|
||||
// Clear diff state FIRST to prevent flash of colors (local UI only)
|
||||
// Use setState directly to ensure synchronous clearing
|
||||
@@ -1621,7 +1628,7 @@ export function useUndoRedo() {
|
||||
operationId: opId,
|
||||
})
|
||||
} finally {
|
||||
;(window as any).__skipDiffRecording = false
|
||||
window.__skipDiffRecording = false
|
||||
}
|
||||
|
||||
logger.info('Redid accept-diff operation - cleared diff view')
|
||||
@@ -1635,7 +1642,7 @@ export function useUndoRedo() {
|
||||
const { useWorkflowStore } = await import('@/stores/workflows/workflow/store')
|
||||
const { useSubBlockStore } = await import('@/stores/workflows/subblock/store')
|
||||
|
||||
;(window as any).__skipDiffRecording = true
|
||||
window.__skipDiffRecording = true
|
||||
try {
|
||||
// Clear diff state FIRST to prevent flash of colors (local UI only)
|
||||
// Use setState directly to ensure synchronous clearing
|
||||
@@ -1673,7 +1680,7 @@ export function useUndoRedo() {
|
||||
operationId: opId,
|
||||
})
|
||||
} finally {
|
||||
;(window as any).__skipDiffRecording = false
|
||||
window.__skipDiffRecording = false
|
||||
}
|
||||
|
||||
logger.info('Redid reject-diff operation - cleared diff view')
|
||||
|
||||
@@ -14,7 +14,7 @@ export type UsageLogCategory = 'model' | 'fixed'
|
||||
/**
|
||||
* Usage log source types
|
||||
*/
|
||||
export type UsageLogSource = 'workflow' | 'wand' | 'copilot'
|
||||
export type UsageLogSource = 'workflow' | 'wand' | 'copilot' | 'mcp_copilot'
|
||||
|
||||
/**
|
||||
* Metadata for 'model' category charges
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { COPILOT_CHAT_API_PATH, COPILOT_CHAT_STREAM_API_PATH } from '@/lib/copilot/constants'
|
||||
import type { CopilotMode, CopilotModelId, CopilotTransportMode } from '@/lib/copilot/models'
|
||||
|
||||
const logger = createLogger('CopilotAPI')
|
||||
@@ -82,6 +83,7 @@ export interface SendMessageRequest {
|
||||
executionId?: string
|
||||
}>
|
||||
commands?: string[]
|
||||
resumeFromEventId?: number
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -120,7 +122,7 @@ export async function sendStreamingMessage(
|
||||
request: SendMessageRequest
|
||||
): Promise<StreamingResponse> {
|
||||
try {
|
||||
const { abortSignal, ...requestBody } = request
|
||||
const { abortSignal, resumeFromEventId, ...requestBody } = request
|
||||
try {
|
||||
const preview = Array.isArray((requestBody as any).contexts)
|
||||
? (requestBody as any).contexts.map((c: any) => ({
|
||||
@@ -136,9 +138,56 @@ export async function sendStreamingMessage(
|
||||
? (requestBody as any).contexts.length
|
||||
: 0,
|
||||
contextsPreview: preview,
|
||||
resumeFromEventId,
|
||||
})
|
||||
} catch {}
|
||||
const response = await fetch('/api/copilot/chat', {
|
||||
} catch (error) {
|
||||
logger.warn('Failed to log streaming message context preview', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
|
||||
const streamId = request.userMessageId
|
||||
if (typeof resumeFromEventId === 'number') {
|
||||
if (!streamId) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'streamId is required to resume a stream',
|
||||
status: 400,
|
||||
}
|
||||
}
|
||||
const url = `${COPILOT_CHAT_STREAM_API_PATH}?streamId=${encodeURIComponent(
|
||||
streamId
|
||||
)}&from=${encodeURIComponent(String(resumeFromEventId))}`
|
||||
const response = await fetch(url, {
|
||||
method: 'GET',
|
||||
signal: abortSignal,
|
||||
credentials: 'include',
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorMessage = await handleApiError(response, 'Failed to resume streaming message')
|
||||
return {
|
||||
success: false,
|
||||
error: errorMessage,
|
||||
status: response.status,
|
||||
}
|
||||
}
|
||||
|
||||
if (!response.body) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'No response body received',
|
||||
status: 500,
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
stream: response.body,
|
||||
}
|
||||
}
|
||||
|
||||
const response = await fetch(COPILOT_CHAT_API_PATH, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ ...requestBody, stream: true }),
|
||||
|
||||
66
apps/sim/lib/copilot/chat-context.ts
Normal file
66
apps/sim/lib/copilot/chat-context.ts
Normal file
@@ -0,0 +1,66 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { CopilotFiles } from '@/lib/uploads'
|
||||
import { createFileContent } from '@/lib/uploads/utils/file-utils'
|
||||
|
||||
const logger = createLogger('CopilotChatContext')
|
||||
|
||||
/**
|
||||
* Build conversation history from stored chat messages.
|
||||
*/
|
||||
export function buildConversationHistory(
|
||||
messages: unknown[],
|
||||
conversationId?: string
|
||||
): { history: unknown[]; conversationId?: string } {
|
||||
const history = Array.isArray(messages) ? messages : []
|
||||
return {
|
||||
history,
|
||||
...(conversationId ? { conversationId } : {}),
|
||||
}
|
||||
}
|
||||
|
||||
/** Attachment descriptor accepted from the client when sending a chat message. */
export interface FileAttachmentInput {
  id: string
  // Storage key used to retrieve the file's bytes.
  key: string
  // Display name; callers may send either `name` or `filename` — TODO confirm which.
  name?: string
  filename?: string
  // MIME type; callers may send either `mimeType` or `media_type` — TODO confirm which.
  mimeType?: string
  media_type?: string
  // Size in bytes.
  size: number
}

/** Provider-ready content part produced from an attachment (shape varies by `type`). */
export interface FileContent {
  type: string
  [key: string]: unknown
}
|
||||
|
||||
/**
|
||||
* Process file attachments into content for the payload.
|
||||
*/
|
||||
export async function processFileAttachments(
|
||||
fileAttachments: FileAttachmentInput[],
|
||||
userId: string
|
||||
): Promise<FileContent[]> {
|
||||
if (!Array.isArray(fileAttachments) || fileAttachments.length === 0) return []
|
||||
|
||||
const processedFileContents: FileContent[] = []
|
||||
const requestId = `copilot-${userId}-${Date.now()}`
|
||||
const processedAttachments = await CopilotFiles.processCopilotAttachments(
|
||||
fileAttachments as Parameters<typeof CopilotFiles.processCopilotAttachments>[0],
|
||||
requestId
|
||||
)
|
||||
|
||||
for (const { buffer, attachment } of processedAttachments) {
|
||||
const fileContent = createFileContent(buffer, attachment.media_type)
|
||||
if (fileContent) {
|
||||
processedFileContents.push(fileContent as FileContent)
|
||||
}
|
||||
}
|
||||
|
||||
logger.debug('Processed file attachments for payload', {
|
||||
userId,
|
||||
inputCount: fileAttachments.length,
|
||||
outputCount: processedFileContents.length,
|
||||
})
|
||||
|
||||
return processedFileContents
|
||||
}
|
||||
69
apps/sim/lib/copilot/chat-lifecycle.ts
Normal file
69
apps/sim/lib/copilot/chat-lifecycle.ts
Normal file
@@ -0,0 +1,69 @@
|
||||
import { db } from '@sim/db'
|
||||
import { copilotChats } from '@sim/db/schema'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { and, eq } from 'drizzle-orm'
|
||||
|
||||
const logger = createLogger('CopilotChatLifecycle')
|
||||
|
||||
/** Result of resolving (or creating) a copilot chat session. */
export interface ChatLoadResult {
  // Chat row id; empty string when creating a new row failed.
  chatId: string
  // Loaded/created row, or null when not found / creation failed.
  chat: typeof copilotChats.$inferSelect | null
  // Prior messages for an existing chat; always empty for new chats.
  conversationHistory: unknown[]
  // True when a new chat row was created (or attempted) rather than loaded.
  isNew: boolean
}
|
||||
|
||||
/**
 * Resolve or create a copilot chat session.
 * If chatId is provided, loads the existing chat (scoped to the user).
 * Otherwise creates a new empty chat row for the workflow.
 *
 * @returns ChatLoadResult; note that a lookup miss still returns the supplied
 *   chatId with `chat: null` rather than throwing.
 */
export async function resolveOrCreateChat(params: {
  chatId?: string
  userId: string
  workflowId: string
  model: string
}): Promise<ChatLoadResult> {
  const { chatId, userId, workflowId, model } = params

  if (chatId) {
    // Ownership check is part of the WHERE clause: a chat belonging to another
    // user resolves to `chat: null`, same as a missing chat.
    const [chat] = await db
      .select()
      .from(copilotChats)
      .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, userId)))
      .limit(1)

    return {
      chatId,
      chat: chat ?? null,
      conversationHistory: chat && Array.isArray(chat.messages) ? chat.messages : [],
      isNew: false,
    }
  }

  // No chatId supplied: create a fresh, untitled chat with no messages.
  const [newChat] = await db
    .insert(copilotChats)
    .values({
      userId,
      workflowId,
      title: null,
      model,
      messages: [],
    })
    .returning()

  if (!newChat) {
    // Creation failed silently (no row returned) — degrade to an empty session
    // instead of throwing; callers must handle the empty chatId.
    logger.warn('Failed to create new copilot chat row', { userId, workflowId })
    return {
      chatId: '',
      chat: null,
      conversationHistory: [],
      isNew: true,
    }
  }

  return {
    chatId: newChat.id,
    chat: newChat,
    conversationHistory: [],
    isNew: true,
  }
}
|
||||
237
apps/sim/lib/copilot/chat-payload.ts
Normal file
237
apps/sim/lib/copilot/chat-payload.ts
Normal file
@@ -0,0 +1,237 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { processFileAttachments } from '@/lib/copilot/chat-context'
|
||||
import { getCopilotModel } from '@/lib/copilot/config'
|
||||
import { SIM_AGENT_VERSION } from '@/lib/copilot/constants'
|
||||
import { getCredentialsServerTool } from '@/lib/copilot/tools/server/user/get-credentials'
|
||||
import type { CopilotProviderConfig } from '@/lib/copilot/types'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
import { tools } from '@/tools/registry'
|
||||
import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils'
|
||||
|
||||
const logger = createLogger('CopilotChatPayload')
|
||||
|
||||
/** Inputs required to assemble a copilot chat request payload. */
export interface BuildPayloadParams {
  // The new user message text.
  message: string
  workflowId: string
  userId: string
  // Client-generated id for the user message.
  userMessageId: string
  // UI mode; 'agent' is normalized to 'build' during payload assembly.
  mode: string
  model: string
  // Prior messages; loosely typed because they come from stored JSON.
  conversationHistory?: unknown[]
  contexts?: Array<{ type: string; content: string }>
  fileAttachments?: Array<{ id: string; key: string; size: number; [key: string]: unknown }>
  commands?: string[]
  chatId?: string
  // Optional system-role feedback injected before the user message.
  implicitFeedback?: string
}

/** Tool schema entry forwarded to the copilot backend. */
interface ToolSchema {
  name: string
  description: string
  input_schema: Record<string, unknown>
  // Presumably tells the backend to lazy-load the full schema — confirm
  // against the Go agent's contract.
  defer_loading?: boolean
  executeLocally?: boolean
  oauth?: { required: boolean; provider: string }
}

/** Credential bundle attached to build-mode payloads. */
interface CredentialsPayload {
  // OAuth tokens keyed by provider id.
  oauth: Record<
    string,
    { accessToken: string; accountId: string; name: string; expiresAt?: string }
  >
  // Names of configured environment variables (values are not included).
  apiKeys: string[]
  metadata?: {
    connectedOAuth: Array<{ provider: string; name: string; scopes?: string[] }>
    configuredApiKeys: string[]
  }
}

// A message body is either plain text or a list of typed content parts
// (text plus processed file attachments).
type MessageContent = string | Array<{ type: string; text?: string; [key: string]: unknown }>

/** Single entry in the conversation sent to the backend. */
interface ConversationMessage {
  role: string
  content: MessageContent
}
||||
|
||||
/**
 * Derive the provider configuration from environment variables.
 * Returns undefined when COPILOT_PROVIDER is not set (backend default applies).
 *
 * NOTE(review): the azure/vertex branches use the env-derived model (envModel)
 * while the generic branch uses the caller's selectedModel — confirm this
 * asymmetry is intentional.
 */
function buildProviderConfig(selectedModel: string): CopilotProviderConfig | undefined {
  const defaults = getCopilotModel('chat')
  // Env override wins; falls back to the configured chat default.
  const envModel = env.COPILOT_MODEL || defaults.model
  const providerEnv = env.COPILOT_PROVIDER

  if (!providerEnv) return undefined

  if (providerEnv === 'azure-openai') {
    return {
      provider: 'azure-openai',
      model: envModel,
      apiKey: env.AZURE_OPENAI_API_KEY,
      apiVersion: 'preview',
      endpoint: env.AZURE_OPENAI_ENDPOINT,
    }
  }

  if (providerEnv === 'vertex') {
    return {
      provider: 'vertex',
      model: envModel,
      apiKey: env.COPILOT_API_KEY,
      vertexProject: env.VERTEX_PROJECT,
      vertexLocation: env.VERTEX_LOCATION,
    }
  }

  // Any other provider string is passed through as-is.
  // NOTE(review): Exclude<string, '...'> distributes to plain string, so this
  // annotation adds no type safety; the trailing `as CopilotProviderConfig`
  // does the real work.
  return {
    provider: providerEnv as Exclude<string, 'azure-openai' | 'vertex'>,
    model: selectedModel,
    apiKey: env.COPILOT_API_KEY,
  } as CopilotProviderConfig
}
|
||||
|
||||
/**
 * Build the request payload for the copilot backend.
 *
 * Assembles: provider config, normalized mode, conversation messages (with
 * historical and new file attachments converted to content parts), and — in
 * build mode only — the integration tool schemas and user credentials.
 *
 * @param params - Message, ids, mode, history, attachments (see BuildPayloadParams).
 * @param options - Pre-resolved provider config (optional) and the model to use.
 * @returns JSON-serializable payload for the copilot chat endpoint.
 */
export async function buildCopilotRequestPayload(
  params: BuildPayloadParams,
  options: {
    providerConfig?: CopilotProviderConfig
    selectedModel: string
  }
): Promise<Record<string, unknown>> {
  const {
    message,
    workflowId,
    userId,
    userMessageId,
    mode,
    conversationHistory = [],
    contexts,
    fileAttachments,
    commands,
    chatId,
    implicitFeedback,
  } = params

  const selectedModel = options.selectedModel
  const providerConfig = options.providerConfig ?? buildProviderConfig(selectedModel)

  // 'agent' is an alias for 'build'; the transport layer in turn calls build
  // mode 'agent'. The round-trip normalizes any of the three spellings.
  const effectiveMode = mode === 'agent' ? 'build' : mode
  const transportMode = effectiveMode === 'build' ? 'agent' : effectiveMode

  // Convert the new message's attachments into provider content parts.
  const processedFileContents = await processFileAttachments(fileAttachments ?? [], userId)

  // Rebuild the message list, re-processing attachments on historical messages
  // so every turn carries its file content parts.
  const messages: ConversationMessage[] = []
  for (const msg of conversationHistory as Array<Record<string, unknown>>) {
    const msgAttachments = msg.fileAttachments as Array<Record<string, unknown>> | undefined
    if (Array.isArray(msgAttachments) && msgAttachments.length > 0) {
      const content: Array<{ type: string; text?: string; [key: string]: unknown }> = [
        { type: 'text', text: msg.content as string },
      ]
      const processedHistoricalAttachments = await processFileAttachments(
        (msgAttachments as BuildPayloadParams['fileAttachments']) ?? [],
        userId
      )
      for (const fileContent of processedHistoricalAttachments) {
        content.push(fileContent)
      }
      messages.push({ role: msg.role as string, content })
    } else {
      messages.push({ role: msg.role as string, content: msg.content as string })
    }
  }

  // Implicit feedback rides along as a system message just before the user turn.
  if (implicitFeedback) {
    messages.push({ role: 'system', content: implicitFeedback })
  }

  // Append the new user message — multi-part when attachments were processed.
  if (processedFileContents.length > 0) {
    const content: Array<{ type: string; text?: string; [key: string]: unknown }> = [
      { type: 'text', text: message },
    ]
    for (const fileContent of processedFileContents) {
      content.push(fileContent)
    }
    messages.push({ role: 'user', content })
  } else {
    messages.push({ role: 'user', content: message })
  }

  let integrationTools: ToolSchema[] = []
  let credentials: CredentialsPayload | null = null

  // Tool schemas and credentials are only needed when the agent can build.
  if (effectiveMode === 'build') {
    // function_execute sandbox tool is now defined in Go — no need to send it

    // Best-effort: a credentials failure degrades the payload, never blocks it.
    try {
      const rawCredentials = await getCredentialsServerTool.execute({ workflowId }, { userId })

      const oauthMap: CredentialsPayload['oauth'] = {}
      const connectedOAuth: Array<{ provider: string; name: string; scopes?: string[] }> = []
      for (const cred of rawCredentials?.oauth?.connected?.credentials ?? []) {
        if (cred.accessToken) {
          oauthMap[cred.provider] = {
            accessToken: cred.accessToken,
            accountId: cred.id,
            name: cred.name,
          }
          connectedOAuth.push({ provider: cred.provider, name: cred.name })
        }
      }

      credentials = {
        oauth: oauthMap,
        apiKeys: rawCredentials?.environment?.variableNames ?? [],
        metadata: {
          connectedOAuth,
          configuredApiKeys: rawCredentials?.environment?.variableNames ?? [],
        },
      }
    } catch (error) {
      logger.warn('Failed to fetch credentials for build payload', {
        error: error instanceof Error ? error.message : String(error),
      })
    }

    // Best-effort: tool schema assembly failures also degrade, never block.
    try {
      const { createUserToolSchema } = await import('@/tools/params')
      const latestTools = getLatestVersionTools(tools)

      integrationTools = Object.entries(latestTools).map(([toolId, toolConfig]) => {
        const userSchema = createUserToolSchema(toolConfig)
        const strippedName = stripVersionSuffix(toolId)
        return {
          name: strippedName,
          description: toolConfig.description || toolConfig.name || strippedName,
          input_schema: userSchema as unknown as Record<string, unknown>,
          defer_loading: true,
          ...(toolConfig.oauth?.required && {
            oauth: {
              required: true,
              provider: toolConfig.oauth.provider,
            },
          }),
        }
      })
    } catch (error) {
      logger.warn('Failed to build tool schemas for payload', {
        error: error instanceof Error ? error.message : String(error),
      })
    }
  }

  // Optional keys are omitted entirely (not sent as null/empty) to keep the
  // wire payload minimal.
  return {
    message,
    workflowId,
    userId,
    model: selectedModel,
    mode: transportMode,
    messageId: userMessageId,
    version: SIM_AGENT_VERSION,
    ...(providerConfig ? { provider: providerConfig } : {}),
    ...(contexts && contexts.length > 0 ? { context: contexts } : {}),
    ...(chatId ? { chatId } : {}),
    ...(processedFileContents.length > 0 ? { fileAttachments: processedFileContents } : {}),
    ...(integrationTools.length > 0 ? { integrationTools } : {}),
    ...(credentials ? { credentials } : {}),
  }
}
|
||||
147
apps/sim/lib/copilot/client-sse/content-blocks.ts
Normal file
147
apps/sim/lib/copilot/client-sse/content-blocks.ts
Normal file
@@ -0,0 +1,147 @@
|
||||
import type {
|
||||
ChatContext,
|
||||
CopilotMessage,
|
||||
MessageFileAttachment,
|
||||
} from '@/stores/panel/copilot/types'
|
||||
import type { ClientContentBlock, ClientStreamingContext } from './types'
|
||||
|
||||
// Block-type discriminators used on streaming content blocks.
const TEXT_BLOCK_TYPE = 'text'
const THINKING_BLOCK_TYPE = 'thinking'
// Markup appended to a message so the UI offers a "Continue" quick-reply.
const CONTINUE_OPTIONS_TAG = '<options>{"1":"Continue"}</options>'
|
||||
|
||||
export function createUserMessage(
|
||||
content: string,
|
||||
fileAttachments?: MessageFileAttachment[],
|
||||
contexts?: ChatContext[],
|
||||
messageId?: string
|
||||
): CopilotMessage {
|
||||
return {
|
||||
id: messageId || crypto.randomUUID(),
|
||||
role: 'user',
|
||||
content,
|
||||
timestamp: new Date().toISOString(),
|
||||
...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }),
|
||||
...(contexts && contexts.length > 0 && { contexts }),
|
||||
...(contexts &&
|
||||
contexts.length > 0 && {
|
||||
contentBlocks: [{ type: 'contexts', contexts, timestamp: Date.now() }],
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
export function createStreamingMessage(): CopilotMessage {
|
||||
return {
|
||||
id: crypto.randomUUID(),
|
||||
role: 'assistant',
|
||||
content: '',
|
||||
timestamp: new Date().toISOString(),
|
||||
}
|
||||
}
|
||||
|
||||
export function createErrorMessage(
|
||||
messageId: string,
|
||||
content: string,
|
||||
errorType?: 'usage_limit' | 'unauthorized' | 'forbidden' | 'rate_limit' | 'upgrade_required'
|
||||
): CopilotMessage {
|
||||
return {
|
||||
id: messageId,
|
||||
role: 'assistant',
|
||||
content,
|
||||
timestamp: new Date().toISOString(),
|
||||
contentBlocks: [
|
||||
{
|
||||
type: 'text',
|
||||
content,
|
||||
timestamp: Date.now(),
|
||||
},
|
||||
],
|
||||
errorType,
|
||||
}
|
||||
}
|
||||
|
||||
export function appendTextBlock(context: ClientStreamingContext, text: string) {
|
||||
if (!text) return
|
||||
context.accumulatedContent += text
|
||||
if (context.currentTextBlock && context.contentBlocks.length > 0) {
|
||||
const lastBlock = context.contentBlocks[context.contentBlocks.length - 1]
|
||||
if (lastBlock.type === TEXT_BLOCK_TYPE && lastBlock === context.currentTextBlock) {
|
||||
lastBlock.content += text
|
||||
return
|
||||
}
|
||||
}
|
||||
const newBlock: ClientContentBlock = { type: 'text', content: text, timestamp: Date.now() }
|
||||
context.currentTextBlock = newBlock
|
||||
context.contentBlocks.push(newBlock)
|
||||
}
|
||||
|
||||
export function appendContinueOption(content: string): string {
|
||||
if (/<options>/i.test(content)) return content
|
||||
const suffix = content.trim().length > 0 ? '\n\n' : ''
|
||||
return `${content}${suffix}${CONTINUE_OPTIONS_TAG}`
|
||||
}
|
||||
|
||||
export function appendContinueOptionBlock(blocks: ClientContentBlock[]): ClientContentBlock[] {
|
||||
if (!Array.isArray(blocks)) return blocks
|
||||
const hasOptions = blocks.some(
|
||||
(block) =>
|
||||
block?.type === TEXT_BLOCK_TYPE &&
|
||||
typeof block.content === 'string' &&
|
||||
/<options>/i.test(block.content)
|
||||
)
|
||||
if (hasOptions) return blocks
|
||||
return [
|
||||
...blocks,
|
||||
{
|
||||
type: TEXT_BLOCK_TYPE,
|
||||
content: CONTINUE_OPTIONS_TAG,
|
||||
timestamp: Date.now(),
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
export function stripContinueOption(content: string): string {
|
||||
if (!content || !content.includes(CONTINUE_OPTIONS_TAG)) return content
|
||||
const next = content.replace(CONTINUE_OPTIONS_TAG, '')
|
||||
return next.replace(/\n{2,}\s*$/g, '\n').trimEnd()
|
||||
}
|
||||
|
||||
export function stripContinueOptionFromBlocks(blocks: ClientContentBlock[]): ClientContentBlock[] {
|
||||
if (!Array.isArray(blocks)) return blocks
|
||||
return blocks.flatMap((block) => {
|
||||
if (
|
||||
block?.type === TEXT_BLOCK_TYPE &&
|
||||
typeof block.content === 'string' &&
|
||||
block.content.includes(CONTINUE_OPTIONS_TAG)
|
||||
) {
|
||||
const nextContent = stripContinueOption(block.content)
|
||||
if (!nextContent.trim()) return []
|
||||
return [{ ...block, content: nextContent }]
|
||||
}
|
||||
return [block]
|
||||
})
|
||||
}
|
||||
|
||||
export function beginThinkingBlock(context: ClientStreamingContext) {
|
||||
if (!context.currentThinkingBlock) {
|
||||
const newBlock: ClientContentBlock = {
|
||||
type: 'thinking',
|
||||
content: '',
|
||||
timestamp: Date.now(),
|
||||
startTime: Date.now(),
|
||||
}
|
||||
context.currentThinkingBlock = newBlock
|
||||
context.contentBlocks.push(newBlock)
|
||||
}
|
||||
context.isInThinkingBlock = true
|
||||
context.currentTextBlock = null
|
||||
}
|
||||
|
||||
export function finalizeThinkingBlock(context: ClientStreamingContext) {
|
||||
if (context.currentThinkingBlock) {
|
||||
context.currentThinkingBlock.duration =
|
||||
Date.now() - (context.currentThinkingBlock.startTime || Date.now())
|
||||
}
|
||||
context.isInThinkingBlock = false
|
||||
context.currentThinkingBlock = null
|
||||
context.currentTextBlock = null
|
||||
}
|
||||
752
apps/sim/lib/copilot/client-sse/handlers.ts
Normal file
752
apps/sim/lib/copilot/client-sse/handlers.ts
Normal file
@@ -0,0 +1,752 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { STREAM_STORAGE_KEY } from '@/lib/copilot/constants'
|
||||
import { asRecord } from '@/lib/copilot/orchestrator/sse-utils'
|
||||
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
|
||||
import {
|
||||
isBackgroundState,
|
||||
isRejectedState,
|
||||
isReviewState,
|
||||
resolveToolDisplay,
|
||||
} from '@/lib/copilot/store-utils'
|
||||
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
|
||||
import type { CopilotStore, CopilotStreamInfo, CopilotToolCall } from '@/stores/panel/copilot/types'
|
||||
import { useWorkflowDiffStore } from '@/stores/workflow-diff/store'
|
||||
import type { WorkflowState } from '@/stores/workflows/workflow/types'
|
||||
import { appendTextBlock, beginThinkingBlock, finalizeThinkingBlock } from './content-blocks'
|
||||
import type { ClientContentBlock, ClientStreamingContext } from './types'
|
||||
|
||||
const logger = createLogger('CopilotClientSseHandlers')
// NOTE(review): TEXT_BLOCK_TYPE is not referenced in the visible portion of this
// file — confirm it is used elsewhere before removing.
const TEXT_BLOCK_TYPE = 'text'
// Batching knobs for streaming UI updates (milliseconds / queue entries):
// flush immediately once this much time has passed since the last batch...
const MAX_BATCH_INTERVAL = 50
// ...otherwise wait at least roughly one frame between batches...
const MIN_BATCH_INTERVAL = 16
// ...or flush immediately once this many message contexts are queued.
const MAX_QUEUE_SIZE = 5
|
||||
|
||||
function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void {
|
||||
if (typeof window === 'undefined') return
|
||||
try {
|
||||
if (!info) {
|
||||
window.sessionStorage.removeItem(STREAM_STORAGE_KEY)
|
||||
return
|
||||
}
|
||||
window.sessionStorage.setItem(STREAM_STORAGE_KEY, JSON.stringify(info))
|
||||
} catch (error) {
|
||||
logger.warn('Failed to write active stream to storage', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Zustand-style setter: accepts either a partial state object or an updater
// function deriving a partial from the current state.
type StoreSet = (
  partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)
) => void

// Signature shared by all SSE event handlers in this module: the parsed event,
// the per-message streaming context, and the store's get/set accessors.
// Handlers may be sync or async.
export type SSEHandler = (
  data: SSEEvent,
  context: ClientStreamingContext,
  get: () => CopilotStore,
  set: StoreSet
) => Promise<void> | void
|
||||
|
||||
// Pending streaming contexts keyed by message id, drained in batches.
const streamingUpdateQueue = new Map<string, ClientStreamingContext>()
// Handle of the scheduled requestAnimationFrame flush, or null when none is pending.
let streamingUpdateRAF: number | null = null
// performance.now() timestamp of the last flushed batch (used for throttling).
let lastBatchTime = 0
|
||||
|
||||
export function stopStreamingUpdates() {
|
||||
if (streamingUpdateRAF !== null) {
|
||||
cancelAnimationFrame(streamingUpdateRAF)
|
||||
streamingUpdateRAF = null
|
||||
}
|
||||
streamingUpdateQueue.clear()
|
||||
}
|
||||
|
||||
function createOptimizedContentBlocks(contentBlocks: ClientContentBlock[]): ClientContentBlock[] {
|
||||
const result: ClientContentBlock[] = new Array(contentBlocks.length)
|
||||
for (let i = 0; i < contentBlocks.length; i++) {
|
||||
const block = contentBlocks[i]
|
||||
result[i] = { ...block }
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
export function flushStreamingUpdates(set: StoreSet) {
|
||||
if (streamingUpdateRAF !== null) {
|
||||
cancelAnimationFrame(streamingUpdateRAF)
|
||||
streamingUpdateRAF = null
|
||||
}
|
||||
if (streamingUpdateQueue.size === 0) return
|
||||
|
||||
const updates = new Map(streamingUpdateQueue)
|
||||
streamingUpdateQueue.clear()
|
||||
|
||||
set((state: CopilotStore) => {
|
||||
if (updates.size === 0) return state
|
||||
return {
|
||||
messages: state.messages.map((msg) => {
|
||||
const update = updates.get(msg.id)
|
||||
if (update) {
|
||||
return {
|
||||
...msg,
|
||||
content: '',
|
||||
contentBlocks:
|
||||
update.contentBlocks.length > 0
|
||||
? createOptimizedContentBlocks(update.contentBlocks)
|
||||
: [],
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/**
 * Queue a streaming context for a batched store update.
 *
 * Updates are coalesced per message id and flushed inside a
 * requestAnimationFrame callback. A flush is scheduled immediately when the
 * queue is large or the last batch is stale; otherwise it is delayed by a
 * short timeout to keep the batch rate near MIN_BATCH_INTERVAL.
 */
export function updateStreamingMessage(set: StoreSet, context: ClientStreamingContext) {
  if (context.suppressStreamingUpdates) return
  const now = performance.now()
  // Later calls for the same message overwrite earlier ones (coalescing).
  streamingUpdateQueue.set(context.messageId, context)
  const timeSinceLastBatch = now - lastBatchTime
  const shouldFlushImmediately =
    streamingUpdateQueue.size >= MAX_QUEUE_SIZE || timeSinceLastBatch > MAX_BATCH_INTERVAL

  // Only one flush may be in flight at a time.
  if (streamingUpdateRAF === null) {
    const scheduleUpdate = () => {
      streamingUpdateRAF = requestAnimationFrame(() => {
        // Snapshot and clear the queue before touching the store so handlers
        // that enqueue during set() start a fresh batch.
        const updates = new Map(streamingUpdateQueue)
        streamingUpdateQueue.clear()
        streamingUpdateRAF = null
        lastBatchTime = performance.now()
        set((state: CopilotStore) => {
          if (updates.size === 0) return state
          const messages = state.messages
          const lastMessage = messages[messages.length - 1]
          const lastMessageUpdate = lastMessage ? updates.get(lastMessage.id) : null
          // Fast path: the common case is a single update targeting the last
          // (actively streaming) message — replace just that element.
          if (updates.size === 1 && lastMessageUpdate) {
            const newMessages = [...messages]
            newMessages[messages.length - 1] = {
              ...lastMessage,
              content: '',
              contentBlocks:
                lastMessageUpdate.contentBlocks.length > 0
                  ? createOptimizedContentBlocks(lastMessageUpdate.contentBlocks)
                  : [],
            }
            return { messages: newMessages }
          }
          // General path: map over all messages applying any queued update.
          return {
            messages: messages.map((msg) => {
              const update = updates.get(msg.id)
              if (update) {
                return {
                  ...msg,
                  content: '',
                  contentBlocks:
                    update.contentBlocks.length > 0
                      ? createOptimizedContentBlocks(update.contentBlocks)
                      : [],
                }
              }
              return msg
            }),
          }
        })
      })
    }
    if (shouldFlushImmediately) scheduleUpdate()
    else setTimeout(scheduleUpdate, Math.max(0, MIN_BATCH_INTERVAL - timeSinceLastBatch))
  }
}
|
||||
|
||||
export function upsertToolCallBlock(context: ClientStreamingContext, toolCall: CopilotToolCall) {
|
||||
let found = false
|
||||
for (let i = 0; i < context.contentBlocks.length; i++) {
|
||||
const b = context.contentBlocks[i]
|
||||
if (b.type === 'tool_call' && b.toolCall?.id === toolCall.id) {
|
||||
context.contentBlocks[i] = { ...b, toolCall }
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
context.contentBlocks.push({ type: 'tool_call', toolCall, timestamp: Date.now() })
|
||||
}
|
||||
}
|
||||
|
||||
function stripThinkingTags(text: string): string {
|
||||
return text.replace(/<\/?thinking[^>]*>/gi, '').replace(/<\/?thinking[^&]*>/gi, '')
|
||||
}
|
||||
|
||||
function appendThinkingContent(context: ClientStreamingContext, text: string) {
|
||||
if (!text) return
|
||||
const cleanedText = stripThinkingTags(text)
|
||||
if (!cleanedText) return
|
||||
if (context.currentThinkingBlock) {
|
||||
context.currentThinkingBlock.content += cleanedText
|
||||
} else {
|
||||
const newBlock: ClientContentBlock = {
|
||||
type: 'thinking',
|
||||
content: cleanedText,
|
||||
timestamp: Date.now(),
|
||||
startTime: Date.now(),
|
||||
}
|
||||
context.currentThinkingBlock = newBlock
|
||||
context.contentBlocks.push(newBlock)
|
||||
}
|
||||
context.isInThinkingBlock = true
|
||||
context.currentTextBlock = null
|
||||
}
|
||||
|
||||
export const sseHandlers: Record<string, SSEHandler> = {
|
||||
chat_id: async (data, context, get, set) => {
|
||||
context.newChatId = data.chatId
|
||||
const { currentChat, activeStream } = get()
|
||||
if (!currentChat && context.newChatId) {
|
||||
await get().handleNewChatCreation(context.newChatId)
|
||||
}
|
||||
if (activeStream && context.newChatId && !activeStream.chatId) {
|
||||
const updatedStream = { ...activeStream, chatId: context.newChatId }
|
||||
set({ activeStream: updatedStream })
|
||||
writeActiveStreamToStorage(updatedStream)
|
||||
}
|
||||
},
|
||||
title_updated: (_data, _context, get, set) => {
|
||||
const title = _data.title
|
||||
if (!title) return
|
||||
const { currentChat, chats } = get()
|
||||
if (currentChat) {
|
||||
set({
|
||||
currentChat: { ...currentChat, title },
|
||||
chats: chats.map((c) => (c.id === currentChat.id ? { ...c, title } : c)),
|
||||
})
|
||||
}
|
||||
},
|
||||
  // A tool finished executing. Transition it to success/rejected/error in the
  // store and in the message's content blocks, then run per-tool follow-ups
  // (todo status updates, edit_workflow diff application).
  tool_result: (data, context, get, set) => {
    try {
      const eventData = asRecord(data?.data)
      // The id may arrive either at the top level or inside the payload.
      const toolCallId: string | undefined =
        data?.toolCallId || (eventData.id as string | undefined)
      const success: boolean | undefined = data?.success
      const failedDependency: boolean = data?.failedDependency === true
      const resultObj = asRecord(data?.result)
      const skipped: boolean = resultObj.skipped === true
      if (!toolCallId) return
      const { toolCallsById } = get()
      const current = toolCallsById[toolCallId]
      if (current) {
        // Terminal/user-controlled states are never overwritten by a late result.
        if (
          isRejectedState(current.state) ||
          isReviewState(current.state) ||
          isBackgroundState(current.state)
        ) {
          return
        }
        // success → success; failed dependency or skipped → rejected; else error.
        const targetState = success
          ? ClientToolCallState.success
          : failedDependency || skipped
            ? ClientToolCallState.rejected
            : ClientToolCallState.error
        const updatedMap = { ...toolCallsById }
        updatedMap[toolCallId] = {
          ...current,
          state: targetState,
          display: resolveToolDisplay(current.name, targetState, current.id, current.params),
        }
        set({ toolCallsById: updatedMap })

        // Follow-up: a successful checkoff_todo marks its todo completed.
        if (targetState === ClientToolCallState.success && current.name === 'checkoff_todo') {
          try {
            const result = asRecord(data?.result) || asRecord(eventData.result)
            const input = asRecord(current.params || current.input)
            // The todo id may live under several keys in either input or result.
            const todoId = (input.id || input.todoId || result.id || result.todoId) as
              | string
              | undefined
            if (todoId) {
              get().updatePlanTodoStatus(todoId, 'completed')
            }
          } catch (error) {
            logger.warn('Failed to process checkoff_todo tool result', {
              error: error instanceof Error ? error.message : String(error),
              toolCallId,
            })
          }
        }

        // Follow-up: a successful mark_todo_in_progress marks its todo executing.
        if (
          targetState === ClientToolCallState.success &&
          current.name === 'mark_todo_in_progress'
        ) {
          try {
            const result = asRecord(data?.result) || asRecord(eventData.result)
            const input = asRecord(current.params || current.input)
            const todoId = (input.id || input.todoId || result.id || result.todoId) as
              | string
              | undefined
            if (todoId) {
              get().updatePlanTodoStatus(todoId, 'executing')
            }
          } catch (error) {
            logger.warn('Failed to process mark_todo_in_progress tool result', {
              error: error instanceof Error ? error.message : String(error),
              toolCallId,
            })
          }
        }

        // Follow-up: an edit_workflow result carrying a workflowState is fed
        // into the diff store as proposed changes (fire-and-forget).
        if (current.name === 'edit_workflow') {
          try {
            const resultPayload = asRecord(
              data?.result || eventData.result || eventData.data || data?.data
            )
            const workflowState = asRecord(resultPayload?.workflowState)
            const hasWorkflowState = !!resultPayload?.workflowState
            logger.info('[SSE] edit_workflow result received', {
              hasWorkflowState,
              blockCount: hasWorkflowState ? Object.keys(workflowState.blocks ?? {}).length : 0,
              edgeCount: Array.isArray(workflowState.edges) ? workflowState.edges.length : 0,
            })
            if (hasWorkflowState) {
              const diffStore = useWorkflowDiffStore.getState()
              diffStore
                .setProposedChanges(resultPayload.workflowState as WorkflowState)
                .catch((err) => {
                  logger.error('[SSE] Failed to apply edit_workflow diff', {
                    error: err instanceof Error ? err.message : String(err),
                  })
                })
            }
          } catch (err) {
            logger.error('[SSE] edit_workflow result handling failed', {
              error: err instanceof Error ? err.message : String(err),
            })
          }
        }
      }

      // Mirror the state transition onto the matching content block so the
      // rendered message agrees with the store.
      for (let i = 0; i < context.contentBlocks.length; i++) {
        const b = context.contentBlocks[i]
        if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) {
          if (
            isRejectedState(b.toolCall?.state) ||
            isReviewState(b.toolCall?.state) ||
            isBackgroundState(b.toolCall?.state)
          )
            break
          const targetState = success
            ? ClientToolCallState.success
            : failedDependency || skipped
              ? ClientToolCallState.rejected
              : ClientToolCallState.error
          context.contentBlocks[i] = {
            ...b,
            toolCall: {
              ...b.toolCall,
              state: targetState,
              display: resolveToolDisplay(
                b.toolCall?.name,
                targetState,
                toolCallId,
                b.toolCall?.params
              ),
            },
          }
          break
        }
      }
      updateStreamingMessage(set, context)
    } catch (error) {
      logger.warn('Failed to process tool_result SSE event', {
        error: error instanceof Error ? error.message : String(error),
      })
    }
  },
|
||||
  // A tool failed. Transition it to rejected (failed dependency) or error in
  // both the store map and the message's content blocks.
  tool_error: (data, context, get, set) => {
    try {
      const errorData = asRecord(data?.data)
      // The id may arrive either at the top level or inside the payload.
      const toolCallId: string | undefined =
        data?.toolCallId || (errorData.id as string | undefined)
      const failedDependency: boolean = data?.failedDependency === true
      if (!toolCallId) return
      const { toolCallsById } = get()
      const current = toolCallsById[toolCallId]
      if (current) {
        // Terminal/user-controlled states are never overwritten by a late error.
        if (
          isRejectedState(current.state) ||
          isReviewState(current.state) ||
          isBackgroundState(current.state)
        ) {
          return
        }
        const targetState = failedDependency
          ? ClientToolCallState.rejected
          : ClientToolCallState.error
        const updatedMap = { ...toolCallsById }
        updatedMap[toolCallId] = {
          ...current,
          state: targetState,
          display: resolveToolDisplay(current.name, targetState, current.id, current.params),
        }
        set({ toolCallsById: updatedMap })
      }
      // Mirror the transition onto the matching content block.
      for (let i = 0; i < context.contentBlocks.length; i++) {
        const b = context.contentBlocks[i]
        if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) {
          if (
            isRejectedState(b.toolCall?.state) ||
            isReviewState(b.toolCall?.state) ||
            isBackgroundState(b.toolCall?.state)
          )
            break
          const targetState = failedDependency
            ? ClientToolCallState.rejected
            : ClientToolCallState.error
          context.contentBlocks[i] = {
            ...b,
            toolCall: {
              ...b.toolCall,
              state: targetState,
              display: resolveToolDisplay(
                b.toolCall?.name,
                targetState,
                toolCallId,
                b.toolCall?.params
              ),
            },
          }
          break
        }
      }
      updateStreamingMessage(set, context)
    } catch (error) {
      logger.warn('Failed to process tool_error SSE event', {
        error: error instanceof Error ? error.message : String(error),
      })
    }
  },
|
||||
tool_generating: (data, context, get, set) => {
|
||||
const { toolCallId, toolName } = data
|
||||
if (!toolCallId || !toolName) return
|
||||
const { toolCallsById } = get()
|
||||
|
||||
if (!toolCallsById[toolCallId]) {
|
||||
const initialState = ClientToolCallState.pending
|
||||
const tc: CopilotToolCall = {
|
||||
id: toolCallId,
|
||||
name: toolName,
|
||||
state: initialState,
|
||||
display: resolveToolDisplay(toolName, initialState, toolCallId),
|
||||
}
|
||||
const updated = { ...toolCallsById, [toolCallId]: tc }
|
||||
set({ toolCallsById: updated })
|
||||
logger.info('[toolCallsById] map updated', updated)
|
||||
|
||||
upsertToolCallBlock(context, tc)
|
||||
updateStreamingMessage(set, context)
|
||||
}
|
||||
},
|
||||
tool_call: (data, context, get, set) => {
|
||||
const toolData = asRecord(data?.data)
|
||||
const id: string | undefined = (toolData.id as string | undefined) || data?.toolCallId
|
||||
const name: string | undefined = (toolData.name as string | undefined) || data?.toolName
|
||||
if (!id) return
|
||||
const args = toolData.arguments as Record<string, unknown> | undefined
|
||||
const isPartial = toolData.partial === true
|
||||
const { toolCallsById } = get()
|
||||
|
||||
const existing = toolCallsById[id]
|
||||
const next: CopilotToolCall = existing
|
||||
? {
|
||||
...existing,
|
||||
state: ClientToolCallState.pending,
|
||||
...(args ? { params: args } : {}),
|
||||
display: resolveToolDisplay(name, ClientToolCallState.pending, id, args),
|
||||
}
|
||||
: {
|
||||
id,
|
||||
name: name || 'unknown_tool',
|
||||
state: ClientToolCallState.pending,
|
||||
...(args ? { params: args } : {}),
|
||||
display: resolveToolDisplay(name, ClientToolCallState.pending, id, args),
|
||||
}
|
||||
const updated = { ...toolCallsById, [id]: next }
|
||||
set({ toolCallsById: updated })
|
||||
logger.info('[toolCallsById] → pending', { id, name, params: args })
|
||||
|
||||
upsertToolCallBlock(context, next)
|
||||
updateStreamingMessage(set, context)
|
||||
|
||||
if (isPartial) {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
},
|
||||
reasoning: (data, context, _get, set) => {
|
||||
const phase = (data && (data.phase || data?.data?.phase)) as string | undefined
|
||||
if (phase === 'start') {
|
||||
beginThinkingBlock(context)
|
||||
updateStreamingMessage(set, context)
|
||||
return
|
||||
}
|
||||
if (phase === 'end') {
|
||||
finalizeThinkingBlock(context)
|
||||
updateStreamingMessage(set, context)
|
||||
return
|
||||
}
|
||||
const chunk: string = typeof data?.data === 'string' ? data.data : data?.content || ''
|
||||
if (!chunk) return
|
||||
appendThinkingContent(context, chunk)
|
||||
updateStreamingMessage(set, context)
|
||||
},
|
||||
  // Main text-content handler: an incremental tag parser over the streamed
  // text. It routes `<thinking>…</thinking>` spans into thinking blocks,
  // `<design_workflow>…</design_workflow>` spans into streamingPlanContent,
  // consumes `<marktodo>id</marktodo>` / `<checkofftodo>id</checkofftodo>`
  // directives (updating todo status and removing them from the text), and
  // appends everything else as plain text. A possibly-incomplete trailing tag
  // is kept in pendingContent until the next chunk arrives.
  content: (data, context, get, set) => {
    if (!data.data) return
    context.pendingContent += data.data

    let contentToProcess = context.pendingContent
    let hasProcessedContent = false

    const thinkingStartRegex = /<thinking>/
    const thinkingEndRegex = /<\/thinking>/
    const designWorkflowStartRegex = /<design_workflow>/
    const designWorkflowEndRegex = /<\/design_workflow>/

    // Split off a trailing partial tag (a '<...' suffix that is a prefix of
    // one of `tags`) so it can be buffered instead of rendered.
    const splitTrailingPartialTag = (
      text: string,
      tags: string[]
    ): { text: string; remaining: string } => {
      const partialIndex = text.lastIndexOf('<')
      if (partialIndex < 0) {
        return { text, remaining: '' }
      }
      const possibleTag = text.substring(partialIndex)
      const matchesTagStart = tags.some((tag) => tag.startsWith(possibleTag))
      if (!matchesTagStart) {
        return { text, remaining: '' }
      }
      return {
        text: text.substring(0, partialIndex),
        remaining: possibleTag,
      }
    }

    while (contentToProcess.length > 0) {
      // Inside <design_workflow>: accumulate until the closing tag.
      if (context.isInDesignWorkflowBlock) {
        const endMatch = designWorkflowEndRegex.exec(contentToProcess)
        if (endMatch) {
          const designContent = contentToProcess.substring(0, endMatch.index)
          context.designWorkflowContent += designContent
          context.isInDesignWorkflowBlock = false

          logger.info('[design_workflow] Tag complete, setting plan content', {
            contentLength: context.designWorkflowContent.length,
          })
          set({ streamingPlanContent: context.designWorkflowContent })

          contentToProcess = contentToProcess.substring(endMatch.index + endMatch[0].length)
          hasProcessedContent = true
        } else {
          // No closing tag yet: stream everything except a possible partial
          // closing tag into the plan content.
          const { text, remaining } = splitTrailingPartialTag(contentToProcess, [
            '</design_workflow>',
          ])
          context.designWorkflowContent += text

          set({ streamingPlanContent: context.designWorkflowContent })

          contentToProcess = remaining
          hasProcessedContent = true
          if (remaining) {
            break
          }
        }
        continue
      }

      // Plain-text mode: look for design_workflow and todo tags first.
      if (!context.isInThinkingBlock && !context.isInDesignWorkflowBlock) {
        const designStartMatch = designWorkflowStartRegex.exec(contentToProcess)
        if (designStartMatch) {
          const textBeforeDesign = contentToProcess.substring(0, designStartMatch.index)
          if (textBeforeDesign) {
            appendTextBlock(context, textBeforeDesign)
            hasProcessedContent = true
          }
          context.isInDesignWorkflowBlock = true
          context.designWorkflowContent = ''
          contentToProcess = contentToProcess.substring(
            designStartMatch.index + designStartMatch[0].length
          )
          hasProcessedContent = true
          continue
        }

        const nextMarkIndex = contentToProcess.indexOf('<marktodo>')
        const nextCheckIndex = contentToProcess.indexOf('<checkofftodo>')
        const hasMark = nextMarkIndex >= 0
        const hasCheck = nextCheckIndex >= 0

        // Earliest of the two todo tags, or -1 when neither is present.
        const nextTagIndex =
          hasMark && hasCheck
            ? Math.min(nextMarkIndex, nextCheckIndex)
            : hasMark
              ? nextMarkIndex
              : hasCheck
                ? nextCheckIndex
                : -1

        if (nextTagIndex >= 0) {
          const isMarkTodo = hasMark && nextMarkIndex === nextTagIndex
          const tagStart = isMarkTodo ? '<marktodo>' : '<checkofftodo>'
          const tagEnd = isMarkTodo ? '</marktodo>' : '</checkofftodo>'
          const closingIndex = contentToProcess.indexOf(tagEnd, nextTagIndex + tagStart.length)

          // Closing tag not streamed yet: wait for more content.
          if (closingIndex === -1) {
            break
          }

          const todoId = contentToProcess
            .substring(nextTagIndex + tagStart.length, closingIndex)
            .trim()
          logger.info(
            isMarkTodo ? '[TODO] Detected marktodo tag' : '[TODO] Detected checkofftodo tag',
            { todoId }
          )

          if (todoId) {
            try {
              get().updatePlanTodoStatus(todoId, isMarkTodo ? 'executing' : 'completed')
              logger.info(
                isMarkTodo
                  ? '[TODO] Successfully marked todo in progress'
                  : '[TODO] Successfully checked off todo',
                { todoId }
              )
            } catch (e) {
              logger.error(
                isMarkTodo
                  ? '[TODO] Failed to mark todo in progress'
                  : '[TODO] Failed to checkoff todo',
                { todoId, error: e }
              )
            }
          } else {
            logger.warn('[TODO] Empty todoId extracted from todo tag', { tagType: tagStart })
          }

          // Excise the tag, collapsing the newlines around it so removal does
          // not leave a blank gap (one newline is kept if both sides had one).
          let beforeTag = contentToProcess.substring(0, nextTagIndex)
          let afterTag = contentToProcess.substring(closingIndex + tagEnd.length)

          const hadNewlineBefore = /(\r?\n)+$/.test(beforeTag)
          const hadNewlineAfter = /^(\r?\n)+/.test(afterTag)

          beforeTag = beforeTag.replace(/(\r?\n)+$/, '')
          afterTag = afterTag.replace(/^(\r?\n)+/, '')

          contentToProcess =
            beforeTag + (hadNewlineBefore && hadNewlineAfter ? '\n' : '') + afterTag
          context.currentTextBlock = null
          hasProcessedContent = true
          continue
        }
      }

      if (context.isInThinkingBlock) {
        // Inside <thinking>: accumulate until the closing tag.
        const endMatch = thinkingEndRegex.exec(contentToProcess)
        if (endMatch) {
          const thinkingContent = contentToProcess.substring(0, endMatch.index)
          appendThinkingContent(context, thinkingContent)
          finalizeThinkingBlock(context)
          contentToProcess = contentToProcess.substring(endMatch.index + endMatch[0].length)
          hasProcessedContent = true
        } else {
          const { text, remaining } = splitTrailingPartialTag(contentToProcess, ['</thinking>'])
          if (text) {
            appendThinkingContent(context, text)
            hasProcessedContent = true
          }
          contentToProcess = remaining
          if (remaining) {
            break
          }
        }
      } else {
        const startMatch = thinkingStartRegex.exec(contentToProcess)
        if (startMatch) {
          const textBeforeThinking = contentToProcess.substring(0, startMatch.index)
          if (textBeforeThinking) {
            appendTextBlock(context, textBeforeThinking)
            hasProcessedContent = true
          }
          context.isInThinkingBlock = true
          context.currentTextBlock = null
          contentToProcess = contentToProcess.substring(startMatch.index + startMatch[0].length)
          hasProcessedContent = true
        } else {
          // No tags found: emit the text, but hold back a short '<...' suffix
          // (within the last 50 chars) that could be the start of any tag.
          let partialTagIndex = contentToProcess.lastIndexOf('<')

          const partialMarkTodo = contentToProcess.lastIndexOf('<marktodo')
          const partialCheckoffTodo = contentToProcess.lastIndexOf('<checkofftodo')

          if (partialMarkTodo > partialTagIndex) {
            partialTagIndex = partialMarkTodo
          }
          if (partialCheckoffTodo > partialTagIndex) {
            partialTagIndex = partialCheckoffTodo
          }

          let textToAdd = contentToProcess
          let remaining = ''
          if (partialTagIndex >= 0 && partialTagIndex > contentToProcess.length - 50) {
            textToAdd = contentToProcess.substring(0, partialTagIndex)
            remaining = contentToProcess.substring(partialTagIndex)
          }
          if (textToAdd) {
            appendTextBlock(context, textToAdd)
            hasProcessedContent = true
          }
          contentToProcess = remaining
          break
        }
      }
    }

    // Whatever was not consumed (partial tag) waits for the next chunk.
    context.pendingContent = contentToProcess
    if (hasProcessedContent) {
      updateStreamingMessage(set, context)
    }
  },
|
||||
done: (_data, context) => {
|
||||
logger.info('[SSE] DONE EVENT RECEIVED', {
|
||||
doneEventCount: context.doneEventCount,
|
||||
data: _data,
|
||||
})
|
||||
context.doneEventCount++
|
||||
if (context.doneEventCount >= 1) {
|
||||
logger.info('[SSE] Setting streamComplete = true, stream will terminate')
|
||||
context.streamComplete = true
|
||||
}
|
||||
},
|
||||
error: (data, context, _get, set) => {
|
||||
logger.error('Stream error:', data.error)
|
||||
set((state: CopilotStore) => ({
|
||||
messages: state.messages.map((msg) =>
|
||||
msg.id === context.messageId
|
||||
? {
|
||||
...msg,
|
||||
content: context.accumulatedContent || 'An error occurred.',
|
||||
error: data.error,
|
||||
}
|
||||
: msg
|
||||
),
|
||||
}))
|
||||
context.streamComplete = true
|
||||
},
|
||||
stream_end: (_data, context, _get, set) => {
|
||||
if (context.pendingContent) {
|
||||
if (context.isInThinkingBlock && context.currentThinkingBlock) {
|
||||
appendThinkingContent(context, context.pendingContent)
|
||||
} else if (context.pendingContent.trim()) {
|
||||
appendTextBlock(context, context.pendingContent)
|
||||
}
|
||||
context.pendingContent = ''
|
||||
}
|
||||
finalizeThinkingBlock(context)
|
||||
updateStreamingMessage(set, context)
|
||||
},
|
||||
default: () => {},
|
||||
}
|
||||
3
apps/sim/lib/copilot/client-sse/index.ts
Normal file
3
apps/sim/lib/copilot/client-sse/index.ts
Normal file
@@ -0,0 +1,3 @@
|
||||
// Barrel for the client-side SSE handling module.
export type { SSEHandler } from './handlers'
export { sseHandlers } from './handlers'
export { applySseEvent, subAgentSSEHandlers } from './subagent-handlers'
|
||||
374
apps/sim/lib/copilot/client-sse/subagent-handlers.ts
Normal file
374
apps/sim/lib/copilot/client-sse/subagent-handlers.ts
Normal file
@@ -0,0 +1,374 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import {
|
||||
asRecord,
|
||||
normalizeSseEvent,
|
||||
shouldSkipToolCallEvent,
|
||||
shouldSkipToolResultEvent,
|
||||
} from '@/lib/copilot/orchestrator/sse-utils'
|
||||
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
|
||||
import { resolveToolDisplay } from '@/lib/copilot/store-utils'
|
||||
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
|
||||
import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types'
|
||||
import { type SSEHandler, sseHandlers, updateStreamingMessage } from './handlers'
|
||||
import type { ClientStreamingContext } from './types'
|
||||
|
||||
const logger = createLogger('CopilotClientSubagentHandlers')

// Zustand-style setter: accepts either a partial state object or an updater
// function deriving a partial from the current state.
type StoreSet = (
  partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)
) => void
|
||||
|
||||
export function appendSubAgentContent(
|
||||
context: ClientStreamingContext,
|
||||
parentToolCallId: string,
|
||||
text: string
|
||||
) {
|
||||
if (!context.subAgentContent[parentToolCallId]) {
|
||||
context.subAgentContent[parentToolCallId] = ''
|
||||
}
|
||||
if (!context.subAgentBlocks[parentToolCallId]) {
|
||||
context.subAgentBlocks[parentToolCallId] = []
|
||||
}
|
||||
context.subAgentContent[parentToolCallId] += text
|
||||
const blocks = context.subAgentBlocks[parentToolCallId]
|
||||
const lastBlock = blocks[blocks.length - 1]
|
||||
if (lastBlock && lastBlock.type === 'subagent_text') {
|
||||
lastBlock.content = (lastBlock.content || '') + text
|
||||
} else {
|
||||
blocks.push({
|
||||
type: 'subagent_text',
|
||||
content: text,
|
||||
timestamp: Date.now(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Copy the context's accumulated subagent content/tool-calls/blocks onto the
 * parent tool call, both in the store map and in the message's content
 * blocks, then push a streaming UI update. Logs (without throwing) when the
 * parent tool call cannot be found.
 */
export function updateToolCallWithSubAgentData(
  context: ClientStreamingContext,
  get: () => CopilotStore,
  set: StoreSet,
  parentToolCallId: string
) {
  const { toolCallsById } = get()
  const parentToolCall = toolCallsById[parentToolCallId]
  if (!parentToolCall) {
    logger.warn('[SubAgent] updateToolCallWithSubAgentData: parent tool call not found', {
      parentToolCallId,
      availableToolCallIds: Object.keys(toolCallsById),
    })
    return
  }

  const blocks = context.subAgentBlocks[parentToolCallId] ?? []

  // Rebuild the parent with the latest subagent payload; streaming stays true
  // until the subagent completes elsewhere.
  const updatedToolCall: CopilotToolCall = {
    ...parentToolCall,
    subAgentContent: context.subAgentContent[parentToolCallId] || '',
    subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] ?? [],
    subAgentBlocks: blocks,
    subAgentStreaming: true,
  }

  logger.info('[SubAgent] Updating tool call with subagent data', {
    parentToolCallId,
    parentToolName: parentToolCall.name,
    subAgentContentLength: updatedToolCall.subAgentContent?.length,
    subAgentBlocksCount: updatedToolCall.subAgentBlocks?.length,
    subAgentToolCallsCount: updatedToolCall.subAgentToolCalls?.length,
  })

  const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall }
  set({ toolCallsById: updatedMap })

  // Mirror the refreshed tool call into the message's content blocks.
  let foundInContentBlocks = false
  for (let i = 0; i < context.contentBlocks.length; i++) {
    const b = context.contentBlocks[i]
    if (b.type === 'tool_call' && b.toolCall?.id === parentToolCallId) {
      context.contentBlocks[i] = { ...b, toolCall: updatedToolCall }
      foundInContentBlocks = true
      break
    }
  }

  if (!foundInContentBlocks) {
    logger.warn('[SubAgent] Parent tool call not found in contentBlocks', {
      parentToolCallId,
      contentBlocksCount: context.contentBlocks.length,
      toolCallBlockIds: context.contentBlocks
        .filter((b) => b.type === 'tool_call')
        .map((b) => b.toolCall?.id),
    })
  }

  updateStreamingMessage(set, context)
}
|
||||
|
||||
export const subAgentSSEHandlers: Record<string, SSEHandler> = {
|
||||
start: () => {
|
||||
// Subagent start event - no action needed, parent is already tracked from subagent_start
|
||||
},
|
||||
|
||||
content: (data, context, get, set) => {
|
||||
const parentToolCallId = context.subAgentParentToolCallId
|
||||
const contentStr = typeof data.data === 'string' ? data.data : data.content || ''
|
||||
logger.info('[SubAgent] content event', {
|
||||
parentToolCallId,
|
||||
hasData: !!contentStr,
|
||||
dataPreview: contentStr ? contentStr.substring(0, 50) : null,
|
||||
})
|
||||
if (!parentToolCallId || !contentStr) {
|
||||
logger.warn('[SubAgent] content missing parentToolCallId or data', {
|
||||
parentToolCallId,
|
||||
hasData: !!contentStr,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
appendSubAgentContent(context, parentToolCallId, contentStr)
|
||||
|
||||
updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
|
||||
},
|
||||
|
||||
reasoning: (data, context, get, set) => {
|
||||
const parentToolCallId = context.subAgentParentToolCallId
|
||||
const dataObj = asRecord(data?.data)
|
||||
const phase = data?.phase || (dataObj.phase as string | undefined)
|
||||
if (!parentToolCallId) return
|
||||
|
||||
if (phase === 'start' || phase === 'end') return
|
||||
|
||||
const chunk = typeof data?.data === 'string' ? data.data : data?.content || ''
|
||||
if (!chunk) return
|
||||
|
||||
appendSubAgentContent(context, parentToolCallId, chunk)
|
||||
|
||||
updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
|
||||
},
|
||||
|
||||
tool_generating: () => {
|
||||
// Tool generating event - no action needed, we'll handle the actual tool_call
|
||||
},
|
||||
|
||||
tool_call: async (data, context, get, set) => {
  // Registers (or replaces) a tool call emitted by a subagent under the
  // currently-active parent tool call, then mirrors it into the store's
  // flat toolCallsById map so the UI can resolve it.
  const parentToolCallId = context.subAgentParentToolCallId
  if (!parentToolCallId) return

  const toolData = asRecord(data?.data)
  const id: string | undefined = (toolData.id as string | undefined) || data?.toolCallId
  const name: string | undefined = (toolData.name as string | undefined) || data?.toolName
  if (!id || !name) return

  // Arguments may arrive as an object or as a JSON-encoded string.
  let args: Record<string, unknown> | undefined = (toolData.arguments || toolData.input) as
    | Record<string, unknown>
    | undefined
  if (typeof args === 'string') {
    try {
      args = JSON.parse(args) as Record<string, unknown>
    } catch {
      logger.warn('[SubAgent] Failed to parse arguments string', { args })
    }
  }

  logger.info('[SubAgent] tool_call received', {
    id,
    name,
    hasArgs: !!args,
    argsKeys: args ? Object.keys(args) : [],
    toolDataKeys: Object.keys(toolData),
    dataKeys: Object.keys(data ?? {}),
  })

  if (!context.subAgentToolCalls[parentToolCallId]) {
    context.subAgentToolCalls[parentToolCallId] = []
  }
  if (!context.subAgentBlocks[parentToolCallId]) {
    context.subAgentBlocks[parentToolCallId] = []
  }

  const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex(
    (tc: CopilotToolCall) => tc.id === id
  )
  const subAgentToolCall: CopilotToolCall = {
    id,
    name,
    state: ClientToolCallState.pending,
    ...(args ? { params: args } : {}),
    display: resolveToolDisplay(name, ClientToolCallState.pending, id, args),
  }

  if (existingIndex >= 0) {
    // Repeat event for a known call (e.g. streamed partial) — replace in place.
    context.subAgentToolCalls[parentToolCallId][existingIndex] = subAgentToolCall
  } else {
    context.subAgentToolCalls[parentToolCallId].push(subAgentToolCall)
    context.subAgentBlocks[parentToolCallId].push({
      type: 'subagent_tool_call',
      toolCall: subAgentToolCall,
      timestamp: Date.now(),
    })
  }

  // Mirror into the flat id -> toolCall map used for lookups.
  const { toolCallsById } = get()
  set({ toolCallsById: { ...toolCallsById, [id]: subAgentToolCall } })

  updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
  // FIX: the original ended with `const isPartial = toolData.partial === true`
  // followed by a trailing `if (isPartial) { return }` — a no-op, since the
  // function returned immediately afterwards regardless. Removed as dead code.
},
|
||||
|
||||
tool_result: (data, context, get, set) => {
  // Marks a previously-registered subagent tool call as succeeded or failed
  // and propagates the new state to the context bookkeeping and the store.
  const parentToolCallId = context.subAgentParentToolCallId
  if (!parentToolCallId) return

  const resultData = asRecord(data?.data)
  const toolCallId: string | undefined = data?.toolCallId || (resultData.id as string | undefined)
  // A missing success flag counts as success. FIX: the comparison always
  // yields a boolean — the original `boolean | undefined` annotation was wrong.
  const success = data?.success !== false
  if (!toolCallId) return

  if (!context.subAgentToolCalls[parentToolCallId]) return
  if (!context.subAgentBlocks[parentToolCallId]) return

  const targetState = success ? ClientToolCallState.success : ClientToolCallState.error
  const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex(
    (tc: CopilotToolCall) => tc.id === toolCallId
  )

  if (existingIndex >= 0) {
    const existing = context.subAgentToolCalls[parentToolCallId][existingIndex]
    const updatedSubAgentToolCall = {
      ...existing,
      state: targetState,
      display: resolveToolDisplay(existing.name, targetState, toolCallId, existing.params),
    }
    context.subAgentToolCalls[parentToolCallId][existingIndex] = updatedSubAgentToolCall

    // Keep the rendered block in sync with the updated call (first match only).
    for (const block of context.subAgentBlocks[parentToolCallId]) {
      if (block.type === 'subagent_tool_call' && block.toolCall?.id === toolCallId) {
        block.toolCall = updatedSubAgentToolCall
        break
      }
    }

    const { toolCallsById } = get()
    if (toolCallsById[toolCallId]) {
      set({ toolCallsById: { ...toolCallsById, [toolCallId]: updatedSubAgentToolCall } })
      logger.info('[SubAgent] Updated subagent tool call state in toolCallsById', {
        toolCallId,
        name: existing.name,
        state: targetState,
      })
    }
  }

  updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
},
|
||||
|
||||
done: (_data, context, get, set) => {
  // Final flush: push accumulated subagent data onto the parent tool call.
  const parentId = context.subAgentParentToolCallId
  if (parentId) {
    updateToolCallWithSubAgentData(context, get, set, parentId)
  }
},
|
||||
}
|
||||
|
||||
/**
 * Applies one normalized SSE event to the streaming context and store.
 *
 * Handles subagent lifecycle events (subagent_start / subagent_end) itself,
 * routes subagent-tagged events to the subagent handler table, and dispatches
 * everything else to the main handler table.
 *
 * @returns true to keep consuming the stream, false once the stream is complete.
 */
export async function applySseEvent(
  rawData: SSEEvent,
  context: ClientStreamingContext,
  get: () => CopilotStore,
  set: (next: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)) => void
): Promise<boolean> {
  // Normalize first so the dedup/skip checks see a canonical event shape.
  const normalizedEvent = normalizeSseEvent(rawData)
  if (shouldSkipToolCallEvent(normalizedEvent) || shouldSkipToolResultEvent(normalizedEvent)) {
    return true
  }
  const data = normalizedEvent

  // A subagent session is starting: remember which parent tool call to
  // attribute subsequent subagent events to, and flag it as streaming.
  if (data.type === 'subagent_start') {
    const startData = asRecord(data.data)
    const toolCallId = startData.tool_call_id as string | undefined
    if (toolCallId) {
      context.subAgentParentToolCallId = toolCallId
      const { toolCallsById } = get()
      const parentToolCall = toolCallsById[toolCallId]
      if (parentToolCall) {
        const updatedToolCall: CopilotToolCall = {
          ...parentToolCall,
          subAgentStreaming: true,
        }
        const updatedMap = { ...toolCallsById, [toolCallId]: updatedToolCall }
        set({ toolCallsById: updatedMap })
      }
      logger.info('[SSE] Subagent session started', {
        subagent: data.subagent,
        parentToolCallId: toolCallId,
      })
    }
    return true
  }

  // The subagent session ended: copy everything it accumulated (content,
  // tool calls, blocks) onto the parent tool call and clear the marker.
  if (data.type === 'subagent_end') {
    const parentToolCallId = context.subAgentParentToolCallId
    if (parentToolCallId) {
      const { toolCallsById } = get()
      const parentToolCall = toolCallsById[parentToolCallId]
      if (parentToolCall) {
        const updatedToolCall: CopilotToolCall = {
          ...parentToolCall,
          subAgentContent: context.subAgentContent[parentToolCallId] || '',
          subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] ?? [],
          subAgentBlocks: context.subAgentBlocks[parentToolCallId] ?? [],
          subAgentStreaming: false,
        }
        const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall }
        set({ toolCallsById: updatedMap })
        logger.info('[SSE] Subagent session ended', {
          subagent: data.subagent,
          parentToolCallId,
          contentLength: context.subAgentContent[parentToolCallId]?.length || 0,
          toolCallCount: context.subAgentToolCalls[parentToolCallId]?.length || 0,
        })
      }
    }
    // Cleared unconditionally so a stale id never leaks into the next session.
    context.subAgentParentToolCallId = undefined
    return true
  }

  // Any other event tagged with a subagent goes through the subagent table.
  if (data.subagent) {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) {
      // No active session — drop the event rather than misattribute it.
      logger.warn('[SSE] Subagent event without parent tool call ID', {
        type: data.type,
        subagent: data.subagent,
      })
      return true
    }

    logger.info('[SSE] Processing subagent event', {
      type: data.type,
      subagent: data.subagent,
      parentToolCallId,
      hasHandler: !!subAgentSSEHandlers[data.type],
    })

    const subAgentHandler = subAgentSSEHandlers[data.type]
    if (subAgentHandler) {
      await subAgentHandler(data, context, get, set)
    } else {
      logger.warn('[SSE] No handler for subagent event type', { type: data.type })
    }
    return !context.streamComplete
  }

  // Plain (non-subagent) events are dispatched via the main handler table.
  const handler = sseHandlers[data.type] || sseHandlers.default
  await handler(data, context, get, set)
  return !context.streamComplete
}
|
||||
45
apps/sim/lib/copilot/client-sse/types.ts
Normal file
45
apps/sim/lib/copilot/client-sse/types.ts
Normal file
@@ -0,0 +1,45 @@
|
||||
import type {
|
||||
ChatContext,
|
||||
CopilotToolCall,
|
||||
SubAgentContentBlock,
|
||||
} from '@/stores/panel/copilot/types'
|
||||
|
||||
/**
 * A content block used in copilot messages and during streaming.
 * Uses a literal type union for `type` to stay compatible with CopilotMessage.
 */
export type ContentBlockType = 'text' | 'thinking' | 'tool_call' | 'contexts'

export interface ClientContentBlock {
  /** Discriminator: which kind of block this is. */
  type: ContentBlockType
  /** Text payload — used by 'text'/'thinking' blocks. */
  content?: string
  /** Creation time in ms since epoch. */
  timestamp: number
  /** The associated tool call — present on 'tool_call' blocks. */
  toolCall?: CopilotToolCall | null
  // startTime/duration: presumably timing info for thinking blocks — confirm with renderer.
  startTime?: number
  duration?: number
  /** Attached chat contexts — present on 'contexts' blocks. */
  contexts?: ChatContext[]
}
|
||||
|
||||
/**
 * Mutable per-stream state threaded through every SSE handler while a
 * copilot response is being streamed into a single assistant message.
 */
export interface StreamingContext {
  /** Id of the assistant message being streamed into. */
  messageId: string
  /** Full text accumulated so far. */
  accumulatedContent: string
  /** Ordered content blocks built up during the stream. */
  contentBlocks: ClientContentBlock[]
  /** Block currently receiving plain text, if any. */
  currentTextBlock: ClientContentBlock | null
  /** True while inside a thinking section. */
  isInThinkingBlock: boolean
  /** Block currently receiving thinking text, if any. */
  currentThinkingBlock: ClientContentBlock | null
  // Design-workflow section tracking — semantics defined by the stream parser.
  isInDesignWorkflowBlock: boolean
  designWorkflowContent: string
  /** Text received but not yet flushed into a block. */
  pendingContent: string
  /** Chat id assigned by the server when this stream created a new chat. */
  newChatId?: string
  /** Number of 'done' events seen so far. */
  doneEventCount: number
  /** Set once the stream has fully completed. */
  streamComplete?: boolean
  /** Set when the user aborted the stream. */
  wasAborted?: boolean
  suppressContinueOption?: boolean
  /** Parent tool call id of the currently-active subagent session, if any. */
  subAgentParentToolCallId?: string
  /** Accumulated subagent text, keyed by parent tool call id. */
  subAgentContent: Record<string, string>
  /** Subagent tool calls, keyed by parent tool call id. */
  subAgentToolCalls: Record<string, CopilotToolCall[]>
  /** Subagent content blocks, keyed by parent tool call id. */
  subAgentBlocks: Record<string, SubAgentContentBlock[]>
  suppressStreamingUpdates?: boolean
}

/** Alias kept for callers that import the client-prefixed name. */
export type ClientStreamingContext = StreamingContext
|
||||
@@ -108,14 +108,14 @@ function parseBooleanEnv(value: string | undefined): boolean | null {
|
||||
export const DEFAULT_COPILOT_CONFIG: CopilotConfig = {
|
||||
chat: {
|
||||
defaultProvider: 'anthropic',
|
||||
defaultModel: 'claude-3-7-sonnet-latest',
|
||||
defaultModel: 'claude-4.6-opus',
|
||||
temperature: 0.1,
|
||||
maxTokens: 8192,
|
||||
systemPrompt: AGENT_MODE_SYSTEM_PROMPT,
|
||||
},
|
||||
rag: {
|
||||
defaultProvider: 'anthropic',
|
||||
defaultModel: 'claude-3-7-sonnet-latest',
|
||||
defaultModel: 'claude-4.6-opus',
|
||||
temperature: 0.1,
|
||||
maxTokens: 2000,
|
||||
embeddingModel: 'text-embedding-3-small',
|
||||
|
||||
@@ -1,2 +1,115 @@
|
||||
import { env } from '@/lib/core/config/env'
|
||||
|
||||
export const SIM_AGENT_API_URL_DEFAULT = 'https://copilot.sim.ai'
|
||||
export const SIM_AGENT_VERSION = '1.0.3'
|
||||
|
||||
/** Resolved copilot backend URL — reads from env with fallback to default. */
|
||||
const rawAgentUrl = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT
|
||||
export const SIM_AGENT_API_URL =
|
||||
rawAgentUrl.startsWith('http://') || rawAgentUrl.startsWith('https://')
|
||||
? rawAgentUrl
|
||||
: SIM_AGENT_API_URL_DEFAULT
|
||||
|
||||
// ---------------------------------------------------------------------------
// Redis key prefixes
// ---------------------------------------------------------------------------

/** Redis key prefix for tool call confirmation payloads (polled by waitForToolDecision). */
export const REDIS_TOOL_CALL_PREFIX = 'tool_call:'

/** Redis key prefix for copilot SSE stream buffers. */
export const REDIS_COPILOT_STREAM_PREFIX = 'copilot_stream:'

// ---------------------------------------------------------------------------
// Timeouts
// ---------------------------------------------------------------------------

/** Default timeout for the copilot orchestration stream loop (5 min). */
export const ORCHESTRATION_TIMEOUT_MS = 300_000

/** Timeout for the client-side streaming response handler (10 min). */
export const STREAM_TIMEOUT_MS = 600_000

/** TTL for Redis tool call confirmation entries (24 h). */
export const REDIS_TOOL_CALL_TTL_SECONDS = 86_400

// ---------------------------------------------------------------------------
// Tool decision polling
// ---------------------------------------------------------------------------

/** Initial poll interval when waiting for a user tool decision. */
export const TOOL_DECISION_INITIAL_POLL_MS = 100

/** Maximum poll interval when waiting for a user tool decision. */
export const TOOL_DECISION_MAX_POLL_MS = 3_000

/** Backoff multiplier for the tool decision poll interval (grows 100 ms toward the 3 s cap). */
export const TOOL_DECISION_POLL_BACKOFF = 1.5

// ---------------------------------------------------------------------------
// Stream resume
// ---------------------------------------------------------------------------

/** Maximum number of resume attempts before giving up. */
export const MAX_RESUME_ATTEMPTS = 3

/** SessionStorage key for persisting active stream metadata across page reloads. */
export const STREAM_STORAGE_KEY = 'copilot_active_stream'

// ---------------------------------------------------------------------------
// Client-side streaming batching
// ---------------------------------------------------------------------------

/** Delay (ms) before processing the next queued message after stream completion. */
export const QUEUE_PROCESS_DELAY_MS = 100

/** Delay (ms) before invalidating subscription queries after stream completion. */
export const SUBSCRIPTION_INVALIDATE_DELAY_MS = 1_000

// ---------------------------------------------------------------------------
// UI helpers
// ---------------------------------------------------------------------------

/** Maximum character length for an optimistic chat title derived from a user message. */
export const OPTIMISTIC_TITLE_MAX_LENGTH = 50

// ---------------------------------------------------------------------------
// Copilot API paths (client-side fetch targets)
// ---------------------------------------------------------------------------

/** POST — send a chat message to the copilot. */
export const COPILOT_CHAT_API_PATH = '/api/copilot/chat'

/** GET — resume/replay a copilot SSE stream. */
export const COPILOT_CHAT_STREAM_API_PATH = '/api/copilot/chat/stream'

/** POST — persist chat messages / plan artifact / config. */
export const COPILOT_UPDATE_MESSAGES_API_PATH = '/api/copilot/chat/update-messages'

/** DELETE — delete a copilot chat. */
export const COPILOT_DELETE_CHAT_API_PATH = '/api/copilot/chat/delete'

/** POST — confirm or reject a tool call. */
export const COPILOT_CONFIRM_API_PATH = '/api/copilot/confirm'

/** POST — forward diff-accepted/rejected stats to the copilot backend. */
export const COPILOT_STATS_API_PATH = '/api/copilot/stats'

/** GET — load checkpoints for a chat. */
export const COPILOT_CHECKPOINTS_API_PATH = '/api/copilot/checkpoints'

/** POST — revert to a checkpoint. */
export const COPILOT_CHECKPOINTS_REVERT_API_PATH = '/api/copilot/checkpoints/revert'

/** GET/POST/DELETE — manage auto-allowed tools. */
export const COPILOT_AUTO_ALLOWED_TOOLS_API_PATH = '/api/copilot/auto-allowed-tools'

/** GET — fetch user credentials for masking. */
export const COPILOT_CREDENTIALS_API_PATH = '/api/copilot/credentials'

// ---------------------------------------------------------------------------
// Dedup limits
// ---------------------------------------------------------------------------

/** Maximum entries in the in-memory SSE tool-event dedup cache. */
export const STREAM_BUFFER_MAX_DEDUP_ENTRIES = 1_000
|
||||
|
||||
129
apps/sim/lib/copilot/messages/checkpoints.ts
Normal file
129
apps/sim/lib/copilot/messages/checkpoints.ts
Normal file
@@ -0,0 +1,129 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { COPILOT_CHECKPOINTS_API_PATH } from '@/lib/copilot/constants'
|
||||
import type { CopilotMessage, CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types'
|
||||
import { mergeSubblockState } from '@/stores/workflows/utils'
|
||||
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
|
||||
import type { WorkflowState } from '@/stores/workflows/workflow/types'
|
||||
|
||||
const logger = createLogger('CopilotMessageCheckpoints')
|
||||
|
||||
export function buildCheckpointWorkflowState(workflowId: string): WorkflowState | null {
|
||||
const rawState = useWorkflowStore.getState().getWorkflowState()
|
||||
if (!rawState) return null
|
||||
|
||||
const blocksWithSubblockValues = mergeSubblockState(rawState.blocks, workflowId)
|
||||
|
||||
const filteredBlocks = Object.entries(blocksWithSubblockValues).reduce(
|
||||
(acc, [blockId, block]) => {
|
||||
if (block?.type && block?.name) {
|
||||
acc[blockId] = {
|
||||
...block,
|
||||
id: block.id || blockId,
|
||||
enabled: block.enabled !== undefined ? block.enabled : true,
|
||||
horizontalHandles: block.horizontalHandles !== undefined ? block.horizontalHandles : true,
|
||||
height: block.height !== undefined ? block.height : 90,
|
||||
subBlocks: block.subBlocks ?? {},
|
||||
outputs: block.outputs ?? {},
|
||||
data: block.data ?? {},
|
||||
position: block.position || { x: 0, y: 0 },
|
||||
}
|
||||
}
|
||||
return acc
|
||||
},
|
||||
{} as WorkflowState['blocks']
|
||||
)
|
||||
|
||||
return {
|
||||
blocks: filteredBlocks,
|
||||
edges: rawState.edges ?? [],
|
||||
loops: rawState.loops ?? {},
|
||||
parallels: rawState.parallels ?? {},
|
||||
lastSaved: rawState.lastSaved || Date.now(),
|
||||
deploymentStatuses: rawState.deploymentStatuses ?? {},
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Persists the workflow snapshot captured for `messageId` as a checkpoint.
 *
 * @param messageId - Message whose pending snapshot should be saved.
 * @param get - Store accessor.
 * @param set - Store updater.
 * @returns true on success; false when there is nothing to save or the request fails.
 */
export async function saveMessageCheckpoint(
  messageId: string,
  get: () => CopilotStore,
  set: (partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)) => void
): Promise<boolean> {
  const { workflowId, currentChat, messageSnapshots, messageCheckpoints } = get()
  if (!workflowId || !currentChat?.id) return false

  const snapshot = messageSnapshots[messageId]
  if (!snapshot) return false

  // Remove the snapshot from the store before the request so a concurrent
  // call cannot save it twice. NOTE(review): if the POST below fails the
  // snapshot is dropped rather than restored — confirm this is intended.
  const nextSnapshots = { ...messageSnapshots }
  delete nextSnapshots[messageId]
  set({ messageSnapshots: nextSnapshots })

  try {
    const response = await fetch(COPILOT_CHECKPOINTS_API_PATH, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        workflowId,
        chatId: currentChat.id,
        messageId,
        // The API takes the workflow state pre-serialized as a JSON string.
        workflowState: JSON.stringify(snapshot),
      }),
    })

    if (!response.ok) {
      throw new Error(`Failed to create checkpoint: ${response.statusText}`)
    }

    const result = await response.json()
    const newCheckpoint = result.checkpoint
    if (newCheckpoint) {
      // Prepend so the newest checkpoint is listed first.
      const existingCheckpoints = messageCheckpoints[messageId] ?? []
      const updatedCheckpoints = {
        ...messageCheckpoints,
        [messageId]: [newCheckpoint, ...existingCheckpoints],
      }
      set({ messageCheckpoints: updatedCheckpoints })
    }

    return true
  } catch (error) {
    logger.error('Failed to create checkpoint from snapshot:', error)
    return false
  }
}
|
||||
|
||||
export function extractToolCallsRecursively(
|
||||
toolCall: CopilotToolCall,
|
||||
map: Record<string, CopilotToolCall>
|
||||
): void {
|
||||
if (!toolCall?.id) return
|
||||
map[toolCall.id] = toolCall
|
||||
|
||||
if (Array.isArray(toolCall.subAgentBlocks)) {
|
||||
for (const block of toolCall.subAgentBlocks) {
|
||||
if (block?.type === 'subagent_tool_call' && block.toolCall?.id) {
|
||||
extractToolCallsRecursively(block.toolCall, map)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (Array.isArray(toolCall.subAgentToolCalls)) {
|
||||
for (const subTc of toolCall.subAgentToolCalls) {
|
||||
extractToolCallsRecursively(subTc, map)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export function buildToolCallsById(messages: CopilotMessage[]): Record<string, CopilotToolCall> {
|
||||
const toolCallsById: Record<string, CopilotToolCall> = {}
|
||||
for (const msg of messages) {
|
||||
if (msg.contentBlocks) {
|
||||
for (const block of msg.contentBlocks) {
|
||||
if (block?.type === 'tool_call' && block.toolCall?.id) {
|
||||
extractToolCallsRecursively(block.toolCall, toolCallsById)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return toolCallsById
|
||||
}
|
||||
28
apps/sim/lib/copilot/messages/credential-masking.ts
Normal file
28
apps/sim/lib/copilot/messages/credential-masking.ts
Normal file
@@ -0,0 +1,28 @@
|
||||
export function maskCredentialIdsInValue<T>(value: T, credentialIds: Set<string>): T {
|
||||
if (!value || credentialIds.size === 0) return value
|
||||
|
||||
if (typeof value === 'string') {
|
||||
let masked = value as string
|
||||
const sortedIds = Array.from(credentialIds).sort((a, b) => b.length - a.length)
|
||||
for (const id of sortedIds) {
|
||||
if (id && masked.includes(id)) {
|
||||
masked = masked.split(id).join('••••••••')
|
||||
}
|
||||
}
|
||||
return masked as unknown as T
|
||||
}
|
||||
|
||||
if (Array.isArray(value)) {
|
||||
return value.map((item) => maskCredentialIdsInValue(item, credentialIds)) as T
|
||||
}
|
||||
|
||||
if (typeof value === 'object') {
|
||||
const masked: Record<string, unknown> = {}
|
||||
for (const key of Object.keys(value as Record<string, unknown>)) {
|
||||
masked[key] = maskCredentialIdsInValue((value as Record<string, unknown>)[key], credentialIds)
|
||||
}
|
||||
return masked as T
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
4
apps/sim/lib/copilot/messages/index.ts
Normal file
4
apps/sim/lib/copilot/messages/index.ts
Normal file
@@ -0,0 +1,4 @@
|
||||
export * from './checkpoints'
|
||||
export * from './credential-masking'
|
||||
export * from './persist'
|
||||
export * from './serialization'
|
||||
43
apps/sim/lib/copilot/messages/persist.ts
Normal file
43
apps/sim/lib/copilot/messages/persist.ts
Normal file
@@ -0,0 +1,43 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { COPILOT_UPDATE_MESSAGES_API_PATH } from '@/lib/copilot/constants'
|
||||
import type { CopilotMessage } from '@/stores/panel/copilot/types'
|
||||
import { serializeMessagesForDB } from './serialization'
|
||||
|
||||
const logger = createLogger('CopilotMessagePersistence')
|
||||
|
||||
export async function persistMessages(params: {
|
||||
chatId: string
|
||||
messages: CopilotMessage[]
|
||||
sensitiveCredentialIds?: Set<string>
|
||||
planArtifact?: string | null
|
||||
mode?: string
|
||||
model?: string
|
||||
conversationId?: string
|
||||
}): Promise<boolean> {
|
||||
try {
|
||||
const dbMessages = serializeMessagesForDB(
|
||||
params.messages,
|
||||
params.sensitiveCredentialIds ?? new Set<string>()
|
||||
)
|
||||
const response = await fetch(COPILOT_UPDATE_MESSAGES_API_PATH, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
chatId: params.chatId,
|
||||
messages: dbMessages,
|
||||
...(params.planArtifact !== undefined ? { planArtifact: params.planArtifact } : {}),
|
||||
...(params.mode || params.model
|
||||
? { config: { mode: params.mode, model: params.model } }
|
||||
: {}),
|
||||
...(params.conversationId ? { conversationId: params.conversationId } : {}),
|
||||
}),
|
||||
})
|
||||
return response.ok
|
||||
} catch (error) {
|
||||
logger.warn('Failed to persist messages', {
|
||||
chatId: params.chatId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return false
|
||||
}
|
||||
}
|
||||
172
apps/sim/lib/copilot/messages/serialization.ts
Normal file
172
apps/sim/lib/copilot/messages/serialization.ts
Normal file
@@ -0,0 +1,172 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import type { CopilotMessage, CopilotToolCall } from '@/stores/panel/copilot/types'
|
||||
import { maskCredentialIdsInValue } from './credential-masking'
|
||||
|
||||
const logger = createLogger('CopilotMessageSerialization')
|
||||
|
||||
export function clearStreamingFlags(toolCall: CopilotToolCall): void {
|
||||
if (!toolCall) return
|
||||
|
||||
toolCall.subAgentStreaming = false
|
||||
|
||||
if (Array.isArray(toolCall.subAgentBlocks)) {
|
||||
for (const block of toolCall.subAgentBlocks) {
|
||||
if (block?.type === 'subagent_tool_call' && block.toolCall) {
|
||||
clearStreamingFlags(block.toolCall)
|
||||
}
|
||||
}
|
||||
}
|
||||
if (Array.isArray(toolCall.subAgentToolCalls)) {
|
||||
for (const subTc of toolCall.subAgentToolCalls) {
|
||||
clearStreamingFlags(subTc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export function normalizeMessagesForUI(messages: CopilotMessage[]): CopilotMessage[] {
|
||||
try {
|
||||
for (const message of messages) {
|
||||
if (message.role === 'assistant') {
|
||||
logger.debug('[normalizeMessagesForUI] Loading assistant message', {
|
||||
id: message.id,
|
||||
hasContent: !!message.content?.trim(),
|
||||
contentBlockCount: message.contentBlocks?.length || 0,
|
||||
contentBlockTypes: message.contentBlocks?.map((b) => b?.type) ?? [],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for (const message of messages) {
|
||||
if (message.contentBlocks) {
|
||||
for (const block of message.contentBlocks) {
|
||||
if (block?.type === 'tool_call' && block.toolCall) {
|
||||
clearStreamingFlags(block.toolCall)
|
||||
}
|
||||
}
|
||||
}
|
||||
if (message.toolCalls) {
|
||||
for (const toolCall of message.toolCalls) {
|
||||
clearStreamingFlags(toolCall)
|
||||
}
|
||||
}
|
||||
}
|
||||
return messages
|
||||
} catch (error) {
|
||||
logger.warn('[normalizeMessagesForUI] Failed to normalize messages', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return messages
|
||||
}
|
||||
}
|
||||
|
||||
export function deepClone<T>(obj: T): T {
|
||||
try {
|
||||
const json = JSON.stringify(obj)
|
||||
if (!json || json === 'undefined') {
|
||||
logger.warn('[deepClone] JSON.stringify returned empty for object', {
|
||||
type: typeof obj,
|
||||
isArray: Array.isArray(obj),
|
||||
length: Array.isArray(obj) ? obj.length : undefined,
|
||||
})
|
||||
return obj
|
||||
}
|
||||
const parsed = JSON.parse(json)
|
||||
if (Array.isArray(obj) && (!Array.isArray(parsed) || parsed.length !== obj.length)) {
|
||||
logger.warn('[deepClone] Array clone mismatch', {
|
||||
originalLength: obj.length,
|
||||
clonedLength: Array.isArray(parsed) ? parsed.length : 'not array',
|
||||
})
|
||||
}
|
||||
return parsed
|
||||
} catch (err) {
|
||||
logger.error('[deepClone] Failed to clone object', {
|
||||
error: String(err),
|
||||
type: typeof obj,
|
||||
isArray: Array.isArray(obj),
|
||||
})
|
||||
return obj
|
||||
}
|
||||
}
|
||||
|
||||
export function serializeMessagesForDB(
|
||||
messages: CopilotMessage[],
|
||||
credentialIds: Set<string>
|
||||
): CopilotMessage[] {
|
||||
const result = messages
|
||||
.map((msg) => {
|
||||
let timestamp: string = msg.timestamp
|
||||
if (typeof timestamp !== 'string') {
|
||||
const ts = timestamp as unknown
|
||||
timestamp = ts instanceof Date ? ts.toISOString() : new Date().toISOString()
|
||||
}
|
||||
|
||||
const serialized: CopilotMessage = {
|
||||
id: msg.id,
|
||||
role: msg.role,
|
||||
content: msg.content || '',
|
||||
timestamp,
|
||||
}
|
||||
|
||||
if (Array.isArray(msg.contentBlocks) && msg.contentBlocks.length > 0) {
|
||||
serialized.contentBlocks = deepClone(msg.contentBlocks)
|
||||
}
|
||||
|
||||
if (Array.isArray(msg.toolCalls) && msg.toolCalls.length > 0) {
|
||||
serialized.toolCalls = deepClone(msg.toolCalls)
|
||||
}
|
||||
|
||||
if (Array.isArray(msg.fileAttachments) && msg.fileAttachments.length > 0) {
|
||||
serialized.fileAttachments = deepClone(msg.fileAttachments)
|
||||
}
|
||||
|
||||
if (Array.isArray(msg.contexts) && msg.contexts.length > 0) {
|
||||
serialized.contexts = deepClone(msg.contexts)
|
||||
}
|
||||
|
||||
if (Array.isArray(msg.citations) && msg.citations.length > 0) {
|
||||
serialized.citations = deepClone(msg.citations)
|
||||
}
|
||||
|
||||
if (msg.errorType) {
|
||||
serialized.errorType = msg.errorType
|
||||
}
|
||||
|
||||
return maskCredentialIdsInValue(serialized, credentialIds)
|
||||
})
|
||||
.filter((msg) => {
|
||||
if (msg.role === 'assistant') {
|
||||
const hasContent = typeof msg.content === 'string' && msg.content.trim().length > 0
|
||||
const hasTools = Array.isArray(msg.toolCalls) && msg.toolCalls.length > 0
|
||||
const hasBlocks = Array.isArray(msg.contentBlocks) && msg.contentBlocks.length > 0
|
||||
return hasContent || hasTools || hasBlocks
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
for (const msg of messages) {
|
||||
if (msg.role === 'assistant') {
|
||||
logger.debug('[serializeMessagesForDB] Input assistant message', {
|
||||
id: msg.id,
|
||||
hasContent: !!msg.content?.trim(),
|
||||
contentBlockCount: msg.contentBlocks?.length || 0,
|
||||
contentBlockTypes: msg.contentBlocks?.map((b) => b?.type) ?? [],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
logger.debug('[serializeMessagesForDB] Serialized messages', {
|
||||
inputCount: messages.length,
|
||||
outputCount: result.length,
|
||||
sample:
|
||||
result.length > 0
|
||||
? {
|
||||
role: result[result.length - 1].role,
|
||||
hasContent: !!result[result.length - 1].content,
|
||||
contentBlockCount: result[result.length - 1].contentBlocks?.length || 0,
|
||||
toolCallCount: result[result.length - 1].toolCalls?.length || 0,
|
||||
}
|
||||
: null,
|
||||
})
|
||||
|
||||
return result
|
||||
}
|
||||
@@ -18,6 +18,7 @@ export const COPILOT_MODEL_IDS = [
|
||||
'claude-4-sonnet',
|
||||
'claude-4.5-haiku',
|
||||
'claude-4.5-sonnet',
|
||||
'claude-4.6-opus',
|
||||
'claude-4.5-opus',
|
||||
'claude-4.1-opus',
|
||||
'gemini-3-pro',
|
||||
|
||||
67
apps/sim/lib/copilot/orchestrator/config.ts
Normal file
67
apps/sim/lib/copilot/orchestrator/config.ts
Normal file
@@ -0,0 +1,67 @@
|
||||
/**
 * Tools treated as "interrupt" tools — presumably side-effecting operations
 * that pause the stream for user confirmation before executing.
 * NOTE(review): confirm exact semantics against the orchestrator's usage.
 */
export const INTERRUPT_TOOL_NAMES = [
  'set_global_workflow_variables',
  'run_workflow',
  'run_workflow_until_block',
  'run_from_block',
  'run_block',
  'manage_mcp_tool',
  'manage_custom_tool',
  'deploy_mcp',
  'deploy_chat',
  'deploy_api',
  'create_workspace_mcp_server',
  'set_environment_variables',
  'make_api_request',
  'oauth_request_access',
  'navigate_ui',
  'knowledge_base',
  'generate_api_key',
] as const

/** Set form of INTERRUPT_TOOL_NAMES for O(1) membership checks. */
export const INTERRUPT_TOOL_SET = new Set<string>(INTERRUPT_TOOL_NAMES)

/** Names of the copilot subagents that are exposed as tools. */
export const SUBAGENT_TOOL_NAMES = [
  'debug',
  'edit',
  'build',
  'plan',
  'test',
  'deploy',
  'auth',
  'research',
  'knowledge',
  'custom_tool',
  'tour',
  'info',
  'workflow',
  'evaluate',
  'superagent',
  'discovery',
] as const

/** Set form of SUBAGENT_TOOL_NAMES for O(1) membership checks. */
export const SUBAGENT_TOOL_SET = new Set<string>(SUBAGENT_TOOL_NAMES)

/**
 * Respond tools are internal to the copilot's subagent system.
 * They're used by subagents to signal completion and should NOT be executed by the sim side.
 * The copilot backend handles these internally.
 */
export const RESPOND_TOOL_NAMES = [
  'plan_respond',
  'edit_respond',
  'build_respond',
  'debug_respond',
  'info_respond',
  'research_respond',
  'deploy_respond',
  'superagent_respond',
  'discovery_respond',
  'tour_respond',
  'auth_respond',
  'workflow_respond',
  'knowledge_respond',
  'custom_tool_respond',
  'test_respond',
] as const

/** Set form of RESPOND_TOOL_NAMES for O(1) membership checks. */
export const RESPOND_TOOL_SET = new Set<string>(RESPOND_TOOL_NAMES)
|
||||
70
apps/sim/lib/copilot/orchestrator/index.ts
Normal file
70
apps/sim/lib/copilot/orchestrator/index.ts
Normal file
@@ -0,0 +1,70 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
|
||||
import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor'
|
||||
import type { OrchestratorOptions, OrchestratorResult } from '@/lib/copilot/orchestrator/types'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
import { buildToolCallSummaries, createStreamingContext, runStreamLoop } from './stream-core'
|
||||
|
||||
const logger = createLogger('CopilotOrchestrator')
|
||||
|
||||
/**
 * Options for orchestrateCopilotStream: the base orchestrator options plus
 * the identifiers used to prepare the server-side execution context.
 */
export interface OrchestrateStreamOptions extends OrchestratorOptions {
  /** User on whose behalf the execution context is prepared. */
  userId: string
  /** Workflow the copilot session operates on. */
  workflowId: string
  /** Existing chat to continue; the stream may assign/confirm a chat id. */
  chatId?: string
}
||||
|
||||
export async function orchestrateCopilotStream(
|
||||
requestPayload: Record<string, unknown>,
|
||||
options: OrchestrateStreamOptions
|
||||
): Promise<OrchestratorResult> {
|
||||
const { userId, workflowId, chatId } = options
|
||||
const execContext = await prepareExecutionContext(userId, workflowId)
|
||||
|
||||
const payloadMsgId = requestPayload?.messageId
|
||||
const context = createStreamingContext({
|
||||
chatId,
|
||||
messageId: typeof payloadMsgId === 'string' ? payloadMsgId : crypto.randomUUID(),
|
||||
})
|
||||
|
||||
try {
|
||||
await runStreamLoop(
|
||||
`${SIM_AGENT_API_URL}/api/chat-completion-streaming`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}),
|
||||
},
|
||||
body: JSON.stringify(requestPayload),
|
||||
},
|
||||
context,
|
||||
execContext,
|
||||
options
|
||||
)
|
||||
|
||||
const result: OrchestratorResult = {
|
||||
success: context.errors.length === 0,
|
||||
content: context.accumulatedContent,
|
||||
contentBlocks: context.contentBlocks,
|
||||
toolCalls: buildToolCallSummaries(context),
|
||||
chatId: context.chatId,
|
||||
conversationId: context.conversationId,
|
||||
errors: context.errors.length ? context.errors : undefined,
|
||||
}
|
||||
await options.onComplete?.(result)
|
||||
return result
|
||||
} catch (error) {
|
||||
const err = error instanceof Error ? error : new Error('Copilot orchestration failed')
|
||||
logger.error('Copilot orchestration failed', { error: err.message })
|
||||
await options.onError?.(err)
|
||||
return {
|
||||
success: false,
|
||||
content: '',
|
||||
contentBlocks: [],
|
||||
toolCalls: [],
|
||||
chatId: context.chatId,
|
||||
conversationId: context.conversationId,
|
||||
error: err.message,
|
||||
}
|
||||
}
|
||||
}
|
||||
29
apps/sim/lib/copilot/orchestrator/persistence.ts
Normal file
29
apps/sim/lib/copilot/orchestrator/persistence.ts
Normal file
@@ -0,0 +1,29 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { REDIS_TOOL_CALL_PREFIX } from '@/lib/copilot/constants'
|
||||
import { getRedisClient } from '@/lib/core/config/redis'
|
||||
|
||||
const logger = createLogger('CopilotOrchestratorPersistence')
|
||||
|
||||
/**
|
||||
* Get a tool call confirmation status from Redis.
|
||||
*/
|
||||
export async function getToolConfirmation(toolCallId: string): Promise<{
|
||||
status: string
|
||||
message?: string
|
||||
timestamp?: string
|
||||
} | null> {
|
||||
const redis = getRedisClient()
|
||||
if (!redis) return null
|
||||
|
||||
try {
|
||||
const data = await redis.get(`${REDIS_TOOL_CALL_PREFIX}${toolCallId}`)
|
||||
if (!data) return null
|
||||
return JSON.parse(data) as { status: string; message?: string; timestamp?: string }
|
||||
} catch (error) {
|
||||
logger.error('Failed to read tool confirmation', {
|
||||
toolCallId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return null
|
||||
}
|
||||
}
|
||||
95
apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts
Normal file
95
apps/sim/lib/copilot/orchestrator/sse-handlers.test.ts
Normal file
@@ -0,0 +1,95 @@
|
||||
/**
|
||||
* @vitest-environment node
|
||||
*/
|
||||
|
||||
import { loggerMock } from '@sim/testing'
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
vi.mock('@sim/logger', () => loggerMock)
|
||||
|
||||
const executeToolServerSide = vi.fn()
|
||||
const markToolComplete = vi.fn()
|
||||
|
||||
vi.mock('@/lib/copilot/orchestrator/tool-executor', () => ({
|
||||
executeToolServerSide,
|
||||
markToolComplete,
|
||||
}))
|
||||
|
||||
import { sseHandlers } from '@/lib/copilot/orchestrator/sse-handlers'
|
||||
import type { ExecutionContext, StreamingContext } from '@/lib/copilot/orchestrator/types'
|
||||
|
||||
describe('sse-handlers tool lifecycle', () => {
  let context: StreamingContext
  let execContext: ExecutionContext

  beforeEach(() => {
    vi.clearAllMocks()
    // Fresh streaming state per test so tool-call maps and dedupe state
    // from one test cannot leak into the next.
    context = {
      chatId: undefined,
      conversationId: undefined,
      messageId: 'msg-1',
      accumulatedContent: '',
      contentBlocks: [],
      toolCalls: new Map(),
      currentThinkingBlock: null,
      isInThinkingBlock: false,
      subAgentParentToolCallId: undefined,
      subAgentContent: {},
      subAgentToolCalls: {},
      pendingContent: '',
      streamComplete: false,
      wasAborted: false,
      errors: [],
    }
    execContext = {
      userId: 'user-1',
      workflowId: 'workflow-1',
    }
  })

  it('executes tool_call and emits tool_result + mark-complete', async () => {
    executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } })
    markToolComplete.mockResolvedValueOnce(true)
    const onEvent = vi.fn()

    // Non-interactive stream: the handler should execute immediately
    // without waiting for a user decision.
    await sseHandlers.tool_call(
      {
        type: 'tool_call',
        data: { id: 'tool-1', name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } },
      } as any,
      context,
      execContext,
      { onEvent, interactive: false, timeout: 1000 }
    )

    expect(executeToolServerSide).toHaveBeenCalledTimes(1)
    expect(markToolComplete).toHaveBeenCalledTimes(1)
    // A tool_result event must be surfaced to the caller's onEvent callback.
    expect(onEvent).toHaveBeenCalledWith(
      expect.objectContaining({
        type: 'tool_result',
        toolCallId: 'tool-1',
        success: true,
      })
    )

    // The streaming context's tool-call state must reflect the result.
    const updated = context.toolCalls.get('tool-1')
    expect(updated?.status).toBe('success')
    expect(updated?.result?.output).toEqual({ ok: true })
  })

  it('skips duplicate tool_call after result', async () => {
    executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } })
    markToolComplete.mockResolvedValueOnce(true)

    const event = {
      type: 'tool_call',
      data: { id: 'tool-dup', name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } },
    }

    // Replaying the same tool_call after completion must be a no-op:
    // no re-execution, no second mark-complete.
    await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })
    await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })

    expect(executeToolServerSide).toHaveBeenCalledTimes(1)
    expect(markToolComplete).toHaveBeenCalledTimes(1)
  })
})
|
||||
419
apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts
Normal file
419
apps/sim/lib/copilot/orchestrator/sse-handlers/handlers.ts
Normal file
@@ -0,0 +1,419 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { STREAM_TIMEOUT_MS } from '@/lib/copilot/constants'
|
||||
import { RESPOND_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrator/config'
|
||||
import {
|
||||
asRecord,
|
||||
getEventData,
|
||||
markToolResultSeen,
|
||||
wasToolResultSeen,
|
||||
} from '@/lib/copilot/orchestrator/sse-utils'
|
||||
import { markToolComplete } from '@/lib/copilot/orchestrator/tool-executor'
|
||||
import type {
|
||||
ContentBlock,
|
||||
ExecutionContext,
|
||||
OrchestratorOptions,
|
||||
SSEEvent,
|
||||
StreamingContext,
|
||||
ToolCallState,
|
||||
} from '@/lib/copilot/orchestrator/types'
|
||||
import { executeToolAndReport, isInterruptToolName, waitForToolDecision } from './tool-execution'
|
||||
|
||||
const logger = createLogger('CopilotSseHandlers')
|
||||
|
||||
// Normalization + dedupe helpers live in sse-utils to keep server/client in sync.
|
||||
|
||||
function inferToolSuccess(data: Record<string, unknown> | undefined): {
|
||||
success: boolean
|
||||
hasResultData: boolean
|
||||
hasError: boolean
|
||||
} {
|
||||
const resultObj = asRecord(data?.result)
|
||||
const hasExplicitSuccess = data?.success !== undefined || resultObj.success !== undefined
|
||||
const explicitSuccess = data?.success ?? resultObj.success
|
||||
const hasResultData = data?.result !== undefined || data?.data !== undefined
|
||||
const hasError = !!data?.error || !!resultObj.error
|
||||
const success = hasExplicitSuccess ? !!explicitSuccess : hasResultData && !hasError
|
||||
return { success, hasResultData, hasError }
|
||||
}
|
||||
|
||||
/**
 * Signature shared by every SSE event handler: the raw event, the mutable
 * streaming context, the server-side execution context, and the orchestrator
 * options (callbacks, interactivity flag, timeouts).
 */
export type SSEHandler = (
  event: SSEEvent,
  context: StreamingContext,
  execContext: ExecutionContext,
  options: OrchestratorOptions
) => void | Promise<void>
|
||||
function addContentBlock(context: StreamingContext, block: Omit<ContentBlock, 'timestamp'>): void {
|
||||
context.contentBlocks.push({
|
||||
...block,
|
||||
timestamp: Date.now(),
|
||||
})
|
||||
}
|
||||
|
||||
/**
 * Handler table for top-level SSE events from the copilot backend, keyed by
 * event type. Handlers mutate the StreamingContext in place; tool_call may
 * also execute tools server-side and emit synthetic tool_result events via
 * options.onEvent.
 */
export const sseHandlers: Record<string, SSEHandler> = {
  // Backend assigned/confirmed the chat id for this stream.
  chat_id: (event, context) => {
    context.chatId = asRecord(event.data).chatId as string | undefined
  },
  // Intentionally a no-op on the server side.
  title_updated: () => {},
  // Result for a tool we already track: update its status/result/error.
  tool_result: (event, context) => {
    const data = getEventData(event)
    const toolCallId = event.toolCallId || (data?.id as string | undefined)
    if (!toolCallId) return
    const current = context.toolCalls.get(toolCallId)
    if (!current) return

    const { success, hasResultData, hasError } = inferToolSuccess(data)

    current.status = success ? 'success' : 'error'
    current.endTime = Date.now()
    if (hasResultData) {
      current.result = {
        success,
        output: data?.result || data?.data,
      }
    }
    if (hasError) {
      const resultObj = asRecord(data?.result)
      current.error = (data?.error || resultObj.error) as string | undefined
    }
  },
  // Backend-reported failure for a tracked tool call.
  tool_error: (event, context) => {
    const data = getEventData(event)
    const toolCallId = event.toolCallId || (data?.id as string | undefined)
    if (!toolCallId) return
    const current = context.toolCalls.get(toolCallId)
    if (!current) return
    current.status = 'error'
    current.error = (data?.error as string | undefined) || 'Tool execution failed'
    current.endTime = Date.now()
  },
  // The model started generating a tool call; register it as pending so
  // later tool_call/tool_result events have state to attach to.
  tool_generating: (event, context) => {
    const data = getEventData(event)
    const toolCallId =
      event.toolCallId ||
      (data?.toolCallId as string | undefined) ||
      (data?.id as string | undefined)
    const toolName =
      event.toolName || (data?.toolName as string | undefined) || (data?.name as string | undefined)
    if (!toolCallId || !toolName) return
    if (!context.toolCalls.has(toolCallId)) {
      context.toolCalls.set(toolCallId, {
        id: toolCallId,
        name: toolName,
        status: 'pending',
        startTime: Date.now(),
      })
    }
  },
  // Full tool call: register state, then decide whether/how to execute it
  // (subagent tools and respond tools are never executed on the sim side;
  // interrupt tools in interactive mode first wait for a user decision).
  tool_call: async (event, context, execContext, options) => {
    const toolData = getEventData(event) || ({} as Record<string, unknown>)
    const toolCallId = (toolData.id as string | undefined) || event.toolCallId
    const toolName = (toolData.name as string | undefined) || event.toolName
    if (!toolCallId || !toolName) return

    const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as
      | Record<string, unknown>
      | undefined
    const isPartial = toolData.partial === true
    const existing = context.toolCalls.get(toolCallId)

    // If we've already completed this tool call, ignore late/duplicate tool_call events
    // to avoid resetting UI/state back to pending and re-executing.
    if (
      existing?.endTime ||
      (existing && existing.status !== 'pending' && existing.status !== 'executing')
    ) {
      if (!existing.params && args) {
        existing.params = args
      }
      return
    }

    if (existing) {
      // Backfill arguments on an already-registered (e.g. tool_generating) call.
      if (args && !existing.params) existing.params = args
    } else {
      context.toolCalls.set(toolCallId, {
        id: toolCallId,
        name: toolName,
        status: 'pending',
        params: args,
        startTime: Date.now(),
      })
      const created = context.toolCalls.get(toolCallId)!
      addContentBlock(context, { type: 'tool_call', toolCall: created })
    }

    // Partial (still-streaming) calls and calls we already have a result for
    // are not executed.
    if (isPartial) return
    if (wasToolResultSeen(toolCallId)) return

    const toolCall = context.toolCalls.get(toolCallId)
    if (!toolCall) return

    // Subagent tools are executed by the copilot backend, not sim side.
    if (SUBAGENT_TOOL_SET.has(toolName)) {
      return
    }

    // Respond tools are internal to copilot's subagent system - skip execution.
    // The copilot backend handles these internally to signal subagent completion.
    if (RESPOND_TOOL_SET.has(toolName)) {
      toolCall.status = 'success'
      toolCall.endTime = Date.now()
      toolCall.result = {
        success: true,
        output: 'Internal respond tool - handled by copilot backend',
      }
      return
    }

    const isInterruptTool = isInterruptToolName(toolName)
    const isInteractive = options.interactive === true

    if (isInterruptTool && isInteractive) {
      // Poll for the user's accept/reject/background decision.
      const decision = await waitForToolDecision(
        toolCallId,
        options.timeout || STREAM_TIMEOUT_MS,
        options.abortSignal
      )
      if (decision?.status === 'accepted' || decision?.status === 'success') {
        await executeToolAndReport(toolCallId, context, execContext, options)
        return
      }

      // Rejected: mark complete with a 400 and emit a skipped tool_result.
      if (decision?.status === 'rejected' || decision?.status === 'error') {
        toolCall.status = 'rejected'
        toolCall.endTime = Date.now()
        await markToolComplete(
          toolCall.id,
          toolCall.name,
          400,
          decision.message || 'Tool execution rejected',
          { skipped: true, reason: 'user_rejected' }
        )
        markToolResultSeen(toolCall.id)
        await options.onEvent?.({
          type: 'tool_result',
          toolCallId: toolCall.id,
          data: {
            id: toolCall.id,
            name: toolCall.name,
            success: false,
            result: { skipped: true, reason: 'user_rejected' },
          },
        })
        return
      }

      // Moved to background: mark complete with a 202 and emit a successful
      // background tool_result.
      if (decision?.status === 'background') {
        toolCall.status = 'skipped'
        toolCall.endTime = Date.now()
        await markToolComplete(
          toolCall.id,
          toolCall.name,
          202,
          decision.message || 'Tool execution moved to background',
          { background: true }
        )
        markToolResultSeen(toolCall.id)
        await options.onEvent?.({
          type: 'tool_result',
          toolCallId: toolCall.id,
          data: {
            id: toolCall.id,
            name: toolCall.name,
            success: true,
            result: { background: true },
          },
        })
        return
      }
      // NOTE(review): on decision timeout (decision == null) execution falls
      // through to the auto-execute path below — confirm this is intended.
    }

    if (options.autoExecuteTools !== false) {
      await executeToolAndReport(toolCallId, context, execContext, options)
    }
  },
  // Thinking/reasoning stream: start opens a thinking block, chunks append
  // to it, end pushes it onto contentBlocks.
  reasoning: (event, context) => {
    const d = asRecord(event.data)
    const phase = d.phase || asRecord(d.data).phase
    if (phase === 'start') {
      context.isInThinkingBlock = true
      context.currentThinkingBlock = {
        type: 'thinking',
        content: '',
        timestamp: Date.now(),
      }
      return
    }
    if (phase === 'end') {
      if (context.currentThinkingBlock) {
        context.contentBlocks.push(context.currentThinkingBlock)
      }
      context.isInThinkingBlock = false
      context.currentThinkingBlock = null
      return
    }
    const chunk = (d.data || d.content || event.content) as string | undefined
    if (!chunk || !context.currentThinkingBlock) return
    context.currentThinkingBlock.content = `${context.currentThinkingBlock.content || ''}${chunk}`
  },
  content: (event, context) => {
    // Go backend sends content as a plain string in event.data, not wrapped in an object.
    let chunk: string | undefined
    if (typeof event.data === 'string') {
      chunk = event.data
    } else {
      const d = asRecord(event.data)
      chunk = (d.content || d.data || event.content) as string | undefined
    }
    if (!chunk) return
    context.accumulatedContent += chunk
    addContentBlock(context, { type: 'text', content: chunk })
  },
  // Stream finished; capture the conversation id if provided.
  done: (event, context) => {
    const d = asRecord(event.data)
    if (d.responseId) {
      context.conversationId = d.responseId as string
    }
    context.streamComplete = true
  },
  // Stream started; capture the conversation id if provided.
  start: (event, context) => {
    const d = asRecord(event.data)
    if (d.responseId) {
      context.conversationId = d.responseId as string
    }
  },
  // Stream-level error: record the message and treat the stream as complete.
  error: (event, context) => {
    const d = asRecord(event.data)
    const message = (d.message || d.error || event.error) as string | undefined
    if (message) {
      context.errors.push(message)
    }
    context.streamComplete = true
  },
}
||||
|
||||
/**
 * Handler table for SSE events tagged as coming from a subagent. All state is
 * attributed to the parent tool call recorded in
 * context.subAgentParentToolCallId; events without one are dropped.
 */
export const subAgentHandlers: Record<string, SSEHandler> = {
  // Accumulate subagent text under the parent tool call and mirror it into
  // the content-block stream.
  content: (event, context) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId || !event.data) return
    // Go backend sends content as a plain string in event.data
    let chunk: string | undefined
    if (typeof event.data === 'string') {
      chunk = event.data
    } else {
      const d = asRecord(event.data)
      chunk = (d.content || d.data || event.content) as string | undefined
    }
    if (!chunk) return
    context.subAgentContent[parentToolCallId] =
      (context.subAgentContent[parentToolCallId] || '') + chunk
    addContentBlock(context, { type: 'subagent_text', content: chunk })
  },
  // Tool call issued by a subagent: track it under the parent, skip respond
  // tools, and execute the rest server-side (when auto-execution is on).
  tool_call: async (event, context, execContext, options) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return
    const toolData = getEventData(event) || ({} as Record<string, unknown>)
    const toolCallId = (toolData.id as string | undefined) || event.toolCallId
    const toolName = (toolData.name as string | undefined) || event.toolName
    if (!toolCallId || !toolName) return
    const isPartial = toolData.partial === true
    const args = (toolData.arguments || toolData.input || asRecord(event.data).input) as
      | Record<string, unknown>
      | undefined

    const existing = context.toolCalls.get(toolCallId)
    // Ignore late/duplicate tool_call events once we already have a result.
    if (wasToolResultSeen(toolCallId) || existing?.endTime) {
      return
    }

    const toolCall: ToolCallState = {
      id: toolCallId,
      name: toolName,
      status: 'pending',
      params: args,
      startTime: Date.now(),
    }

    // Store in both places - but do NOT overwrite existing tool call state for the same id.
    if (!context.subAgentToolCalls[parentToolCallId]) {
      context.subAgentToolCalls[parentToolCallId] = []
    }
    if (!context.subAgentToolCalls[parentToolCallId].some((tc) => tc.id === toolCallId)) {
      context.subAgentToolCalls[parentToolCallId].push(toolCall)
    }
    if (!context.toolCalls.has(toolCallId)) {
      context.toolCalls.set(toolCallId, toolCall)
    }

    if (isPartial) return

    // Respond tools are internal to copilot's subagent system - skip execution.
    if (RESPOND_TOOL_SET.has(toolName)) {
      toolCall.status = 'success'
      toolCall.endTime = Date.now()
      toolCall.result = {
        success: true,
        output: 'Internal respond tool - handled by copilot backend',
      }
      return
    }

    if (options.autoExecuteTools !== false) {
      await executeToolAndReport(toolCallId, context, execContext, options)
    }
  },
  // Result for a subagent tool: apply the same status/result/error update to
  // both the per-parent list and the main toolCalls map.
  tool_result: (event, context) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return
    const data = getEventData(event)
    const toolCallId = event.toolCallId || (data?.id as string | undefined)
    if (!toolCallId) return

    // Update in subAgentToolCalls.
    const toolCalls = context.subAgentToolCalls[parentToolCallId] || []
    const subAgentToolCall = toolCalls.find((tc) => tc.id === toolCallId)

    // Also update in main toolCalls (where we added it for execution).
    const mainToolCall = context.toolCalls.get(toolCallId)

    const { success, hasResultData, hasError } = inferToolSuccess(data)

    const status = success ? 'success' : 'error'
    const endTime = Date.now()
    const result = hasResultData ? { success, output: data?.result || data?.data } : undefined

    if (subAgentToolCall) {
      subAgentToolCall.status = status
      subAgentToolCall.endTime = endTime
      if (result) subAgentToolCall.result = result
      if (hasError) {
        const resultObj = asRecord(data?.result)
        subAgentToolCall.error = (data?.error || resultObj.error) as string | undefined
      }
    }

    if (mainToolCall) {
      mainToolCall.status = status
      mainToolCall.endTime = endTime
      if (result) mainToolCall.result = result
      if (hasError) {
        const resultObj = asRecord(data?.result)
        mainToolCall.error = (data?.error || resultObj.error) as string | undefined
      }
    }
  },
}
|
||||
|
||||
export function handleSubagentRouting(event: SSEEvent, context: StreamingContext): boolean {
|
||||
if (!event.subagent) return false
|
||||
if (!context.subAgentParentToolCallId) {
|
||||
logger.warn('Subagent event missing parent tool call', {
|
||||
type: event.type,
|
||||
subagent: event.subagent,
|
||||
})
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
2
apps/sim/lib/copilot/orchestrator/sse-handlers/index.ts
Normal file
2
apps/sim/lib/copilot/orchestrator/sse-handlers/index.ts
Normal file
@@ -0,0 +1,2 @@
|
||||
export type { SSEHandler } from './handlers'
|
||||
export { handleSubagentRouting, sseHandlers, subAgentHandlers } from './handlers'
|
||||
127
apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts
Normal file
127
apps/sim/lib/copilot/orchestrator/sse-handlers/tool-execution.ts
Normal file
@@ -0,0 +1,127 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import {
|
||||
TOOL_DECISION_INITIAL_POLL_MS,
|
||||
TOOL_DECISION_MAX_POLL_MS,
|
||||
TOOL_DECISION_POLL_BACKOFF,
|
||||
} from '@/lib/copilot/constants'
|
||||
import { INTERRUPT_TOOL_SET } from '@/lib/copilot/orchestrator/config'
|
||||
import { getToolConfirmation } from '@/lib/copilot/orchestrator/persistence'
|
||||
import {
|
||||
asRecord,
|
||||
markToolResultSeen,
|
||||
wasToolResultSeen,
|
||||
} from '@/lib/copilot/orchestrator/sse-utils'
|
||||
import { executeToolServerSide, markToolComplete } from '@/lib/copilot/orchestrator/tool-executor'
|
||||
import type {
|
||||
ExecutionContext,
|
||||
OrchestratorOptions,
|
||||
SSEEvent,
|
||||
StreamingContext,
|
||||
} from '@/lib/copilot/orchestrator/types'
|
||||
|
||||
const logger = createLogger('CopilotSseToolExecution')
|
||||
|
||||
/** True when the named tool requires a user decision before execution (interrupt tool). */
export function isInterruptToolName(toolName: string): boolean {
  return INTERRUPT_TOOL_SET.has(toolName)
}
|
||||
|
||||
/**
 * Executes a tracked tool call server-side and reports the outcome.
 *
 * Updates the tool-call state in the streaming context, marks the call
 * complete on the copilot backend, records it in the dedupe cache, and emits
 * a synthetic tool_result (or tool_error) event via options.onEvent.
 * No-ops when the call is unknown, already executing, or already resolved.
 */
export async function executeToolAndReport(
  toolCallId: string,
  context: StreamingContext,
  execContext: ExecutionContext,
  options?: OrchestratorOptions
): Promise<void> {
  const toolCall = context.toolCalls.get(toolCallId)
  if (!toolCall) return

  // Re-entrancy / duplicate guards.
  if (toolCall.status === 'executing') return
  if (wasToolResultSeen(toolCall.id)) return

  toolCall.status = 'executing'
  try {
    const result = await executeToolServerSide(toolCall, execContext)
    toolCall.status = result.success ? 'success' : 'error'
    toolCall.result = result
    toolCall.error = result.error
    toolCall.endTime = Date.now()

    // If create_workflow was successful, update the execution context with the new workflowId.
    // This ensures subsequent tools in the same stream have access to the workflowId.
    const output = asRecord(result.output)
    if (
      toolCall.name === 'create_workflow' &&
      result.success &&
      output.workflowId &&
      !execContext.workflowId
    ) {
      execContext.workflowId = output.workflowId as string
      if (output.workspaceId) {
        execContext.workspaceId = output.workspaceId as string
      }
    }

    // Record the result BEFORE notifying the backend so duplicate events
    // arriving mid-notification are still deduped.
    markToolResultSeen(toolCall.id)

    await markToolComplete(
      toolCall.id,
      toolCall.name,
      result.success ? 200 : 500,
      result.error || (result.success ? 'Tool completed' : 'Tool failed'),
      result.output
    )

    // Surface the result to the caller with the payload duplicated at both
    // the top level and in `data`, matching backend-originated events.
    const resultEvent: SSEEvent = {
      type: 'tool_result',
      toolCallId: toolCall.id,
      toolName: toolCall.name,
      success: result.success,
      result: result.output,
      data: {
        id: toolCall.id,
        name: toolCall.name,
        success: result.success,
        result: result.output,
      },
    }
    await options?.onEvent?.(resultEvent)
  } catch (error) {
    toolCall.status = 'error'
    toolCall.error = error instanceof Error ? error.message : String(error)
    toolCall.endTime = Date.now()

    markToolResultSeen(toolCall.id)

    await markToolComplete(toolCall.id, toolCall.name, 500, toolCall.error)

    const errorEvent: SSEEvent = {
      type: 'tool_error',
      toolCallId: toolCall.id,
      data: {
        id: toolCall.id,
        name: toolCall.name,
        error: toolCall.error,
      },
    }
    await options?.onEvent?.(errorEvent)
  }
}
||||
|
||||
export async function waitForToolDecision(
|
||||
toolCallId: string,
|
||||
timeoutMs: number,
|
||||
abortSignal?: AbortSignal
|
||||
): Promise<{ status: string; message?: string } | null> {
|
||||
const start = Date.now()
|
||||
let interval = TOOL_DECISION_INITIAL_POLL_MS
|
||||
const maxInterval = TOOL_DECISION_MAX_POLL_MS
|
||||
while (Date.now() - start < timeoutMs) {
|
||||
if (abortSignal?.aborted) return null
|
||||
const decision = await getToolConfirmation(toolCallId)
|
||||
if (decision?.status) {
|
||||
return decision
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, interval))
|
||||
interval = Math.min(interval * TOOL_DECISION_POLL_BACKOFF, maxInterval)
|
||||
}
|
||||
return null
|
||||
}
|
||||
71
apps/sim/lib/copilot/orchestrator/sse-parser.ts
Normal file
71
apps/sim/lib/copilot/orchestrator/sse-parser.ts
Normal file
@@ -0,0 +1,71 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
|
||||
|
||||
const logger = createLogger('CopilotSseParser')
|
||||
|
||||
/**
|
||||
* Parses SSE streams from the copilot backend into typed events.
|
||||
*/
|
||||
export async function* parseSSEStream(
|
||||
reader: ReadableStreamDefaultReader<Uint8Array>,
|
||||
decoder: TextDecoder,
|
||||
abortSignal?: AbortSignal
|
||||
): AsyncGenerator<SSEEvent> {
|
||||
let buffer = ''
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
if (abortSignal?.aborted) {
|
||||
logger.info('SSE stream aborted by signal')
|
||||
break
|
||||
}
|
||||
|
||||
const { done, value } = await reader.read()
|
||||
if (done) break
|
||||
|
||||
buffer += decoder.decode(value, { stream: true })
|
||||
const lines = buffer.split('\n')
|
||||
buffer = lines.pop() || ''
|
||||
|
||||
for (const line of lines) {
|
||||
if (!line.trim()) continue
|
||||
if (!line.startsWith('data: ')) continue
|
||||
|
||||
const jsonStr = line.slice(6)
|
||||
if (jsonStr === '[DONE]') continue
|
||||
|
||||
try {
|
||||
const event = JSON.parse(jsonStr) as SSEEvent
|
||||
if (event?.type) {
|
||||
yield event
|
||||
}
|
||||
} catch (error) {
|
||||
logger.warn('Failed to parse SSE event', {
|
||||
preview: jsonStr.slice(0, 200),
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (buffer.trim() && buffer.startsWith('data: ')) {
|
||||
try {
|
||||
const event = JSON.parse(buffer.slice(6)) as SSEEvent
|
||||
if (event?.type) {
|
||||
yield event
|
||||
}
|
||||
} catch (error) {
|
||||
logger.warn('Failed to parse final SSE buffer', {
|
||||
preview: buffer.slice(0, 200),
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
try {
|
||||
reader.releaseLock()
|
||||
} catch {
|
||||
logger.warn('Failed to release SSE reader lock')
|
||||
}
|
||||
}
|
||||
}
|
||||
42
apps/sim/lib/copilot/orchestrator/sse-utils.test.ts
Normal file
42
apps/sim/lib/copilot/orchestrator/sse-utils.test.ts
Normal file
@@ -0,0 +1,42 @@
|
||||
/**
|
||||
* @vitest-environment node
|
||||
*/
|
||||
import { describe, expect, it } from 'vitest'
|
||||
import {
|
||||
normalizeSseEvent,
|
||||
shouldSkipToolCallEvent,
|
||||
shouldSkipToolResultEvent,
|
||||
} from '@/lib/copilot/orchestrator/sse-utils'
|
||||
|
||||
describe('sse-utils', () => {
  it.concurrent('normalizes tool fields from string data', () => {
    // event.data arrives as a JSON string (as the Go backend sends it);
    // normalization must lift tool metadata to the event's top level.
    const event = {
      type: 'tool_result',
      data: JSON.stringify({
        id: 'tool_1',
        name: 'edit_workflow',
        success: true,
        result: { ok: true },
      }),
    }

    const normalized = normalizeSseEvent(event as any)

    expect(normalized.toolCallId).toBe('tool_1')
    expect(normalized.toolName).toBe('edit_workflow')
    expect(normalized.success).toBe(true)
    expect(normalized.result).toEqual({ ok: true })
  })

  // The dedupe caches are process-local module state, so the second call
  // with the same id must be reported as a duplicate.
  it.concurrent('dedupes tool_call events', () => {
    const event = { type: 'tool_call', data: { id: 'tool_call_1', name: 'plan' } }
    expect(shouldSkipToolCallEvent(event as any)).toBe(false)
    expect(shouldSkipToolCallEvent(event as any)).toBe(true)
  })

  it.concurrent('dedupes tool_result events', () => {
    const event = { type: 'tool_result', data: { id: 'tool_result_1', name: 'plan' } }
    expect(shouldSkipToolResultEvent(event as any)).toBe(false)
    expect(shouldSkipToolResultEvent(event as any)).toBe(true)
  })
})
|
||||
124
apps/sim/lib/copilot/orchestrator/sse-utils.ts
Normal file
124
apps/sim/lib/copilot/orchestrator/sse-utils.ts
Normal file
@@ -0,0 +1,124 @@
|
||||
import { STREAM_BUFFER_MAX_DEDUP_ENTRIES } from '@/lib/copilot/constants'
|
||||
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
|
||||
|
||||
type EventDataObject = Record<string, unknown> | undefined
|
||||
|
||||
/** Safely cast event.data to a record for property access. */
|
||||
export const asRecord = (data: unknown): Record<string, unknown> =>
|
||||
(data && typeof data === 'object' && !Array.isArray(data) ? data : {}) as Record<string, unknown>
|
||||
|
||||
/**
 * In-memory tool event dedupe with bounded size.
 *
 * NOTE: Process-local only. In a multi-instance setup (e.g., ECS),
 * each task maintains its own dedupe cache.
 */
const seenToolCalls = new Set<string>()
const seenToolResults = new Set<string>()

// Adds an id, first evicting the oldest entry (Sets iterate in insertion
// order) once the cap is reached so the cache stays bounded.
function addToSet(set: Set<string>, id: string): void {
  if (set.size >= STREAM_BUFFER_MAX_DEDUP_ENTRIES) {
    const first = set.values().next().value
    if (first) set.delete(first)
  }
  set.add(id)
}
|
||||
|
||||
const parseEventData = (data: unknown): EventDataObject => {
|
||||
if (!data) return undefined
|
||||
if (typeof data !== 'string') {
|
||||
return data as EventDataObject
|
||||
}
|
||||
try {
|
||||
return JSON.parse(data) as EventDataObject
|
||||
} catch {
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
|
||||
const hasToolFields = (data: EventDataObject): boolean => {
|
||||
if (!data) return false
|
||||
return (
|
||||
data.id !== undefined ||
|
||||
data.toolCallId !== undefined ||
|
||||
data.name !== undefined ||
|
||||
data.success !== undefined ||
|
||||
data.result !== undefined ||
|
||||
data.arguments !== undefined
|
||||
)
|
||||
}
|
||||
|
||||
export const getEventData = (event: SSEEvent): EventDataObject => {
|
||||
const topLevel = parseEventData(event.data)
|
||||
if (!topLevel) return undefined
|
||||
if (hasToolFields(topLevel)) return topLevel
|
||||
const nested = parseEventData(topLevel.data)
|
||||
return nested || topLevel
|
||||
}
|
||||
|
||||
function getToolCallIdFromEvent(event: SSEEvent): string | undefined {
|
||||
const data = getEventData(event)
|
||||
return (
|
||||
event.toolCallId || (data?.id as string | undefined) || (data?.toolCallId as string | undefined)
|
||||
)
|
||||
}
|
||||
|
||||
/** Normalizes SSE events so tool metadata is available at the top level. */
|
||||
export function normalizeSseEvent(event: SSEEvent): SSEEvent {
|
||||
if (!event) return event
|
||||
const data = getEventData(event)
|
||||
if (!data) return event
|
||||
const toolCallId =
|
||||
event.toolCallId || (data.id as string | undefined) || (data.toolCallId as string | undefined)
|
||||
const toolName =
|
||||
event.toolName || (data.name as string | undefined) || (data.toolName as string | undefined)
|
||||
const success = event.success ?? (data.success as boolean | undefined)
|
||||
const result = event.result ?? data.result
|
||||
const normalizedData = typeof event.data === 'string' ? data : event.data
|
||||
return {
|
||||
...event,
|
||||
data: normalizedData,
|
||||
toolCallId,
|
||||
toolName,
|
||||
success,
|
||||
result,
|
||||
}
|
||||
}
|
||||
|
||||
/** Record that a tool_call with this id has been forwarded. */
function markToolCallSeen(toolCallId: string): void {
  addToSet(seenToolCalls, toolCallId)
}

/** Whether a tool_call with this id was already forwarded. */
function wasToolCallSeen(toolCallId: string): boolean {
  return seenToolCalls.has(toolCallId)
}

/** Record that a tool_result with this id has been forwarded. */
export function markToolResultSeen(toolCallId: string): void {
  addToSet(seenToolResults, toolCallId)
}

/** Whether a tool_result with this id was already forwarded. */
export function wasToolResultSeen(toolCallId: string): boolean {
  return seenToolResults.has(toolCallId)
}
|
||||
|
||||
export function shouldSkipToolCallEvent(event: SSEEvent): boolean {
|
||||
if (event.type !== 'tool_call') return false
|
||||
const toolCallId = getToolCallIdFromEvent(event)
|
||||
if (!toolCallId) return false
|
||||
const eventData = getEventData(event)
|
||||
if (eventData?.partial === true) return false
|
||||
if (wasToolResultSeen(toolCallId) || wasToolCallSeen(toolCallId)) {
|
||||
return true
|
||||
}
|
||||
markToolCallSeen(toolCallId)
|
||||
return false
|
||||
}
|
||||
|
||||
export function shouldSkipToolResultEvent(event: SSEEvent): boolean {
|
||||
if (event.type !== 'tool_result') return false
|
||||
const toolCallId = getToolCallIdFromEvent(event)
|
||||
if (!toolCallId) return false
|
||||
if (wasToolResultSeen(toolCallId)) return true
|
||||
markToolResultSeen(toolCallId)
|
||||
return false
|
||||
}
|
||||
119
apps/sim/lib/copilot/orchestrator/stream-buffer.test.ts
Normal file
119
apps/sim/lib/copilot/orchestrator/stream-buffer.test.ts
Normal file
@@ -0,0 +1,119 @@
|
||||
/**
|
||||
* @vitest-environment node
|
||||
*/
|
||||
|
||||
import { loggerMock } from '@sim/testing'
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
vi.mock('@sim/logger', () => loggerMock)
|
||||
|
||||
// One member of the sorted-set stub: Redis score (event id) plus serialized value.
type StoredEntry = { score: number; value: string }

/**
 * In-memory stand-in for the Redis client used by stream-buffer.
 * Mimics only the commands the module calls: eval (the append Lua script),
 * incrby (id reservation), zrangebyscore (replay), pipeline (batched flush),
 * plus fire-and-forget del/hset/hgetall/expire.
 */
const createRedisStub = () => {
  // eventsKey -> stored sorted-set entries (not kept sorted until read).
  const events = new Map<string, StoredEntry[]>()
  // seqKey -> current sequence counter value.
  const counters = new Map<string, number>()

  // ZRANGEBYSCORE semantics: inclusive score range, ascending, values only.
  const readEntries = (key: string, min: number, max: number) => {
    const list = events.get(key) || []
    return list
      .filter((entry) => entry.score >= min && entry.score <= max)
      .sort((a, b) => a.score - b.score)
      .map((entry) => entry.value)
  }

  return {
    del: vi.fn().mockResolvedValue(1),
    hset: vi.fn().mockResolvedValue(1),
    hgetall: vi.fn().mockResolvedValue({}),
    expire: vi.fn().mockResolvedValue(1),
    // Mirrors APPEND_STREAM_EVENT_LUA: INCR the sequence, store the wrapped
    // entry at that score, return the new id. TTL/limit args are ignored.
    eval: vi
      .fn()
      .mockImplementation(
        (
          _lua: string,
          _keysCount: number,
          seqKey: string,
          eventsKey: string,
          _ttl: number,
          _limit: number,
          streamId: string,
          eventJson: string
        ) => {
          const current = counters.get(seqKey) || 0
          const next = current + 1
          counters.set(seqKey, next)
          // Same envelope shape the Lua script produces by concatenation.
          const entry = JSON.stringify({ eventId: next, streamId, event: JSON.parse(eventJson) })
          const list = events.get(eventsKey) || []
          list.push({ score: next, value: entry })
          events.set(eventsKey, list)
          return next
        }
      ),
    incrby: vi.fn().mockImplementation((key: string, amount: number) => {
      const current = counters.get(key) || 0
      const next = current + amount
      counters.set(key, next)
      return next
    }),
    zrangebyscore: vi.fn().mockImplementation((key: string, min: string, max: string) => {
      const minVal = Number(min)
      const maxVal = max === '+inf' ? Number.POSITIVE_INFINITY : Number(max)
      return Promise.resolve(readEntries(key, minVal, maxVal))
    }),
    // Chainable pipeline: zadd takes alternating (score, value) pairs and
    // persists immediately; expire/zremrangebyrank are no-ops; exec resolves.
    pipeline: vi.fn().mockImplementation(() => {
      const api: Record<string, any> = {}
      api.zadd = vi.fn().mockImplementation((key: string, ...args: Array<string | number>) => {
        const list = events.get(key) || []
        for (let i = 0; i < args.length; i += 2) {
          list.push({ score: Number(args[i]), value: String(args[i + 1]) })
        }
        events.set(key, list)
        return api
      })
      api.expire = vi.fn().mockReturnValue(api)
      api.zremrangebyrank = vi.fn().mockReturnValue(api)
      api.exec = vi.fn().mockResolvedValue([])
      return api
    }),
  }
}
|
||||
|
||||
// Replaced per-test in beforeEach; the module mock below closes over it.
let mockRedis: ReturnType<typeof createRedisStub>

// Route the module under test's Redis client to the in-memory stub.
vi.mock('@/lib/core/config/redis', () => ({
  getRedisClient: () => mockRedis,
}))
|
||||
|
||||
import {
|
||||
appendStreamEvent,
|
||||
createStreamEventWriter,
|
||||
readStreamEvents,
|
||||
} from '@/lib/copilot/orchestrator/stream-buffer'
|
||||
|
||||
describe('stream-buffer', () => {
  // NOTE(review): tests use it.concurrent while sharing the module-level
  // mockRedis reassigned here — if these ever run truly concurrently the
  // stub could be swapped mid-test; confirm this is intended.
  beforeEach(() => {
    mockRedis = createRedisStub()
    vi.clearAllMocks()
  })

  it.concurrent('replays events after a given event id', async () => {
    await appendStreamEvent('stream-1', { type: 'content', data: 'hello' })
    await appendStreamEvent('stream-1', { type: 'content', data: 'world' })

    // afterEventId = 0 returns everything (ids start at 1).
    const allEvents = await readStreamEvents('stream-1', 0)
    expect(allEvents.map((entry) => entry.event.data)).toEqual(['hello', 'world'])

    // afterEventId = 1 skips the first event — resume semantics.
    const replayed = await readStreamEvents('stream-1', 1)
    expect(replayed.map((entry) => entry.event.data)).toEqual(['world'])
  })

  it.concurrent('flushes buffered events for resume', async () => {
    const writer = createStreamEventWriter('stream-2')
    await writer.write({ type: 'content', data: 'a' })
    await writer.write({ type: 'content', data: 'b' })
    // Writer batches in memory; flush must persist both events.
    await writer.flush()

    const events = await readStreamEvents('stream-2', 0)
    expect(events.map((entry) => entry.event.data)).toEqual(['a', 'b'])
  })
})
|
||||
309
apps/sim/lib/copilot/orchestrator/stream-buffer.ts
Normal file
309
apps/sim/lib/copilot/orchestrator/stream-buffer.ts
Normal file
@@ -0,0 +1,309 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { REDIS_COPILOT_STREAM_PREFIX } from '@/lib/copilot/constants'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
import { getRedisClient } from '@/lib/core/config/redis'
|
||||
|
||||
const logger = createLogger('CopilotStreamBuffer')
|
||||
|
||||
/** Built-in fallbacks for stream buffer tuning; overridable via env (see getStreamBufferConfig). */
const STREAM_DEFAULTS = {
  ttlSeconds: 60 * 60, // 1 hour retention for buffered events and counters
  eventLimit: 5000, // max events kept per stream (oldest trimmed)
  reserveBatch: 200, // event ids reserved per INCRBY round-trip
  flushIntervalMs: 15, // debounce window before a timed flush
  flushMaxBatch: 200, // pending events that trigger an immediate flush
}

/** Tuning knobs for the Redis-backed copilot stream buffer. */
export type StreamBufferConfig = {
  ttlSeconds: number
  eventLimit: number
  reserveBatch: number
  flushIntervalMs: number
  flushMaxBatch: number
}
|
||||
|
||||
const parseNumber = (value: number | string | undefined, fallback: number): number => {
|
||||
if (typeof value === 'number' && Number.isFinite(value)) return value
|
||||
const parsed = Number(value)
|
||||
return Number.isFinite(parsed) ? parsed : fallback
|
||||
}
|
||||
|
||||
export function getStreamBufferConfig(): StreamBufferConfig {
|
||||
return {
|
||||
ttlSeconds: parseNumber(env.COPILOT_STREAM_TTL_SECONDS, STREAM_DEFAULTS.ttlSeconds),
|
||||
eventLimit: parseNumber(env.COPILOT_STREAM_EVENT_LIMIT, STREAM_DEFAULTS.eventLimit),
|
||||
reserveBatch: parseNumber(env.COPILOT_STREAM_RESERVE_BATCH, STREAM_DEFAULTS.reserveBatch),
|
||||
flushIntervalMs: parseNumber(
|
||||
env.COPILOT_STREAM_FLUSH_INTERVAL_MS,
|
||||
STREAM_DEFAULTS.flushIntervalMs
|
||||
),
|
||||
flushMaxBatch: parseNumber(env.COPILOT_STREAM_FLUSH_MAX_BATCH, STREAM_DEFAULTS.flushMaxBatch),
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Atomic append script: INCR the per-stream sequence for the next event id,
 * wrap the event in a {eventId, streamId, event} envelope (built by string
 * concatenation — eventJson is JSON.stringify output, so the result stays
 * valid JSON), ZADD it scored by id, refresh both TTLs, and trim the sorted
 * set to the newest `limit` entries. Returns the assigned id.
 */
const APPEND_STREAM_EVENT_LUA = `
local seqKey = KEYS[1]
local eventsKey = KEYS[2]
local ttl = tonumber(ARGV[1])
local limit = tonumber(ARGV[2])
local streamId = ARGV[3]
local eventJson = ARGV[4]

local id = redis.call('INCR', seqKey)
local entry = '{"eventId":' .. id .. ',"streamId":' .. cjson.encode(streamId) .. ',"event":' .. eventJson .. '}'
redis.call('ZADD', eventsKey, id, entry)
redis.call('EXPIRE', eventsKey, ttl)
redis.call('EXPIRE', seqKey, ttl)
if limit > 0 then
  redis.call('ZREMRANGEBYRANK', eventsKey, 0, -limit-1)
end
return id
`
|
||||
|
||||
function getStreamKeyPrefix(streamId: string) {
|
||||
return `${REDIS_COPILOT_STREAM_PREFIX}${streamId}`
|
||||
}
|
||||
|
||||
function getEventsKey(streamId: string) {
|
||||
return `${getStreamKeyPrefix(streamId)}:events`
|
||||
}
|
||||
|
||||
function getSeqKey(streamId: string) {
|
||||
return `${getStreamKeyPrefix(streamId)}:seq`
|
||||
}
|
||||
|
||||
function getMetaKey(streamId: string) {
|
||||
return `${getStreamKeyPrefix(streamId)}:meta`
|
||||
}
|
||||
|
||||
/** Lifecycle state of a buffered stream. */
export type StreamStatus = 'active' | 'complete' | 'error'

/** Metadata persisted in the stream's Redis meta hash. */
export type StreamMeta = {
  status: StreamStatus
  // Owner of the stream, when known.
  userId?: string
  // ISO timestamp of the last meta write.
  updatedAt?: string
  // Error message when status is 'error'.
  error?: string
}

/** One buffered event as stored/replayed: the event plus its assigned id. */
export type StreamEventEntry = {
  eventId: number
  streamId: string
  event: Record<string, unknown>
}

/** Batching writer returned by createStreamEventWriter. */
export type StreamEventWriter = {
  // Queue an event; resolves with the entry (eventId 0 when Redis is unavailable).
  write: (event: Record<string, unknown>) => Promise<StreamEventEntry>
  // Persist everything queued so far.
  flush: () => Promise<void>
  // Stop accepting writes and flush the remainder.
  close: () => Promise<void>
}
|
||||
|
||||
export async function resetStreamBuffer(streamId: string): Promise<void> {
|
||||
const redis = getRedisClient()
|
||||
if (!redis) return
|
||||
try {
|
||||
await redis.del(getEventsKey(streamId), getSeqKey(streamId), getMetaKey(streamId))
|
||||
} catch (error) {
|
||||
logger.warn('Failed to reset stream buffer', {
|
||||
streamId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
export async function setStreamMeta(streamId: string, meta: StreamMeta): Promise<void> {
|
||||
const redis = getRedisClient()
|
||||
if (!redis) return
|
||||
try {
|
||||
const config = getStreamBufferConfig()
|
||||
const payload: Record<string, string> = {
|
||||
status: meta.status,
|
||||
updatedAt: meta.updatedAt || new Date().toISOString(),
|
||||
}
|
||||
if (meta.userId) payload.userId = meta.userId
|
||||
if (meta.error) payload.error = meta.error
|
||||
await redis.hset(getMetaKey(streamId), payload)
|
||||
await redis.expire(getMetaKey(streamId), config.ttlSeconds)
|
||||
} catch (error) {
|
||||
logger.warn('Failed to update stream meta', {
|
||||
streamId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
export async function getStreamMeta(streamId: string): Promise<StreamMeta | null> {
|
||||
const redis = getRedisClient()
|
||||
if (!redis) return null
|
||||
try {
|
||||
const meta = await redis.hgetall(getMetaKey(streamId))
|
||||
if (!meta || Object.keys(meta).length === 0) return null
|
||||
return meta as StreamMeta
|
||||
} catch (error) {
|
||||
logger.warn('Failed to read stream meta', {
|
||||
streamId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
export async function appendStreamEvent(
|
||||
streamId: string,
|
||||
event: Record<string, unknown>
|
||||
): Promise<StreamEventEntry> {
|
||||
const redis = getRedisClient()
|
||||
if (!redis) {
|
||||
return { eventId: 0, streamId, event }
|
||||
}
|
||||
|
||||
try {
|
||||
const config = getStreamBufferConfig()
|
||||
const eventJson = JSON.stringify(event)
|
||||
const nextId = await redis.eval(
|
||||
APPEND_STREAM_EVENT_LUA,
|
||||
2,
|
||||
getSeqKey(streamId),
|
||||
getEventsKey(streamId),
|
||||
config.ttlSeconds,
|
||||
config.eventLimit,
|
||||
streamId,
|
||||
eventJson
|
||||
)
|
||||
const eventId = typeof nextId === 'number' ? nextId : Number(nextId)
|
||||
return { eventId, streamId, event }
|
||||
} catch (error) {
|
||||
logger.warn('Failed to append stream event', {
|
||||
streamId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return { eventId: 0, streamId, event }
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Create a batching writer for one stream.
 *
 * Writes are assigned ids from ranges reserved in bulk via INCRBY (cutting
 * Redis round-trips), queued in memory, and flushed either on a short timer
 * or when the pending batch reaches flushMaxBatch. Without a Redis client,
 * a no-op writer (eventId 0) is returned.
 *
 * NOTE(review): id reservation assumes a single writer advances the local
 * cursor; concurrent writers on the same stream get disjoint ranges from
 * INCRBY, but unused reserved ids leave gaps — confirm readers tolerate
 * non-contiguous ids (readStreamEvents replays by score, so gaps look fine).
 */
export function createStreamEventWriter(streamId: string): StreamEventWriter {
  const redis = getRedisClient()
  if (!redis) {
    // Degraded mode: accept writes but buffer nothing.
    return {
      write: async (event) => ({ eventId: 0, streamId, event }),
      flush: async () => {},
      close: async () => {},
    }
  }

  const config = getStreamBufferConfig()
  // Events queued locally, waiting for the next flush.
  let pending: StreamEventEntry[] = []
  // Next id to hand out; 0 means "no range reserved yet".
  let nextEventId = 0
  // Highest id in the currently reserved range.
  let maxReservedId = 0
  let flushTimer: ReturnType<typeof setTimeout> | null = null
  // Debounced flush: at most one timer outstanding at a time.
  const scheduleFlush = () => {
    if (flushTimer) return
    flushTimer = setTimeout(() => {
      flushTimer = null
      void flush()
    }, config.flushIntervalMs)
  }

  // Reserve a block of ids via a single INCRBY; the reserved range is
  // [newMax - reserveCount + 1, newMax].
  const reserveIds = async (minCount: number) => {
    const reserveCount = Math.max(config.reserveBatch, minCount)
    const newMax = await redis.incrby(getSeqKey(streamId), reserveCount)
    const startId = newMax - reserveCount + 1
    // Only adopt the new range if the old one is unset or exhausted.
    if (nextEventId === 0 || nextEventId > maxReservedId) {
      nextEventId = startId
      maxReservedId = newMax
    }
  }

  // Serializes concurrent flush() calls onto one in-flight doFlush.
  let flushPromise: Promise<void> | null = null
  let closed = false

  const doFlush = async () => {
    if (pending.length === 0) return
    // Take ownership of the current batch; new writes accumulate separately.
    const batch = pending
    pending = []
    try {
      const key = getEventsKey(streamId)
      // ZADD takes alternating (score, member) pairs.
      const zaddArgs: (string | number)[] = []
      for (const entry of batch) {
        zaddArgs.push(entry.eventId, JSON.stringify(entry))
      }
      const pipeline = redis.pipeline()
      pipeline.zadd(key, ...(zaddArgs as [number, string]))
      pipeline.expire(key, config.ttlSeconds)
      pipeline.expire(getSeqKey(streamId), config.ttlSeconds)
      // Keep only the newest eventLimit entries.
      pipeline.zremrangebyrank(key, 0, -config.eventLimit - 1)
      await pipeline.exec()
    } catch (error) {
      logger.warn('Failed to flush stream events', {
        streamId,
        error: error instanceof Error ? error.message : String(error),
      })
      // Re-queue the failed batch ahead of anything written since, preserving order.
      pending = batch.concat(pending)
    }
  }

  const flush = async () => {
    // Piggyback on an in-flight flush instead of racing it.
    if (flushPromise) {
      await flushPromise
      return
    }
    flushPromise = doFlush()
    try {
      await flushPromise
    } finally {
      flushPromise = null
      // Anything that arrived (or was re-queued) during the flush gets a timer.
      if (pending.length > 0) scheduleFlush()
    }
  }

  const write = async (event: Record<string, unknown>) => {
    // Writes after close() are dropped (eventId 0).
    if (closed) return { eventId: 0, streamId, event }
    if (nextEventId === 0 || nextEventId > maxReservedId) {
      await reserveIds(1)
    }
    const eventId = nextEventId++
    const entry: StreamEventEntry = { eventId, streamId, event }
    pending.push(entry)
    if (pending.length >= config.flushMaxBatch) {
      await flush()
    } else {
      scheduleFlush()
    }
    return entry
  }

  const close = async () => {
    closed = true
    if (flushTimer) {
      clearTimeout(flushTimer)
      flushTimer = null
    }
    await flush()
  }

  return { write, flush, close }
}
|
||||
|
||||
export async function readStreamEvents(
|
||||
streamId: string,
|
||||
afterEventId: number
|
||||
): Promise<StreamEventEntry[]> {
|
||||
const redis = getRedisClient()
|
||||
if (!redis) return []
|
||||
try {
|
||||
const raw = await redis.zrangebyscore(getEventsKey(streamId), afterEventId + 1, '+inf')
|
||||
return raw
|
||||
.map((entry) => {
|
||||
try {
|
||||
return JSON.parse(entry) as StreamEventEntry
|
||||
} catch {
|
||||
return null
|
||||
}
|
||||
})
|
||||
.filter((entry): entry is StreamEventEntry => Boolean(entry))
|
||||
} catch (error) {
|
||||
logger.warn('Failed to read stream events', {
|
||||
streamId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return []
|
||||
}
|
||||
}
|
||||
182
apps/sim/lib/copilot/orchestrator/stream-core.ts
Normal file
182
apps/sim/lib/copilot/orchestrator/stream-core.ts
Normal file
@@ -0,0 +1,182 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { ORCHESTRATION_TIMEOUT_MS } from '@/lib/copilot/constants'
|
||||
import {
|
||||
handleSubagentRouting,
|
||||
sseHandlers,
|
||||
subAgentHandlers,
|
||||
} from '@/lib/copilot/orchestrator/sse-handlers'
|
||||
import { parseSSEStream } from '@/lib/copilot/orchestrator/sse-parser'
|
||||
import {
|
||||
normalizeSseEvent,
|
||||
shouldSkipToolCallEvent,
|
||||
shouldSkipToolResultEvent,
|
||||
} from '@/lib/copilot/orchestrator/sse-utils'
|
||||
import type {
|
||||
ExecutionContext,
|
||||
OrchestratorOptions,
|
||||
SSEEvent,
|
||||
StreamingContext,
|
||||
ToolCallSummary,
|
||||
} from '@/lib/copilot/orchestrator/types'
|
||||
|
||||
const logger = createLogger('CopilotStreamCore')
|
||||
|
||||
/**
 * Options for the shared stream processing loop.
 * Extends OrchestratorOptions with a pre-dispatch hook; note that tool-event
 * deduplication runs before this hook is invoked.
 */
export interface StreamLoopOptions extends OrchestratorOptions {
  /**
   * Called for each normalized event BEFORE standard handler dispatch.
   * Return true to skip the default handler for this event.
   */
  onBeforeDispatch?: (event: SSEEvent, context: StreamingContext) => boolean | undefined
}
|
||||
|
||||
/**
|
||||
* Create a fresh StreamingContext.
|
||||
*/
|
||||
export function createStreamingContext(overrides?: Partial<StreamingContext>): StreamingContext {
|
||||
return {
|
||||
chatId: undefined,
|
||||
conversationId: undefined,
|
||||
messageId: crypto.randomUUID(),
|
||||
accumulatedContent: '',
|
||||
contentBlocks: [],
|
||||
toolCalls: new Map(),
|
||||
currentThinkingBlock: null,
|
||||
isInThinkingBlock: false,
|
||||
subAgentParentToolCallId: undefined,
|
||||
subAgentContent: {},
|
||||
subAgentToolCalls: {},
|
||||
pendingContent: '',
|
||||
streamComplete: false,
|
||||
wasAborted: false,
|
||||
errors: [],
|
||||
...overrides,
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Run the SSE stream processing loop.
 *
 * Handles: fetch -> parse -> normalize -> dedupe -> subagent routing -> handler dispatch.
 * Callers provide the fetch URL/options and can intercept events via onBeforeDispatch.
 *
 * @param fetchUrl     Backend SSE endpoint.
 * @param fetchOptions Request init; the abort signal from options is attached here.
 * @param context      Mutable streaming state accumulated across events.
 * @param execContext  Execution context passed through to event handlers.
 * @param options      Loop options (timeout, abortSignal, onEvent, onBeforeDispatch, ...).
 * @throws on non-OK responses or a missing response body.
 */
export async function runStreamLoop(
  fetchUrl: string,
  fetchOptions: RequestInit,
  context: StreamingContext,
  execContext: ExecutionContext,
  options: StreamLoopOptions
): Promise<void> {
  const { timeout = ORCHESTRATION_TIMEOUT_MS, abortSignal } = options

  const response = await fetch(fetchUrl, {
    ...fetchOptions,
    signal: abortSignal,
  })

  if (!response.ok) {
    // Best-effort body read for a useful error message.
    const errorText = await response.text().catch(() => '')
    throw new Error(
      `Copilot backend error (${response.status}): ${errorText || response.statusText}`
    )
  }

  if (!response.body) {
    throw new Error('Copilot backend response missing body')
  }

  const reader = response.body.getReader()
  const decoder = new TextDecoder()

  // Hard deadline: record the timeout, mark the stream complete, and cancel
  // the reader so the async iterator below terminates.
  const timeoutId = setTimeout(() => {
    context.errors.push('Request timed out')
    context.streamComplete = true
    reader.cancel().catch(() => {})
  }, timeout)

  try {
    for await (const event of parseSSEStream(reader, decoder, abortSignal)) {
      if (abortSignal?.aborted) {
        context.wasAborted = true
        break
      }

      const normalizedEvent = normalizeSseEvent(event)

      // Skip duplicate tool events. Both checks run unconditionally so the
      // dedupe caches are updated even when the other check matched.
      const shouldSkipToolCall = shouldSkipToolCallEvent(normalizedEvent)
      const shouldSkipToolResult = shouldSkipToolResultEvent(normalizedEvent)

      if (!shouldSkipToolCall && !shouldSkipToolResult) {
        try {
          await options.onEvent?.(normalizedEvent)
        } catch (error) {
          // A failing forwarder must not kill the stream.
          logger.warn('Failed to forward SSE event', {
            type: normalizedEvent.type,
            error: error instanceof Error ? error.message : String(error),
          })
        }
      }

      // Let the caller intercept before standard dispatch.
      if (options.onBeforeDispatch?.(normalizedEvent, context)) {
        if (context.streamComplete) break
        continue
      }

      // Standard subagent start/end handling.
      if (normalizedEvent.type === 'subagent_start') {
        const eventData = normalizedEvent.data as Record<string, unknown> | undefined
        const toolCallId = eventData?.tool_call_id as string | undefined
        if (toolCallId) {
          // Subsequent events are attributed to this parent tool call.
          context.subAgentParentToolCallId = toolCallId
          context.subAgentContent[toolCallId] = ''
          context.subAgentToolCalls[toolCallId] = []
        }
        continue
      }

      if (normalizedEvent.type === 'subagent_end') {
        context.subAgentParentToolCallId = undefined
        continue
      }

      // Subagent event routing.
      if (handleSubagentRouting(normalizedEvent, context)) {
        const handler = subAgentHandlers[normalizedEvent.type]
        if (handler) {
          await handler(normalizedEvent, context, execContext, options)
        }
        if (context.streamComplete) break
        continue
      }

      // Main event handler dispatch.
      const handler = sseHandlers[normalizedEvent.type]
      if (handler) {
        await handler(normalizedEvent, context, execContext, options)
      }
      if (context.streamComplete) break
    }
  } finally {
    clearTimeout(timeoutId)
  }
}
|
||||
|
||||
/**
|
||||
* Build a ToolCallSummary array from the streaming context.
|
||||
*/
|
||||
export function buildToolCallSummaries(context: StreamingContext): ToolCallSummary[] {
|
||||
return Array.from(context.toolCalls.values()).map((toolCall) => ({
|
||||
id: toolCall.id,
|
||||
name: toolCall.name,
|
||||
status: toolCall.status,
|
||||
params: toolCall.params,
|
||||
result: toolCall.result?.output,
|
||||
error: toolCall.error,
|
||||
durationMs:
|
||||
toolCall.endTime && toolCall.startTime ? toolCall.endTime - toolCall.startTime : undefined,
|
||||
}))
|
||||
}
|
||||
137
apps/sim/lib/copilot/orchestrator/subagent.ts
Normal file
137
apps/sim/lib/copilot/orchestrator/subagent.ts
Normal file
@@ -0,0 +1,137 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
|
||||
import { prepareExecutionContext } from '@/lib/copilot/orchestrator/tool-executor'
|
||||
import type {
|
||||
ExecutionContext,
|
||||
OrchestratorOptions,
|
||||
SSEEvent,
|
||||
StreamingContext,
|
||||
ToolCallSummary,
|
||||
} from '@/lib/copilot/orchestrator/types'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
|
||||
import { buildToolCallSummaries, createStreamingContext, runStreamLoop } from './stream-core'
|
||||
|
||||
const logger = createLogger('CopilotSubagentOrchestrator')
|
||||
|
||||
/** Options for a direct subagent run; onComplete receives the subagent-specific result shape. */
export interface SubagentOrchestratorOptions extends Omit<OrchestratorOptions, 'onComplete'> {
  userId: string
  workflowId?: string
  workspaceId?: string
  onComplete?: (result: SubagentOrchestratorResult) => void | Promise<void>
}

/** Aggregate outcome of a subagent stream. */
export interface SubagentOrchestratorResult {
  success: boolean
  // Full accumulated text content from the stream.
  content: string
  toolCalls: ToolCallSummary[]
  // Terminal structured_result / subagent_result payload, when one was emitted.
  structuredResult?: {
    type?: string
    summary?: string
    data?: unknown
    success?: boolean
  }
  // Fatal error message (orchestration threw).
  error?: string
  // Non-fatal errors collected during streaming.
  errors?: string[]
}
|
||||
|
||||
export async function orchestrateSubagentStream(
|
||||
agentId: string,
|
||||
requestPayload: Record<string, unknown>,
|
||||
options: SubagentOrchestratorOptions
|
||||
): Promise<SubagentOrchestratorResult> {
|
||||
const { userId, workflowId, workspaceId } = options
|
||||
const execContext = await buildExecutionContext(userId, workflowId, workspaceId)
|
||||
|
||||
const msgId = requestPayload?.messageId
|
||||
const context = createStreamingContext({
|
||||
messageId: typeof msgId === 'string' ? msgId : crypto.randomUUID(),
|
||||
})
|
||||
|
||||
let structuredResult: SubagentOrchestratorResult['structuredResult']
|
||||
|
||||
try {
|
||||
await runStreamLoop(
|
||||
`${SIM_AGENT_API_URL}/api/subagent/${agentId}`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}),
|
||||
},
|
||||
body: JSON.stringify({ ...requestPayload, userId, stream: true }),
|
||||
},
|
||||
context,
|
||||
execContext,
|
||||
{
|
||||
...options,
|
||||
onBeforeDispatch: (event: SSEEvent, ctx: StreamingContext) => {
|
||||
// Handle structured_result / subagent_result - subagent-specific.
|
||||
if (event.type === 'structured_result' || event.type === 'subagent_result') {
|
||||
structuredResult = normalizeStructuredResult(event.data)
|
||||
ctx.streamComplete = true
|
||||
return true // skip default dispatch
|
||||
}
|
||||
|
||||
// For direct subagent calls, events may have the subagent field set
|
||||
// but no subagent_start because this IS the top-level agent.
|
||||
// Skip subagent routing for events where the subagent field matches
|
||||
// the current agentId - these are top-level events.
|
||||
if (event.subagent === agentId && !ctx.subAgentParentToolCallId) {
|
||||
return false // let default dispatch handle it
|
||||
}
|
||||
|
||||
return false // let default dispatch handle it
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
const result: SubagentOrchestratorResult = {
|
||||
success: context.errors.length === 0 && !context.wasAborted,
|
||||
content: context.accumulatedContent,
|
||||
toolCalls: buildToolCallSummaries(context),
|
||||
structuredResult,
|
||||
errors: context.errors.length ? context.errors : undefined,
|
||||
}
|
||||
await options.onComplete?.(result)
|
||||
return result
|
||||
} catch (error) {
|
||||
const err = error instanceof Error ? error : new Error('Subagent orchestration failed')
|
||||
logger.error('Subagent orchestration failed', { error: err.message, agentId })
|
||||
await options.onError?.(err)
|
||||
return {
|
||||
success: false,
|
||||
content: context.accumulatedContent,
|
||||
toolCalls: [],
|
||||
error: err.message,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function normalizeStructuredResult(data: unknown): SubagentOrchestratorResult['structuredResult'] {
|
||||
if (!data || typeof data !== 'object') return undefined
|
||||
const d = data as Record<string, unknown>
|
||||
return {
|
||||
type: (d.result_type || d.type) as string | undefined,
|
||||
summary: d.summary as string | undefined,
|
||||
data: d.data ?? d,
|
||||
success: d.success as boolean | undefined,
|
||||
}
|
||||
}
|
||||
|
||||
async function buildExecutionContext(
|
||||
userId: string,
|
||||
workflowId?: string,
|
||||
workspaceId?: string
|
||||
): Promise<ExecutionContext> {
|
||||
if (workflowId) {
|
||||
return prepareExecutionContext(userId, workflowId)
|
||||
}
|
||||
const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId)
|
||||
return {
|
||||
userId,
|
||||
workflowId: workflowId || '',
|
||||
workspaceId,
|
||||
decryptedEnvVars,
|
||||
}
|
||||
}
|
||||
129
apps/sim/lib/copilot/orchestrator/tool-executor/access.ts
Normal file
129
apps/sim/lib/copilot/orchestrator/tool-executor/access.ts
Normal file
@@ -0,0 +1,129 @@
|
||||
import { db } from '@sim/db'
|
||||
import { permissions, workflow, workspace } from '@sim/db/schema'
|
||||
import { and, asc, desc, eq, inArray, or } from 'drizzle-orm'
|
||||
|
||||
type WorkflowRecord = typeof workflow.$inferSelect
|
||||
|
||||
export async function ensureWorkflowAccess(
|
||||
workflowId: string,
|
||||
userId: string
|
||||
): Promise<{
|
||||
workflow: WorkflowRecord
|
||||
workspaceId?: string | null
|
||||
}> {
|
||||
const [workflowRecord] = await db
|
||||
.select()
|
||||
.from(workflow)
|
||||
.where(eq(workflow.id, workflowId))
|
||||
.limit(1)
|
||||
if (!workflowRecord) {
|
||||
throw new Error(`Workflow ${workflowId} not found`)
|
||||
}
|
||||
|
||||
if (workflowRecord.userId === userId) {
|
||||
return { workflow: workflowRecord, workspaceId: workflowRecord.workspaceId }
|
||||
}
|
||||
|
||||
if (workflowRecord.workspaceId) {
|
||||
const [permissionRow] = await db
|
||||
.select({ permissionType: permissions.permissionType })
|
||||
.from(permissions)
|
||||
.where(
|
||||
and(
|
||||
eq(permissions.entityType, 'workspace'),
|
||||
eq(permissions.entityId, workflowRecord.workspaceId),
|
||||
eq(permissions.userId, userId)
|
||||
)
|
||||
)
|
||||
.limit(1)
|
||||
if (permissionRow) {
|
||||
return { workflow: workflowRecord, workspaceId: workflowRecord.workspaceId }
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error('Unauthorized workflow access')
|
||||
}
|
||||
|
||||
export async function getDefaultWorkspaceId(userId: string): Promise<string> {
|
||||
const workspaces = await db
|
||||
.select({ workspaceId: workspace.id })
|
||||
.from(permissions)
|
||||
.innerJoin(workspace, eq(permissions.entityId, workspace.id))
|
||||
.where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace')))
|
||||
.orderBy(desc(workspace.createdAt))
|
||||
.limit(1)
|
||||
|
||||
const workspaceId = workspaces[0]?.workspaceId
|
||||
if (!workspaceId) {
|
||||
throw new Error('No workspace found for user')
|
||||
}
|
||||
|
||||
return workspaceId
|
||||
}
|
||||
|
||||
/**
 * Verify that `userId` may access workspace `workspaceId`.
 *
 * Write access is granted to the workspace owner or to users with an
 * 'admin' or 'write' permission row. Read access additionally accepts
 * a 'read' permission row.
 *
 * NOTE(review): the query joins `permissions` filtered by userId, so a
 * workspace the user has no permission row for is reported as
 * "not found" even when it exists — confirm this conflation is intended.
 *
 * @param workspaceId - id of the workspace to check.
 * @param userId - id of the requesting user.
 * @param requireWrite - when true, require owner/admin/write access.
 * @throws Error when no permission row exists or access is insufficient.
 */
export async function ensureWorkspaceAccess(
  workspaceId: string,
  userId: string,
  requireWrite: boolean
): Promise<void> {
  const [row] = await db
    .select({
      permissionType: permissions.permissionType,
      ownerId: workspace.ownerId,
    })
    .from(permissions)
    .innerJoin(workspace, eq(permissions.entityId, workspace.id))
    .where(
      and(
        eq(permissions.entityType, 'workspace'),
        eq(permissions.entityId, workspaceId),
        eq(permissions.userId, userId)
      )
    )
    .limit(1)

  if (!row) {
    throw new Error(`Workspace ${workspaceId} not found`)
  }

  const isOwner = row.ownerId === userId
  const permissionType = row.permissionType
  const canWrite = isOwner || permissionType === 'admin' || permissionType === 'write'

  if (requireWrite && !canWrite) {
    throw new Error('Write or admin access required for this workspace')
  }

  // Read path: reject rows whose permission type is neither a write-capable
  // type nor 'read' (i.e. any unexpected permission value).
  if (!requireWrite && !canWrite && permissionType !== 'read') {
    throw new Error('Access denied to workspace')
  }
}
|
||||
|
||||
export async function getAccessibleWorkflowsForUser(
|
||||
userId: string,
|
||||
options?: { workspaceId?: string; folderId?: string }
|
||||
) {
|
||||
const workspaceIds = await db
|
||||
.select({ entityId: permissions.entityId })
|
||||
.from(permissions)
|
||||
.where(and(eq(permissions.userId, userId), eq(permissions.entityType, 'workspace')))
|
||||
|
||||
const workspaceIdList = workspaceIds.map((row) => row.entityId)
|
||||
|
||||
const workflowConditions = [eq(workflow.userId, userId)]
|
||||
if (workspaceIdList.length > 0) {
|
||||
workflowConditions.push(inArray(workflow.workspaceId, workspaceIdList))
|
||||
}
|
||||
if (options?.workspaceId) {
|
||||
workflowConditions.push(eq(workflow.workspaceId, options.workspaceId))
|
||||
}
|
||||
if (options?.folderId) {
|
||||
workflowConditions.push(eq(workflow.folderId, options.folderId))
|
||||
}
|
||||
|
||||
return db
|
||||
.select()
|
||||
.from(workflow)
|
||||
.where(or(...workflowConditions))
|
||||
.orderBy(asc(workflow.sortOrder), asc(workflow.createdAt), asc(workflow.id))
|
||||
}
|
||||
@@ -0,0 +1,288 @@
|
||||
import crypto from 'crypto'
|
||||
import { db } from '@sim/db'
|
||||
import { chat, workflowMcpTool } from '@sim/db/schema'
|
||||
import { and, eq } from 'drizzle-orm'
|
||||
import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types'
|
||||
import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema'
|
||||
import { deployWorkflow, undeployWorkflow } from '@/lib/workflows/persistence/utils'
|
||||
import { checkChatAccess, checkWorkflowAccessForChatCreation } from '@/app/api/chat/utils'
|
||||
import { ensureWorkflowAccess } from '../access'
|
||||
import type { DeployApiParams, DeployChatParams, DeployMcpParams } from '../param-types'
|
||||
|
||||
/**
 * Deploy (or undeploy) a workflow's API endpoint.
 *
 * Resolves the target workflow from params or the execution context,
 * verifies access, then delegates to deployWorkflow/undeployWorkflow.
 * Never throws: all failures are returned as { success: false, error }.
 *
 * @param params - tool params; `action` defaults to 'deploy' unless exactly 'undeploy'.
 * @param context - execution context providing userId and fallback workflowId.
 */
export async function executeDeployApi(
  params: DeployApiParams,
  context: ExecutionContext
): Promise<ToolCallResult> {
  try {
    // Explicit param wins; fall back to the workflow in context.
    const workflowId = params.workflowId || context.workflowId
    if (!workflowId) {
      return { success: false, error: 'workflowId is required' }
    }
    const action = params.action === 'undeploy' ? 'undeploy' : 'deploy'
    const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)

    if (action === 'undeploy') {
      const result = await undeployWorkflow({ workflowId })
      if (!result.success) {
        return { success: false, error: result.error || 'Failed to undeploy workflow' }
      }
      return { success: true, output: { workflowId, isDeployed: false } }
    }

    const result = await deployWorkflow({
      workflowId,
      deployedBy: context.userId,
      workflowName: workflowRecord.name || undefined,
    })
    if (!result.success) {
      return { success: false, error: result.error || 'Failed to deploy workflow' }
    }

    return {
      success: true,
      output: {
        workflowId,
        isDeployed: true,
        deployedAt: result.deployedAt,
        version: result.version,
      },
    }
  } catch (error) {
    return { success: false, error: error instanceof Error ? error.message : String(error) }
  }
}
|
||||
|
||||
/**
 * Deploy (or undeploy) a public chat interface for a workflow.
 *
 * Deploy path: validates identifier/title (falling back to any existing
 * deployment's values), checks identifier uniqueness, deploys the workflow,
 * then upserts the `chat` row. Undeploy path: deletes the existing chat row
 * after an access check. Never throws: failures return { success: false }.
 *
 * NOTE(review): `params.password` is written to the chat row as-is here —
 * confirm upstream hashing/encryption, or that the column stores it safely.
 */
export async function executeDeployChat(
  params: DeployChatParams,
  context: ExecutionContext
): Promise<ToolCallResult> {
  try {
    const workflowId = params.workflowId || context.workflowId
    if (!workflowId) {
      return { success: false, error: 'workflowId is required' }
    }

    const action = params.action === 'undeploy' ? 'undeploy' : 'deploy'
    if (action === 'undeploy') {
      // Undeploy: remove the first chat row bound to this workflow.
      const existing = await db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1)
      if (!existing.length) {
        return { success: false, error: 'No active chat deployment found for this workflow' }
      }
      const { hasAccess } = await checkChatAccess(existing[0].id, context.userId)
      if (!hasAccess) {
        return { success: false, error: 'Unauthorized chat access' }
      }
      await db.delete(chat).where(eq(chat.id, existing[0].id))
      return { success: true, output: { success: true, action: 'undeploy', isDeployed: false } }
    }

    const { hasAccess } = await checkWorkflowAccessForChatCreation(workflowId, context.userId)
    if (!hasAccess) {
      return { success: false, error: 'Workflow not found or access denied' }
    }

    // Reuse values from an existing deployment when params omit them.
    const existing = await db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1)
    const existingDeployment = existing[0] || null

    const identifier = String(params.identifier || existingDeployment?.identifier || '').trim()
    const title = String(params.title || existingDeployment?.title || '').trim()
    if (!identifier || !title) {
      return { success: false, error: 'Chat identifier and title are required' }
    }

    // Identifier becomes part of the chat URL, so restrict its alphabet.
    const identifierPattern = /^[a-z0-9-]+$/
    if (!identifierPattern.test(identifier)) {
      return {
        success: false,
        error: 'Identifier can only contain lowercase letters, numbers, and hyphens',
      }
    }

    // Identifier must be globally unique across chats (except our own row).
    const existingIdentifier = await db
      .select()
      .from(chat)
      .where(eq(chat.identifier, identifier))
      .limit(1)
    if (existingIdentifier.length > 0 && existingIdentifier[0].id !== existingDeployment?.id) {
      return { success: false, error: 'Identifier already in use' }
    }

    // The workflow itself must be deployed for the chat to execute it.
    const deployResult = await deployWorkflow({
      workflowId,
      deployedBy: context.userId,
    })
    if (!deployResult.success) {
      return { success: false, error: deployResult.error || 'Failed to deploy workflow' }
    }

    const existingCustomizations =
      (existingDeployment?.customizations as
        | { primaryColor?: string; welcomeMessage?: string }
        | undefined) || {}

    // Merge precedence: explicit params > existing deployment > defaults.
    const payload = {
      workflowId,
      identifier,
      title,
      description: String(params.description || existingDeployment?.description || ''),
      customizations: {
        primaryColor:
          params.customizations?.primaryColor ||
          existingCustomizations.primaryColor ||
          'var(--brand-primary-hover-hex)',
        welcomeMessage:
          params.customizations?.welcomeMessage ||
          existingCustomizations.welcomeMessage ||
          'Hi there! How can I help you today?',
      },
      authType: params.authType || existingDeployment?.authType || 'public',
      password: params.password,
      allowedEmails: params.allowedEmails || existingDeployment?.allowedEmails || [],
      outputConfigs: params.outputConfigs || existingDeployment?.outputConfigs || [],
    }

    if (existingDeployment) {
      // Update in place; keep the old password if none was supplied.
      await db
        .update(chat)
        .set({
          identifier: payload.identifier,
          title: payload.title,
          description: payload.description,
          customizations: payload.customizations,
          authType: payload.authType,
          password: payload.password || existingDeployment.password,
          // allowedEmails only applies to email/sso auth; clear otherwise.
          allowedEmails:
            payload.authType === 'email' || payload.authType === 'sso' ? payload.allowedEmails : [],
          outputConfigs: payload.outputConfigs,
          updatedAt: new Date(),
        })
        .where(eq(chat.id, existingDeployment.id))
    } else {
      await db.insert(chat).values({
        id: crypto.randomUUID(),
        workflowId,
        userId: context.userId,
        identifier: payload.identifier,
        title: payload.title,
        description: payload.description,
        customizations: payload.customizations,
        isActive: true,
        authType: payload.authType,
        password: payload.password || null,
        allowedEmails:
          payload.authType === 'email' || payload.authType === 'sso' ? payload.allowedEmails : [],
        outputConfigs: payload.outputConfigs,
        createdAt: new Date(),
        updatedAt: new Date(),
      })
    }

    return {
      success: true,
      output: { success: true, action: 'deploy', isDeployed: true, identifier },
    }
  } catch (error) {
    return { success: false, error: error instanceof Error ? error.message : String(error) }
  }
}
|
||||
|
||||
/**
 * Expose a deployed workflow as a tool on an existing workspace MCP server.
 *
 * Requires the workflow to belong to a workspace and to already be deployed
 * (deploy_api). Upserts a `workflowMcpTool` row keyed by (serverId,
 * workflowId). Never throws: failures return { success: false, error }.
 *
 * @param params - must carry `serverId`; toolName/description/schema optional.
 * @param context - execution context providing userId and fallback workflowId.
 */
export async function executeDeployMcp(
  params: DeployMcpParams,
  context: ExecutionContext
): Promise<ToolCallResult> {
  try {
    const workflowId = params.workflowId || context.workflowId
    if (!workflowId) {
      return { success: false, error: 'workflowId is required' }
    }

    const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)
    const workspaceId = workflowRecord.workspaceId
    if (!workspaceId) {
      return { success: false, error: 'workspaceId is required' }
    }

    if (!workflowRecord.isDeployed) {
      return {
        success: false,
        error: 'Workflow must be deployed before adding as an MCP tool. Use deploy_api first.',
      }
    }

    const serverId = params.serverId
    if (!serverId) {
      return {
        success: false,
        error: 'serverId is required. Use list_workspace_mcp_servers to get available servers.',
      }
    }

    // One tool row per (server, workflow) pair — look for an existing one.
    const existingTool = await db
      .select()
      .from(workflowMcpTool)
      .where(
        and(eq(workflowMcpTool.serverId, serverId), eq(workflowMcpTool.workflowId, workflowId))
      )
      .limit(1)

    // Derive tool metadata, falling back to workflow name/description.
    const toolName = sanitizeToolName(
      params.toolName || workflowRecord.name || `workflow_${workflowId}`
    )
    const toolDescription =
      params.toolDescription ||
      workflowRecord.description ||
      `Execute ${workflowRecord.name} workflow`
    const parameterSchema = params.parameterSchema || {}

    if (existingTool.length > 0) {
      // Update the existing tool registration in place.
      const toolId = existingTool[0].id
      await db
        .update(workflowMcpTool)
        .set({
          toolName,
          toolDescription,
          parameterSchema,
          updatedAt: new Date(),
        })
        .where(eq(workflowMcpTool.id, toolId))
      return { success: true, output: { toolId, toolName, toolDescription, updated: true } }
    }

    const toolId = crypto.randomUUID()
    await db.insert(workflowMcpTool).values({
      id: toolId,
      serverId,
      workflowId,
      toolName,
      toolDescription,
      parameterSchema,
      createdAt: new Date(),
      updatedAt: new Date(),
    })

    return { success: true, output: { toolId, toolName, toolDescription, updated: false } }
  } catch (error) {
    return { success: false, error: error instanceof Error ? error.message : String(error) }
  }
}
|
||||
|
||||
export async function executeRedeploy(context: ExecutionContext): Promise<ToolCallResult> {
|
||||
try {
|
||||
const workflowId = context.workflowId
|
||||
if (!workflowId) {
|
||||
return { success: false, error: 'workflowId is required' }
|
||||
}
|
||||
await ensureWorkflowAccess(workflowId, context.userId)
|
||||
|
||||
const result = await deployWorkflow({ workflowId, deployedBy: context.userId })
|
||||
if (!result.success) {
|
||||
return { success: false, error: result.error || 'Failed to redeploy workflow' }
|
||||
}
|
||||
return {
|
||||
success: true,
|
||||
output: { workflowId, deployedAt: result.deployedAt || null, version: result.version },
|
||||
}
|
||||
} catch (error) {
|
||||
return { success: false, error: error instanceof Error ? error.message : String(error) }
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,2 @@
|
||||
export * from './deploy'
|
||||
export * from './manage'
|
||||
@@ -0,0 +1,226 @@
|
||||
import crypto from 'crypto'
|
||||
import { db } from '@sim/db'
|
||||
import { chat, workflow, workflowMcpServer, workflowMcpTool } from '@sim/db/schema'
|
||||
import { eq, inArray } from 'drizzle-orm'
|
||||
import type { ExecutionContext, ToolCallResult } from '@/lib/copilot/orchestrator/types'
|
||||
import { sanitizeToolName } from '@/lib/mcp/workflow-tool-schema'
|
||||
import { hasValidStartBlock } from '@/lib/workflows/triggers/trigger-utils.server'
|
||||
import { ensureWorkflowAccess } from '../access'
|
||||
import type {
|
||||
CheckDeploymentStatusParams,
|
||||
CreateWorkspaceMcpServerParams,
|
||||
ListWorkspaceMcpServersParams,
|
||||
} from '../param-types'
|
||||
|
||||
/**
 * Report a workflow's deployment status across all three surfaces:
 * API endpoint, chat interface, and workspace MCP tools.
 *
 * Never throws: failures return { success: false, error }.
 *
 * NOTE(review): `apiDeploy` re-selects the workflow row even though
 * ensureWorkflowAccess already returned it — the extra query looks
 * redundant; confirm whether a fresher read is intentional.
 */
export async function executeCheckDeploymentStatus(
  params: CheckDeploymentStatusParams,
  context: ExecutionContext
): Promise<ToolCallResult> {
  try {
    const workflowId = params.workflowId || context.workflowId
    if (!workflowId) {
      return { success: false, error: 'workflowId is required' }
    }
    const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)
    const workspaceId = workflowRecord.workspaceId

    // Fetch workflow (API deploy flags) and chat deployment in parallel.
    const [apiDeploy, chatDeploy] = await Promise.all([
      db.select().from(workflow).where(eq(workflow.id, workflowId)).limit(1),
      db.select().from(chat).where(eq(chat.workflowId, workflowId)).limit(1),
    ])

    const isApiDeployed = apiDeploy[0]?.isDeployed || false
    const apiDetails = {
      isDeployed: isApiDeployed,
      deployedAt: apiDeploy[0]?.deployedAt || null,
      endpoint: isApiDeployed ? `/api/workflows/${workflowId}/execute` : null,
      // Informational hint about which API key scope applies.
      apiKey: workflowRecord.workspaceId ? 'Workspace API keys' : 'Personal API keys',
      needsRedeployment: false,
    }

    const isChatDeployed = !!chatDeploy[0]
    const chatCustomizations =
      (chatDeploy[0]?.customizations as
        | { welcomeMessage?: string; primaryColor?: string }
        | undefined) || {}
    const chatDetails = {
      isDeployed: isChatDeployed,
      chatId: chatDeploy[0]?.id || null,
      identifier: chatDeploy[0]?.identifier || null,
      chatUrl: isChatDeployed ? `/chat/${chatDeploy[0]?.identifier}` : null,
      title: chatDeploy[0]?.title || null,
      description: chatDeploy[0]?.description || null,
      authType: chatDeploy[0]?.authType || null,
      allowedEmails: chatDeploy[0]?.allowedEmails || null,
      outputConfigs: chatDeploy[0]?.outputConfigs || null,
      welcomeMessage: chatCustomizations.welcomeMessage || null,
      primaryColor: chatCustomizations.primaryColor || null,
      // Expose only whether a password exists, never the value.
      hasPassword: Boolean(chatDeploy[0]?.password),
    }

    // MCP tools only exist for workspace-scoped workflows.
    const mcpDetails: {
      isDeployed: boolean
      servers: Array<{
        serverId: string
        serverName: string
        toolName: string
        toolDescription: string | null
        parameterSchema: unknown
        toolId: string
      }>
    } = { isDeployed: false, servers: [] }
    if (workspaceId) {
      const servers = await db
        .select({
          serverId: workflowMcpServer.id,
          serverName: workflowMcpServer.name,
          toolName: workflowMcpTool.toolName,
          toolDescription: workflowMcpTool.toolDescription,
          parameterSchema: workflowMcpTool.parameterSchema,
          toolId: workflowMcpTool.id,
        })
        .from(workflowMcpTool)
        .innerJoin(workflowMcpServer, eq(workflowMcpTool.serverId, workflowMcpServer.id))
        .where(eq(workflowMcpTool.workflowId, workflowId))

      if (servers.length > 0) {
        mcpDetails.isDeployed = true
        mcpDetails.servers = servers
      }
    }

    // "Deployed" overall means deployed on at least one surface.
    const isDeployed = apiDetails.isDeployed || chatDetails.isDeployed || mcpDetails.isDeployed
    return {
      success: true,
      output: { isDeployed, api: apiDetails, chat: chatDetails, mcp: mcpDetails },
    }
  } catch (error) {
    return { success: false, error: error instanceof Error ? error.message : String(error) }
  }
}
|
||||
|
||||
/**
 * List MCP servers in the workspace of the given (or contextual) workflow,
 * each annotated with the names and count of tools registered on it.
 *
 * Never throws: failures return { success: false, error }.
 */
export async function executeListWorkspaceMcpServers(
  params: ListWorkspaceMcpServersParams,
  context: ExecutionContext
): Promise<ToolCallResult> {
  try {
    const workflowId = params.workflowId || context.workflowId
    if (!workflowId) {
      return { success: false, error: 'workflowId is required' }
    }
    // The workspace is derived from the workflow, which also enforces access.
    const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)
    const workspaceId = workflowRecord.workspaceId
    if (!workspaceId) {
      return { success: false, error: 'workspaceId is required' }
    }

    const servers = await db
      .select({
        id: workflowMcpServer.id,
        name: workflowMcpServer.name,
        description: workflowMcpServer.description,
      })
      .from(workflowMcpServer)
      .where(eq(workflowMcpServer.workspaceId, workspaceId))

    // Fetch all tools for these servers in a single query.
    const serverIds = servers.map((server) => server.id)
    const tools =
      serverIds.length > 0
        ? await db
            .select({
              serverId: workflowMcpTool.serverId,
              toolName: workflowMcpTool.toolName,
            })
            .from(workflowMcpTool)
            .where(inArray(workflowMcpTool.serverId, serverIds))
        : []

    // Group tool names by their server id.
    const toolNamesByServer: Record<string, string[]> = {}
    for (const tool of tools) {
      if (!toolNamesByServer[tool.serverId]) {
        toolNamesByServer[tool.serverId] = []
      }
      toolNamesByServer[tool.serverId].push(tool.toolName)
    }

    const serversWithToolNames = servers.map((server) => ({
      ...server,
      toolCount: toolNamesByServer[server.id]?.length || 0,
      toolNames: toolNamesByServer[server.id] || [],
    }))

    return { success: true, output: { servers: serversWithToolNames, count: servers.length } }
  } catch (error) {
    return { success: false, error: error instanceof Error ? error.message : String(error) }
  }
}
|
||||
|
||||
/**
 * Create a new MCP server in the workflow's workspace and optionally
 * register an initial set of workflows as tools on it.
 *
 * Workflows in `params.workflowIds` are silently skipped when they belong
 * to a different workspace, are not deployed, or lack a valid start block.
 * Never throws: failures return { success: false, error }.
 *
 * NOTE(review): tools are inserted sequentially (one await per workflow);
 * fine for small lists, but not atomic — a mid-loop failure leaves the
 * server with a partial tool set.
 */
export async function executeCreateWorkspaceMcpServer(
  params: CreateWorkspaceMcpServerParams,
  context: ExecutionContext
): Promise<ToolCallResult> {
  try {
    const workflowId = params.workflowId || context.workflowId
    if (!workflowId) {
      return { success: false, error: 'workflowId is required' }
    }
    // The workspace is derived from the workflow, which also enforces access.
    const { workflow: workflowRecord } = await ensureWorkflowAccess(workflowId, context.userId)
    const workspaceId = workflowRecord.workspaceId
    if (!workspaceId) {
      return { success: false, error: 'workspaceId is required' }
    }

    const name = params.name?.trim()
    if (!name) {
      return { success: false, error: 'name is required' }
    }

    const serverId = crypto.randomUUID()
    const [server] = await db
      .insert(workflowMcpServer)
      .values({
        id: serverId,
        workspaceId,
        createdBy: context.userId,
        name,
        description: params.description?.trim() || null,
        isPublic: params.isPublic ?? false,
        createdAt: new Date(),
        updatedAt: new Date(),
      })
      .returning()

    const workflowIds: string[] = params.workflowIds || []
    const addedTools: Array<{ workflowId: string; toolName: string }> = []

    if (workflowIds.length > 0) {
      const workflows = await db.select().from(workflow).where(inArray(workflow.id, workflowIds))

      for (const wf of workflows) {
        // Only same-workspace, already-deployed workflows qualify.
        if (wf.workspaceId !== workspaceId || !wf.isDeployed) {
          continue
        }
        const hasStartBlock = await hasValidStartBlock(wf.id)
        if (!hasStartBlock) {
          continue
        }
        const toolName = sanitizeToolName(wf.name || `workflow_${wf.id}`)
        await db.insert(workflowMcpTool).values({
          id: crypto.randomUUID(),
          serverId,
          workflowId: wf.id,
          toolName,
          toolDescription: wf.description || `Execute ${wf.name} workflow`,
          parameterSchema: {},
          createdAt: new Date(),
          updatedAt: new Date(),
        })
        addedTools.push({ workflowId: wf.id, toolName })
      }
    }

    return { success: true, output: { server, addedTools } }
  } catch (error) {
    return { success: false, error: error instanceof Error ? error.message : String(error) }
  }
}
|
||||
272
apps/sim/lib/copilot/orchestrator/tool-executor/index.ts
Normal file
272
apps/sim/lib/copilot/orchestrator/tool-executor/index.ts
Normal file
@@ -0,0 +1,272 @@
|
||||
import { db } from '@sim/db'
|
||||
import { workflow } from '@sim/db/schema'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { eq } from 'drizzle-orm'
|
||||
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
|
||||
import type {
|
||||
ExecutionContext,
|
||||
ToolCallResult,
|
||||
ToolCallState,
|
||||
} from '@/lib/copilot/orchestrator/types'
|
||||
import { routeExecution } from '@/lib/copilot/tools/server/router'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
|
||||
import { getTool, resolveToolId } from '@/tools/utils'
|
||||
import {
|
||||
executeCheckDeploymentStatus,
|
||||
executeCreateWorkspaceMcpServer,
|
||||
executeDeployApi,
|
||||
executeDeployChat,
|
||||
executeDeployMcp,
|
||||
executeListWorkspaceMcpServers,
|
||||
executeRedeploy,
|
||||
} from './deployment-tools'
|
||||
import { executeIntegrationToolDirect } from './integration-tools'
|
||||
import type {
|
||||
CheckDeploymentStatusParams,
|
||||
CreateFolderParams,
|
||||
CreateWorkflowParams,
|
||||
CreateWorkspaceMcpServerParams,
|
||||
DeployApiParams,
|
||||
DeployChatParams,
|
||||
DeployMcpParams,
|
||||
GenerateApiKeyParams,
|
||||
GetBlockOutputsParams,
|
||||
GetBlockUpstreamReferencesParams,
|
||||
GetDeployedWorkflowStateParams,
|
||||
GetUserWorkflowParams,
|
||||
GetWorkflowDataParams,
|
||||
GetWorkflowFromNameParams,
|
||||
ListFoldersParams,
|
||||
ListUserWorkflowsParams,
|
||||
ListWorkspaceMcpServersParams,
|
||||
MoveFolderParams,
|
||||
MoveWorkflowParams,
|
||||
RenameWorkflowParams,
|
||||
RunBlockParams,
|
||||
RunFromBlockParams,
|
||||
RunWorkflowParams,
|
||||
RunWorkflowUntilBlockParams,
|
||||
SetGlobalWorkflowVariablesParams,
|
||||
} from './param-types'
|
||||
import { PLATFORM_ACTIONS_CONTENT } from './platform-actions'
|
||||
import {
|
||||
executeCreateFolder,
|
||||
executeCreateWorkflow,
|
||||
executeGenerateApiKey,
|
||||
executeGetBlockOutputs,
|
||||
executeGetBlockUpstreamReferences,
|
||||
executeGetDeployedWorkflowState,
|
||||
executeGetUserWorkflow,
|
||||
executeGetWorkflowData,
|
||||
executeGetWorkflowFromName,
|
||||
executeListFolders,
|
||||
executeListUserWorkflows,
|
||||
executeListUserWorkspaces,
|
||||
executeMoveFolder,
|
||||
executeMoveWorkflow,
|
||||
executeRenameWorkflow,
|
||||
executeRunBlock,
|
||||
executeRunFromBlock,
|
||||
executeRunWorkflow,
|
||||
executeRunWorkflowUntilBlock,
|
||||
executeSetGlobalWorkflowVariables,
|
||||
} from './workflow-tools'
|
||||
|
||||
// Logger shared by all server-side copilot tool execution paths below.
const logger = createLogger('CopilotToolExecutor')

// Tool names routed through the generic copilot server-tool router
// (routeExecution) rather than a dedicated handler in this module.
const SERVER_TOOLS = new Set<string>([
  'get_blocks_and_tools',
  'get_blocks_metadata',
  'get_block_options',
  'get_block_config',
  'get_trigger_blocks',
  'edit_workflow',
  'get_workflow_console',
  'search_documentation',
  'search_online',
  'set_environment_variables',
  'get_credentials',
  'make_api_request',
  'knowledge_base',
])
|
||||
|
||||
// Dispatch table mapping copilot tool names to their server-side handlers.
// Each entry narrows the generic params object to that tool's param type
// before delegating; handlers that ignore params use the `_p` convention.
const SIM_WORKFLOW_TOOL_HANDLERS: Record<
  string,
  (params: Record<string, unknown>, context: ExecutionContext) => Promise<ToolCallResult>
> = {
  get_user_workflow: (p, c) => executeGetUserWorkflow(p as GetUserWorkflowParams, c),
  get_workflow_from_name: (p, c) => executeGetWorkflowFromName(p as GetWorkflowFromNameParams, c),
  list_user_workflows: (p, c) => executeListUserWorkflows(p as ListUserWorkflowsParams, c),
  list_user_workspaces: (_p, c) => executeListUserWorkspaces(c),
  list_folders: (p, c) => executeListFolders(p as ListFoldersParams, c),
  create_workflow: (p, c) => executeCreateWorkflow(p as CreateWorkflowParams, c),
  create_folder: (p, c) => executeCreateFolder(p as CreateFolderParams, c),
  rename_workflow: (p, c) => executeRenameWorkflow(p as unknown as RenameWorkflowParams, c),
  move_workflow: (p, c) => executeMoveWorkflow(p as unknown as MoveWorkflowParams, c),
  move_folder: (p, c) => executeMoveFolder(p as unknown as MoveFolderParams, c),
  get_workflow_data: (p, c) => executeGetWorkflowData(p as GetWorkflowDataParams, c),
  get_block_outputs: (p, c) => executeGetBlockOutputs(p as GetBlockOutputsParams, c),
  get_block_upstream_references: (p, c) =>
    executeGetBlockUpstreamReferences(p as unknown as GetBlockUpstreamReferencesParams, c),
  run_workflow: (p, c) => executeRunWorkflow(p as RunWorkflowParams, c),
  run_workflow_until_block: (p, c) =>
    executeRunWorkflowUntilBlock(p as unknown as RunWorkflowUntilBlockParams, c),
  run_from_block: (p, c) => executeRunFromBlock(p as unknown as RunFromBlockParams, c),
  run_block: (p, c) => executeRunBlock(p as unknown as RunBlockParams, c),
  get_deployed_workflow_state: (p, c) =>
    executeGetDeployedWorkflowState(p as GetDeployedWorkflowStateParams, c),
  generate_api_key: (p, c) => executeGenerateApiKey(p as unknown as GenerateApiKeyParams, c),
  // Static content — resolves immediately without touching the database.
  get_platform_actions: () =>
    Promise.resolve({
      success: true,
      output: { content: PLATFORM_ACTIONS_CONTENT },
    }),
  set_global_workflow_variables: (p, c) =>
    executeSetGlobalWorkflowVariables(p as SetGlobalWorkflowVariablesParams, c),
  deploy_api: (p, c) => executeDeployApi(p as DeployApiParams, c),
  deploy_chat: (p, c) => executeDeployChat(p as DeployChatParams, c),
  deploy_mcp: (p, c) => executeDeployMcp(p as DeployMcpParams, c),
  redeploy: (_p, c) => executeRedeploy(c),
  check_deployment_status: (p, c) =>
    executeCheckDeploymentStatus(p as CheckDeploymentStatusParams, c),
  list_workspace_mcp_servers: (p, c) =>
    executeListWorkspaceMcpServers(p as ListWorkspaceMcpServersParams, c),
  create_workspace_mcp_server: (p, c) =>
    executeCreateWorkspaceMcpServer(p as CreateWorkspaceMcpServerParams, c),
}
|
||||
|
||||
/**
|
||||
* Execute a tool server-side without calling internal routes.
|
||||
*/
|
||||
export async function executeToolServerSide(
|
||||
toolCall: ToolCallState,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
const toolName = toolCall.name
|
||||
const resolvedToolName = resolveToolId(toolName)
|
||||
|
||||
if (SERVER_TOOLS.has(toolName)) {
|
||||
return executeServerToolDirect(toolName, toolCall.params || {}, context)
|
||||
}
|
||||
|
||||
if (toolName in SIM_WORKFLOW_TOOL_HANDLERS) {
|
||||
return executeSimWorkflowTool(toolName, toolCall.params || {}, context)
|
||||
}
|
||||
|
||||
const toolConfig = getTool(resolvedToolName)
|
||||
if (!toolConfig) {
|
||||
logger.warn('Tool not found in registry', { toolName, resolvedToolName })
|
||||
return {
|
||||
success: false,
|
||||
error: `Tool not found: ${toolName}`,
|
||||
}
|
||||
}
|
||||
|
||||
return executeIntegrationToolDirect(toolCall, toolConfig, context)
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a server tool directly via the server tool router.
|
||||
*/
|
||||
async function executeServerToolDirect(
|
||||
toolName: string,
|
||||
params: Record<string, unknown>,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
try {
|
||||
// Inject workflowId from context if not provided in params
|
||||
// This is needed for tools like set_environment_variables that require workflowId
|
||||
const enrichedParams = { ...params }
|
||||
if (!enrichedParams.workflowId && context.workflowId) {
|
||||
enrichedParams.workflowId = context.workflowId
|
||||
}
|
||||
|
||||
const result = await routeExecution(toolName, enrichedParams, { userId: context.userId })
|
||||
return { success: true, output: result }
|
||||
} catch (error) {
|
||||
logger.error('Server tool execution failed', {
|
||||
toolName,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : 'Server tool execution failed',
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function executeSimWorkflowTool(
|
||||
toolName: string,
|
||||
params: Record<string, unknown>,
|
||||
context: ExecutionContext
|
||||
): Promise<ToolCallResult> {
|
||||
const handler = SIM_WORKFLOW_TOOL_HANDLERS[toolName]
|
||||
if (!handler) return { success: false, error: `Unsupported workflow tool: ${toolName}` }
|
||||
return handler(params, context)
|
||||
}
|
||||
|
||||
/**
|
||||
* Notify the copilot backend that a tool has completed.
|
||||
*/
|
||||
export async function markToolComplete(
|
||||
toolCallId: string,
|
||||
toolName: string,
|
||||
status: number,
|
||||
message?: unknown,
|
||||
data?: unknown
|
||||
): Promise<boolean> {
|
||||
try {
|
||||
const response = await fetch(`${SIM_AGENT_API_URL}/api/tools/mark-complete`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}),
|
||||
},
|
||||
body: JSON.stringify({
|
||||
id: toolCallId,
|
||||
name: toolName,
|
||||
status,
|
||||
message,
|
||||
data,
|
||||
}),
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
logger.warn('Mark-complete call failed', { toolCallId, status: response.status })
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
} catch (error) {
|
||||
logger.error('Mark-complete call failed', {
|
||||
toolCallId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Prepare execution context with cached environment values.
|
||||
*/
|
||||
export async function prepareExecutionContext(
|
||||
userId: string,
|
||||
workflowId: string
|
||||
): Promise<ExecutionContext> {
|
||||
const workflowResult = await db
|
||||
.select({ workspaceId: workflow.workspaceId })
|
||||
.from(workflow)
|
||||
.where(eq(workflow.id, workflowId))
|
||||
.limit(1)
|
||||
const workspaceId = workflowResult[0]?.workspaceId ?? undefined
|
||||
|
||||
const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId)
|
||||
|
||||
return {
|
||||
userId,
|
||||
workflowId,
|
||||
workspaceId,
|
||||
decryptedEnvVars,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,105 @@
|
||||
import { db } from '@sim/db'
|
||||
import { account, workflow } from '@sim/db/schema'
|
||||
import { and, eq } from 'drizzle-orm'
|
||||
import type {
|
||||
ExecutionContext,
|
||||
ToolCallResult,
|
||||
ToolCallState,
|
||||
} from '@/lib/copilot/orchestrator/types'
|
||||
import { generateRequestId } from '@/lib/core/utils/request'
|
||||
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
|
||||
import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils'
|
||||
import { resolveEnvVarReferences } from '@/executor/utils/reference-validation'
|
||||
import { executeTool } from '@/tools'
|
||||
import { resolveToolId } from '@/tools/utils'
|
||||
|
||||
/**
 * Execute an integration tool directly against its provider, outside of a
 * workflow run.
 *
 * Resolves {{ENV_VAR}} references in the LLM-supplied arguments, attaches
 * OAuth or API-key credentials when `toolConfig` requires them, and delegates
 * execution to `executeTool`.
 *
 * @param toolCall - Pending tool call: name plus raw params from the LLM
 * @param toolConfig - Credential requirements for the tool (OAuth / API key)
 * @param context - Execution context (user, workflow, optional cached env vars)
 * @returns Success flag plus the tool's output/error; missing credentials are
 *   reported as `success: false` rather than thrown
 */
export async function executeIntegrationToolDirect(
  toolCall: ToolCallState,
  toolConfig: {
    oauth?: { required?: boolean; provider?: string }
    params?: { apiKey?: { required?: boolean } }
  },
  context: ExecutionContext
): Promise<ToolCallResult> {
  const { userId, workflowId } = context
  // Canonicalize the incoming name to the registered tool id.
  const toolName = resolveToolId(toolCall.name)
  const toolArgs = toolCall.params || {}

  // Fall back to a DB lookup when the context doesn't carry the workspace id;
  // the workspace scopes which environment variables are effective.
  let workspaceId = context.workspaceId
  if (!workspaceId && workflowId) {
    const workflowResult = await db
      .select({ workspaceId: workflow.workspaceId })
      .from(workflow)
      .where(eq(workflow.id, workflowId))
      .limit(1)
    workspaceId = workflowResult[0]?.workspaceId ?? undefined
  }

  // Prefer env vars pre-decrypted by prepareExecutionContext; decrypt on demand otherwise.
  const decryptedEnvVars =
    context.decryptedEnvVars || (await getEffectiveDecryptedEnv(userId, workspaceId))

  // Deep resolution walks nested objects to replace {{ENV_VAR}} references.
  // Safe because tool arguments originate from the LLM (not direct user input)
  // and env vars belong to the user themselves.
  const executionParams = resolveEnvVarReferences(toolArgs, decryptedEnvVars, {
    deep: true,
  }) as Record<string, unknown>

  // OAuth-backed tools: find the user's connected account for the provider
  // and inject an access token into the execution params.
  if (toolConfig.oauth?.required && toolConfig.oauth.provider) {
    const provider = toolConfig.oauth.provider
    const accounts = await db
      .select()
      .from(account)
      .where(and(eq(account.providerId, provider), eq(account.userId, userId)))
      .limit(1)

    if (!accounts.length) {
      return {
        success: false,
        error: `No ${provider} account connected. Please connect your account first.`,
      }
    }

    const acc = accounts[0]
    const requestId = generateRequestId()
    // NOTE(review): presumably refreshes the stored token when expired and may
    // yield an empty accessToken on failure — confirm against refreshTokenIfNeeded.
    const { accessToken } = await refreshTokenIfNeeded(requestId, acc, acc.id)

    if (!accessToken) {
      return {
        success: false,
        error: `OAuth token not available for ${provider}. Please reconnect your account.`,
      }
    }

    executionParams.accessToken = accessToken
  }

  // API-key-backed tools: the key must arrive via the params themselves
  // (typically through an {{ENV_VAR}} reference resolved above); there is no
  // server-side fallback here.
  if (toolConfig.params?.apiKey?.required && !executionParams.apiKey) {
    return {
      success: false,
      error: `API key not provided for ${toolName}. Use {{YOUR_API_KEY_ENV_VAR}} to reference your environment variable.`,
    }
  }

  // Attach invocation context so the tool knows which user/workflow called it.
  executionParams._context = {
    workflowId,
    userId,
  }

  // function_execute expects sandbox inputs (env vars, block state); outside a
  // workflow run there is no block state, so pass empty maps and apply defaults.
  if (toolName === 'function_execute') {
    executionParams.envVars = decryptedEnvVars
    executionParams.workflowVariables = {}
    executionParams.blockData = {}
    executionParams.blockNameMapping = {}
    executionParams.language = executionParams.language || 'javascript'
    executionParams.timeout = executionParams.timeout || 30000
  }

  const result = await executeTool(toolName, executionParams)

  return {
    success: result.success,
    output: result.output,
    error: result.error,
  }
}
|
||||
187
apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts
Normal file
187
apps/sim/lib/copilot/orchestrator/tool-executor/param-types.ts
Normal file
@@ -0,0 +1,187 @@
|
||||
/**
 * Typed parameter interfaces for tool executor functions.
 * Replaces Record<string, any> with specific shapes based on actual property access.
 */

// === Workflow Query Params ===

/** Arguments for fetching a single workflow by id. */
export interface GetUserWorkflowParams {
  workflowId?: string
}

/** Arguments for looking up a workflow by its display name. */
export interface GetWorkflowFromNameParams {
  // snake_case key — presumably mirrors the LLM-facing tool schema; confirm against handler
  workflow_name?: string
}

/** Filters for listing a user's workflows. */
export interface ListUserWorkflowsParams {
  workspaceId?: string
  folderId?: string
}

/** Arguments for fetching a specific slice of a workflow's data. */
export interface GetWorkflowDataParams {
  workflowId?: string
  // Both spellings accepted — presumably the handler tolerates snake_case and
  // camelCase input; TODO confirm.
  data_type?: string
  dataType?: string
}

/** Arguments for fetching the output schemas of specific blocks. */
export interface GetBlockOutputsParams {
  workflowId?: string
  blockIds?: string[]
}

/** Arguments for resolving which upstream blocks the given blocks reference. */
export interface GetBlockUpstreamReferencesParams {
  workflowId?: string
  blockIds: string[]
}

/** Filters for listing folders within a workspace. */
export interface ListFoldersParams {
  workspaceId?: string
}

// === Workflow Mutation Params ===

/** Arguments for creating a new workflow. */
export interface CreateWorkflowParams {
  name?: string
  workspaceId?: string
  folderId?: string
  description?: string
}

/** Arguments for creating a new folder. */
export interface CreateFolderParams {
  name?: string
  workspaceId?: string
  // Omitted parentId presumably creates a top-level folder — confirm against handler.
  parentId?: string
}

/** Arguments for executing a workflow end to end. */
export interface RunWorkflowParams {
  workflowId?: string
  // Two aliases for the same payload — presumably the handler accepts either; confirm.
  workflow_input?: unknown
  input?: unknown
  /** When true, runs the deployed version instead of the draft. Default: false (draft). */
  useDeployedState?: boolean
}

/** Arguments for executing a workflow and halting partway through. */
export interface RunWorkflowUntilBlockParams {
  workflowId?: string
  workflow_input?: unknown
  input?: unknown
  /** The block ID to stop after. Execution halts once this block completes. */
  stopAfterBlockId: string
  /** When true, runs the deployed version instead of the draft. Default: false (draft). */
  useDeployedState?: boolean
}

/** Arguments for resuming execution from a specific block using a prior snapshot. */
export interface RunFromBlockParams {
  workflowId?: string
  /** The block ID to start execution from. */
  startBlockId: string
  /** Optional execution ID to load the snapshot from. If omitted, uses the latest execution. */
  executionId?: string
  workflow_input?: unknown
  input?: unknown
  useDeployedState?: boolean
}

/** Arguments for running a single block in isolation. */
export interface RunBlockParams {
  workflowId?: string
  /** The block ID to run. Only this block executes using cached upstream outputs. */
  blockId: string
  /** Optional execution ID to load the snapshot from. If omitted, uses the latest execution. */
  executionId?: string
  workflow_input?: unknown
  input?: unknown
  useDeployedState?: boolean
}

/** Arguments for fetching a workflow's deployed (published) state. */
export interface GetDeployedWorkflowStateParams {
  workflowId?: string
}

/** Arguments for generating a named API key. */
export interface GenerateApiKeyParams {
  name: string
  workspaceId?: string
}

/** A single add/edit/delete mutation applied to a workflow variable. */
export interface VariableOperation {
  name: string
  operation: 'add' | 'edit' | 'delete'
  value?: unknown
  type?: string
}

/** Batch of variable operations applied to a workflow's global variables. */
export interface SetGlobalWorkflowVariablesParams {
  workflowId?: string
  operations?: VariableOperation[]
}

// === Deployment Params ===

/** Arguments for toggling a workflow's API deployment. */
export interface DeployApiParams {
  workflowId?: string
  action?: 'deploy' | 'undeploy'
}

/** Arguments for managing a workflow's chat deployment. */
export interface DeployChatParams {
  workflowId?: string
  action?: 'deploy' | 'undeploy' | 'update'
  identifier?: string
  title?: string
  description?: string
  // Visual/branding overrides for the hosted chat UI.
  customizations?: {
    primaryColor?: string
    secondaryColor?: string
    welcomeMessage?: string
    iconUrl?: string
  }
  authType?: 'none' | 'password' | 'public' | 'email' | 'sso'
  password?: string
  subdomain?: string
  allowedEmails?: string[]
  outputConfigs?: unknown[]
}

/** Arguments for exposing a workflow as an MCP tool. */
export interface DeployMcpParams {
  workflowId?: string
  action?: 'deploy' | 'undeploy'
  toolName?: string
  toolDescription?: string
  serverId?: string
  parameterSchema?: Record<string, unknown>
}

/** Arguments for checking a workflow's deployment status. */
export interface CheckDeploymentStatusParams {
  workflowId?: string
}

/** Filters for listing MCP servers in a workspace. */
export interface ListWorkspaceMcpServersParams {
  workspaceId?: string
  workflowId?: string
}

/** Arguments for creating a workspace-level MCP server. */
export interface CreateWorkspaceMcpServerParams {
  workflowId?: string
  name?: string
  description?: string
  toolName?: string
  toolDescription?: string
  serverName?: string
  isPublic?: boolean
  workflowIds?: string[]
}

// === Workflow Organization Params ===

/** Arguments for renaming a workflow. */
export interface RenameWorkflowParams {
  workflowId: string
  name: string
}

/** Arguments for moving a workflow into a folder (null moves it to the root). */
export interface MoveWorkflowParams {
  workflowId: string
  folderId: string | null
}

/** Arguments for re-parenting a folder (null moves it to the root). */
export interface MoveFolderParams {
  folderId: string
  parentId: string | null
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user