Compare commits

...

18 Commits

Author SHA1 Message Date
Siddharth Ganesan
8848780f56 Fix stream reconnect 2026-04-04 17:26:36 -07:00
Siddharth Ganesan
fefeb010de Add client retry logic 2026-04-04 17:13:38 -07:00
Siddharth Ganesan
ee6c7f98ff File types 2026-04-04 17:04:14 -07:00
Siddharth Ganesan
64758af2b6 improvement(mothership): docs 2026-04-04 14:19:24 -07:00
Siddharth Ganesan
8c09e19293 Add deps 2026-04-04 13:06:45 -07:00
Siddharth Ganesan
feb1c88d2f feat(mothership): append 2026-04-04 12:31:23 -07:00
Siddharth Ganesan
78007c11a0 feat(mothership): add docx support 2026-04-04 11:41:27 -07:00
Siddharth Ganesan
bac1d5e588 Force redeploy 2026-04-03 18:43:50 -07:00
Siddharth Ganesan
7fdab14266 improvement(mothership): new agent loop (#3920)
* feat(transport): replace shared chat transport with mothership-stream module

* improvement(contracts): regenerate contracts from go

* feat(tools): add tool catalog codegen from go tool contracts

* feat(tools): add tool-executor dispatch framework for sim side tool routing

* feat(orchestrator): rewrite tool dispatch with catalog-driven executor and simplified resume loop

* feat(orchestrator): checkpoint resume flow

* refactor(copilot): consolidate orchestrator into request/ layer

* refactor(mothership): reorganize lib/copilot into structured subdirectories

* refactor(mothership): canonical transcript layer, dead code cleanup, type consolidation

* refactor(mothership): rebase onto latest staging

* refactor(mothership): rename request continue to lifecycle

* feat(trace): add initial version of request traces

* improvement(stream): batch stream from redis

* fix(resume): fix the resume checkpoint

* fix(resume): fix resume client tool

* fix(subagents): subagent resume should join on existing subagent text block

* improvement(reconnect): harden reconnect logic

* fix(superagent): fix superagent integration tools

* improvement(stream): improve stream perf

* Rebase with origin dev

* fix(tests): fix failing test

* fix(build): fix type errors

* fix(build): fix build errors

* fix(build): fix type errors

* feat(mothership): add cli execution

* fix(mothership): fix function execute tests
2026-04-03 17:27:51 -07:00
Vikhyath Mondreti
3b9e663f25 fix build error 2026-04-02 18:53:40 -07:00
Vikhyath Mondreti
381bc1d556 fix(concurrency): cleanup worker code 2026-04-02 18:48:00 -07:00
Waleed
20c05644ab fix(enterprise): smooth audit log list animation (#3905) 2026-04-02 16:03:03 -07:00
Waleed
f9d73db65c feat(rootly): expand Rootly integration from 14 to 27 tools (#3902)
* feat(rootly): expand Rootly integration from 14 to 27 tools

Add 13 new tools: delete_incident, get_alert, update_alert,
acknowledge_alert, resolve_alert, create_action_item, list_action_items,
list_users, list_on_calls, list_schedules, list_escalation_policies,
list_causes, list_playbooks. Includes tool files, types, registry,
block definition with subBlocks/conditions/params, and docs.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(rootly): handle 204 No Content response for delete_incident

DELETE /v1/incidents/{id} returns 204 with empty body. Avoid calling
response.json() on success — return success/message instead.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(rootly): remove non-TSDoc comments, add empty body to acknowledge_alert

Remove all inline section comments from block definition per CLAUDE.md
guidelines. Add explicit empty JSON:API body to acknowledge_alert POST
to prevent potential 400 from servers expecting a body with Content-Type.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(rootly): send empty body on resolve_alert, guard assignedToUserId parse

resolve_alert now sends { data: {} } instead of undefined when no
optional params are provided, matching the acknowledge_alert fix.
create_action_item now validates assignedToUserId is numeric before
parseInt to avoid silent NaN coercion.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(rootly): extract on-call relationships from JSON:API relationships/included

On-call user, schedule, and escalation policy are exposed as JSON:API
relationships, not flat attributes. Now extracts IDs from
item.relationships and looks up names from the included array.
Adds ?include=user,schedule,escalation_policy to the request URL.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(rootly): remove last non-TSDoc comment from block definition

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* docs

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-02 13:40:45 -07:00
Waleed
e2e53aba76 feat(agentmail): add AgentMail integration with 21 tools (#3901)
* feat(agentmail): add AgentMail integration with 21 tools

* fix(agentmail): clear stale to field when switching to reply_message operation

* fix(agentmail): guard messageId and label remappings with operation checks

* fix(agentmail): clean up subBlock titles

* fix(agentmail): guard replyTo and thread label remappings with operation checks

* fix(agentmail): guard inboxIdParam remapping with operation check

* fix(agentmail): guard permanent, replyAll, and draftInReplyTo with operation checks
2026-04-02 13:40:23 -07:00
Waleed
727bb1cadb fix(bullmq): restore CONCURRENCY_CONTROL_ENABLED flag guard (#3903) 2026-04-02 13:07:50 -07:00
Waleed
e2e29cefd7 fix(blog): use landing theme variables in MDX components (#3900) 2026-04-02 12:34:38 -07:00
Waleed
45f053a383 feat(rootly): add Rootly incident management integration with 14 tools (#3899)
* feat(rootly): add Rootly incident management integration with 14 tools

* fix(rootly): address PR review feedback - PATCH method, totalCount, environmentIds

- Changed update_incident HTTP method from PUT to PATCH per Rootly API spec
- Fixed totalCount in all 9 list tools to use data.meta?.total_count from API response
- Added missing updateEnvironmentIds subBlock and params mapping for update_incident

* fix(rootly): add id to PATCH body and unchanged option to update status dropdown

- Include incident id in JSON:API PATCH body per spec requirement
- Add 'Unchanged' empty option to updateStatus dropdown to avoid accidental overwrites

* icon update

* improvement(rootly): complete block-tool alignment and fix validation gaps

- Add missing get_incident output fields (private, shortUrl, closedAt)
- Add missing block subBlocks: createPrivate, alertStatus, alertExternalId, listAlertsServices
- Add pageNumber subBlocks for all 9 list operations
- Add teams/environments filter subBlocks for list_incidents and list_alerts
- Add environmentIds subBlock for create_alert
- Add empty default options to all optional dropdowns (createStatus, createKind, listIncidentsSort, eventVisibility)
- Wire all new subBlocks in tools.config.params and inputs
- Regenerate docs

* fix(rootly): align tools with OpenAPI spec

- list_incident_types: use filter[name] instead of unsupported filter[search]
- list_severities: add missing search param (filter[search])
- create_incident: title is optional per API (auto-generated if null)
- update_incident: add kind, private, labels, incidentTypeIds,
  functionalityIds, cancellationMessage params
- create/update/list incidents: add scheduled, in_progress, completed
  status values
- create_alert: fix status description (only open/triggered on create)
- add_incident_event: add updatedAt to response
- block: add matching subBlocks and params for all new tool fields

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(rootly): final validation fixes from OpenAPI spec audit

- update_incident: change PATCH to PUT per OpenAPI spec
- index.ts: add types re-export
- types.ts: fix id fields to string | null (matches ?? null runtime)
- block: add value initializers to 4 dropdowns missing them
- registry: fix alphabetical order (incident_types before incidents)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* reorg

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-02 11:40:40 -07:00
Waleed
225d5d551a improvement(models): update default to claude-sonnet-4-6 and reorganize OpenAI models (#3898)
* improvement(models): update default to claude-sonnet-4-6 and reorganize OpenAI models

* fix(tests): update stale claude-sonnet-4-5 references to claude-sonnet-4-6

* fix(combobox): rename misleading claudeSonnet45 variable to defaultModelOption
2026-04-02 10:51:27 -07:00
354 changed files with 24667 additions and 15726 deletions

View File

@@ -74,10 +74,6 @@ docker compose -f docker-compose.prod.yml up -d
Open [http://localhost:3000](http://localhost:3000)
#### Background worker note
The Docker Compose stack starts a dedicated worker container by default. If `REDIS_URL` is not configured, the worker will start, log that it is idle, and do no queue processing. This is expected. Queue-backed API, webhook, and schedule execution requires Redis; installs without Redis continue to use the inline execution path.
Sim also supports local models via [Ollama](https://ollama.ai) and [vLLM](https://docs.vllm.ai/) — see the [Docker self-hosting docs](https://docs.sim.ai/self-hosting/docker) for setup details.
### Self-hosted: Manual Setup
@@ -117,12 +113,10 @@ cd packages/db && bunx drizzle-kit migrate --config=./drizzle.config.ts
5. Start development servers:
```bash
bun run dev:full # Starts Next.js app, realtime socket server, and the BullMQ worker
bun run dev:full # Starts Next.js app and realtime socket server
```
If `REDIS_URL` is not configured, the worker will remain idle and execution continues inline.
Or run separately: `bun run dev` (Next.js), `cd apps/sim && bun run dev:sockets` (realtime), and `cd apps/sim && bun run worker` (BullMQ worker).
Or run separately: `bun run dev` (Next.js) and `cd apps/sim && bun run dev:sockets` (realtime).
## Copilot API Keys

View File

@@ -1,6 +1,33 @@
import type { SVGProps } from 'react'
import { useId } from 'react'
/**
 * Renders the AgentMail brand mark as an inline SVG icon.
 *
 * The glyph consists of five filled regions inside a 350x363 viewBox and
 * inherits its color from the surrounding text via `fill='currentColor'`.
 *
 * @param props - Standard SVG props spread onto the root `<svg>` element
 *   (className, width/height, aria-* attributes, etc.).
 */
export function AgentMailIcon(props: SVGProps<SVGSVGElement>) {
  // Path data for each filled region of the logo, in original source order.
  const glyphPaths = [
    'M318.029 88.3407C196.474 115.33 153.48 115.321 33.9244 88.3271C30.6216 87.5814 27.1432 88.9727 25.3284 91.8313L1.24109 129.774C-1.76483 134.509 0.965276 140.798 6.46483 141.898C152.613 171.13 197.678 171.182 343.903 141.835C349.304 140.751 352.064 134.641 349.247 129.907L326.719 92.0479C324.95 89.0744 321.407 87.5907 318.029 88.3407Z',
    'M75.9931 246.6L149.939 311.655C151.973 313.444 151.633 316.969 149.281 318.48L119.141 337.84C117.283 339.034 114.951 338.412 113.933 336.452L70.1276 252.036C68.0779 248.086 72.7553 243.751 75.9931 246.6Z',
    'M274.025 246.6L200.08 311.655C198.046 313.444 198.385 316.969 200.737 318.48L230.877 337.84C232.736 339.034 235.068 338.412 236.085 336.452L279.891 252.036C281.941 248.086 277.263 243.751 274.025 246.6Z',
    'M138.75 198.472L152.436 192.983C155.238 191.918 157.77 191.918 158.574 191.918C164.115 192.126 169.564 192.232 175.009 192.235C180.454 192.232 185.904 192.126 191.444 191.918C192.248 191.918 194.78 191.918 197.583 192.983L211.269 198.472C212.645 199.025 214.082 199.382 215.544 199.448C218.585 199.587 221.733 199.464 224.63 198.811C225.706 198.568 226.728 198.103 227.704 197.545L243.046 188.784C244.81 187.777 246.726 187.138 248.697 186.9L258.276 185.5H259.242H263.556L262.713 190.965L256.679 234.22C255.957 238.31 254.25 242.328 250.443 245.834L187.376 299.258C184.555 301.648 181.107 302.942 177.562 302.942H175.009H172.457C168.911 302.942 165.464 301.648 162.643 299.258L99.5761 245.834C95.7684 242.328 94.0614 238.31 93.3393 234.22L87.3059 190.965L86.4624 185.5H90.7771H91.7429L101.322 186.9C103.293 187.138 105.208 187.777 106.972 188.784L122.314 197.545C123.291 198.103 124.313 198.568 125.389 198.811C128.286 199.464 131.434 199.587 134.474 199.448C135.936 199.382 137.373 199.025 138.75 198.472Z',
    'M102.47 0.847827C205.434 44.796 156.456 42.1015 248.434 1.63153C252.885 -1.09955 258.353 1.88915 259.419 7.69219L269.235 61.1686L270.819 69.7893L263.592 71.8231L263.582 71.8259C190.588 92.3069 165.244 92.0078 86.7576 71.7428L79.1971 69.7905L80.9925 60.8681L91.8401 6.91975C92.9559 1.3706 98.105 -1.55777 102.47 0.847827Z',
  ]
  return (
    <svg {...props} viewBox='0 0 350 363' fill='none' xmlns='http://www.w3.org/2000/svg'>
      {glyphPaths.map((d) => (
        <path key={d} d={d} fill='currentColor' />
      ))}
    </svg>
  )
}
export function SearchIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg
@@ -6387,6 +6414,41 @@ export function RipplingIcon(props: SVGProps<SVGSVGElement>) {
)
}
/**
 * Renders the Rootly brand mark as an inline SVG icon.
 *
 * The glyph consists of seven filled regions inside a 250x217 viewBox and
 * inherits its color from the surrounding text via `fill='currentColor'`.
 *
 * @param props - Standard SVG props spread onto the root `<svg>` element
 *   (className, width/height, aria-* attributes, etc.).
 */
export function RootlyIcon(props: SVGProps<SVGSVGElement>) {
  // Path data for each filled region of the logo, in original source order.
  const glyphPaths = [
    'm124.8 5.21c-9.62 11.52-15.84 24.61-15.84 35.75 0 11.65 7.22 21.11 15.56 21.11 8.72 0 15.81-10.18 15.81-21.25 0-11.06-6.44-24.29-15.53-35.61z',
    'm124.7 84.29c-9.76 11.45-16.05 23.67-16.05 34.88 0 10.99 7.15 20.82 15.74 20.51 8.72-0.34 16.25-10.31 16.04-21.37-0.27-11.06-6.58-22.64-15.73-34.02z',
    'm48.81 48.5c5.82 18.47 16.5 35.38 33.97 36.06 10.99 0.4 15.38-7.12 15.31-12.52-0.13-9.19-8.14-24.76-36.9-24.76-4.74 0-8.26 0.34-12.38 1.22z',
    'm18.92 99.03c9.83 15.7 22.58 26.25 36.07 26.39 9.9 0 18.18-5.68 18.12-14.34-0.07-7.92-8.35-18.84-25.25-18.84-9.69 0-17.77 2.61-28.94 6.79z',
    'm200.1 48.43c-4.18-1.01-7.63-1.29-13.32-1.29-21.73 0-36.35 9.91-36.69 24.7-0.2 7.52 6.17 12.78 15.83 12.78 14.48 0 26.89-14.79 34.18-36.19z',
    'm230.6 98.96c-9.9-4.58-18.55-6.72-28.77-6.72-15.59 0-26.14 10.72-26.07 19.38 0.07 7.71 7.73 13.53 17.13 13.53 12.34 0 25.23-9.81 37.71-26.19z',
    'm6.12 146.9 3.65 24.48c10.99-2.34 21.41-3.21 34.17-3.21 38.03 0 63.94 13.69 66.15 41.52h28.83c2.69-26.48 24.99-41.52 66.67-41.52 11.62 0 22.37 1.15 34.32 3.21l4.05-24.34c-10.99-1.8-20.72-2.41-32.73-2.41-38.44 0-68.07 10.32-86.55 31.79-16.25-19.98-42.03-31.79-84.53-31.79-12.01 0-23.36 0.61-34.03 2.27z',
  ]
  return (
    <svg {...props} viewBox='0 0 250 217' fill='none' xmlns='http://www.w3.org/2000/svg'>
      {glyphPaths.map((d) => (
        <path key={d} d={d} fill='currentColor' />
      ))}
    </svg>
  )
}
export function HexIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 1450.3 600'>

View File

@@ -5,6 +5,7 @@
import type { ComponentType, SVGProps } from 'react'
import {
A2AIcon,
AgentMailIcon,
AhrefsIcon,
AirtableIcon,
AirweaveIcon,
@@ -139,6 +140,7 @@ import {
ResendIcon,
RevenueCatIcon,
RipplingIcon,
RootlyIcon,
S3Icon,
SalesforceIcon,
SearchIcon,
@@ -188,6 +190,7 @@ type IconComponent = ComponentType<SVGProps<SVGSVGElement>>
export const blockTypeToIconMap: Record<string, IconComponent> = {
a2a: A2AIcon,
agentmail: AgentMailIcon,
ahrefs: AhrefsIcon,
airtable: AirtableIcon,
airweave: AirweaveIcon,
@@ -320,6 +323,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
resend: ResendIcon,
revenuecat: RevenueCatIcon,
rippling: RipplingIcon,
rootly: RootlyIcon,
s3: S3Icon,
salesforce: SalesforceIcon,
search: SearchIcon,

View File

@@ -195,17 +195,6 @@ By default, your usage is capped at the credits included in your plan. To allow
Max (individual) shares the same rate limits as team plans. Team plans (Pro or Max for Teams) use the Max-tier rate limits.
### Concurrent Execution Limits
| Plan | Concurrent Executions |
|------|----------------------|
| **Free** | 5 |
| **Pro** | 50 |
| **Max / Team** | 200 |
| **Enterprise** | 200 (customizable) |
Concurrent execution limits control how many workflow executions can run simultaneously within a workspace. When the limit is reached, new executions are queued and admitted as running executions complete. Manual runs from the editor are not subject to these limits.
### File Storage
| Plan | Storage |

View File

@@ -0,0 +1,592 @@
---
title: AgentMail
description: Manage email inboxes, threads, and messages with AgentMail
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="agentmail"
color="#000000"
/>
{/* MANUAL-CONTENT-START:intro */}
[AgentMail](https://agentmail.to/) is an API-first email platform built for agents and automation. AgentMail lets you create email inboxes on the fly, send and receive messages, reply to threads, manage drafts, and organize conversations with labels — all through a simple REST API designed for programmatic access.
**Why AgentMail?**
- **Agent-Native Email:** Purpose-built for AI agents and automation — create inboxes, send messages, and manage threads without human-facing UI overhead.
- **Full Email Lifecycle:** Send new messages, reply to threads, forward emails, manage drafts, and schedule sends — all from a single API.
- **Thread & Conversation Management:** Organize emails into threads with full read, reply, forward, and label support for structured conversation tracking.
- **Draft Workflow:** Compose drafts, update them, schedule sends, and dispatch when ready — perfect for review-before-send workflows.
- **Label Organization:** Tag threads and messages with custom labels for filtering, routing, and downstream automation.
**Using AgentMail in Sim**
Sim's AgentMail integration connects your agentic workflows directly to AgentMail using an API key. With 20 operations spanning inboxes, threads, messages, and drafts, you can build powerful email automations without writing backend code.
**Key benefits of using AgentMail in Sim:**
- **Dynamic inbox creation:** Spin up new inboxes on the fly for each agent, workflow, or customer — perfect for multi-tenant email handling.
- **Automated email processing:** List and read incoming messages, then trigger downstream actions based on content, sender, or labels.
- **Conversational email:** Reply to threads and forward messages to keep conversations flowing naturally within your automated workflows.
- **Draft and review workflows:** Create drafts, update them with AI-generated content, and send when approved — ideal for human-in-the-loop patterns.
- **Email organization:** Apply labels to threads and messages to categorize, filter, and route emails through your automation pipeline.
Whether you're building an AI email assistant, automating customer support replies, processing incoming leads, or managing multi-agent email workflows, AgentMail in Sim gives you direct, secure access to the full AgentMail API — no middleware required. Simply configure your API key, select the operation you need, and let Sim handle the rest.
{/* MANUAL-CONTENT-END */}
## Usage Instructions
Integrate AgentMail into your workflow. Create and manage email inboxes, send and receive messages, reply to threads, manage drafts, and organize threads with labels. Requires API Key.
## Tools
### `agentmail_create_draft`
Create a new email draft in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox to create the draft in |
| `to` | string | No | Recipient email addresses \(comma-separated\) |
| `subject` | string | No | Draft subject line |
| `text` | string | No | Plain text draft body |
| `html` | string | No | HTML draft body |
| `cc` | string | No | CC recipient email addresses \(comma-separated\) |
| `bcc` | string | No | BCC recipient email addresses \(comma-separated\) |
| `inReplyTo` | string | No | ID of message being replied to |
| `sendAt` | string | No | ISO 8601 timestamp to schedule sending |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `draftId` | string | Unique identifier for the draft |
| `inboxId` | string | Inbox the draft belongs to |
| `subject` | string | Draft subject |
| `to` | array | Recipient email addresses |
| `cc` | array | CC email addresses |
| `bcc` | array | BCC email addresses |
| `text` | string | Plain text content |
| `html` | string | HTML content |
| `preview` | string | Draft preview text |
| `labels` | array | Labels assigned to the draft |
| `inReplyTo` | string | Message ID this draft replies to |
| `sendStatus` | string | Send status \(scheduled, sending, failed\) |
| `sendAt` | string | Scheduled send time |
| `createdAt` | string | Creation timestamp |
| `updatedAt` | string | Last updated timestamp |
### `agentmail_create_inbox`
Create a new email inbox with AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `username` | string | No | Username for the inbox email address |
| `domain` | string | No | Domain for the inbox email address |
| `displayName` | string | No | Display name for the inbox |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `inboxId` | string | Unique identifier for the inbox |
| `email` | string | Email address of the inbox |
| `displayName` | string | Display name of the inbox |
| `createdAt` | string | Creation timestamp |
| `updatedAt` | string | Last updated timestamp |
### `agentmail_delete_draft`
Delete an email draft in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox containing the draft |
| `draftId` | string | Yes | ID of the draft to delete |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `deleted` | boolean | Whether the draft was successfully deleted |
### `agentmail_delete_inbox`
Delete an email inbox in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox to delete |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `deleted` | boolean | Whether the inbox was successfully deleted |
### `agentmail_delete_thread`
Delete an email thread in AgentMail (moves to trash, or permanently deletes if already in trash)
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox containing the thread |
| `threadId` | string | Yes | ID of the thread to delete |
| `permanent` | boolean | No | Force permanent deletion instead of moving to trash |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `deleted` | boolean | Whether the thread was successfully deleted |
### `agentmail_forward_message`
Forward an email message to new recipients in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox containing the message |
| `messageId` | string | Yes | ID of the message to forward |
| `to` | string | Yes | Recipient email addresses \(comma-separated\) |
| `subject` | string | No | Override subject line |
| `text` | string | No | Additional plain text to prepend |
| `html` | string | No | Additional HTML to prepend |
| `cc` | string | No | CC recipient email addresses \(comma-separated\) |
| `bcc` | string | No | BCC recipient email addresses \(comma-separated\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `messageId` | string | ID of the forwarded message |
| `threadId` | string | ID of the thread |
### `agentmail_get_draft`
Get details of a specific email draft in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox the draft belongs to |
| `draftId` | string | Yes | ID of the draft to retrieve |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `draftId` | string | Unique identifier for the draft |
| `inboxId` | string | Inbox the draft belongs to |
| `subject` | string | Draft subject |
| `to` | array | Recipient email addresses |
| `cc` | array | CC email addresses |
| `bcc` | array | BCC email addresses |
| `text` | string | Plain text content |
| `html` | string | HTML content |
| `preview` | string | Draft preview text |
| `labels` | array | Labels assigned to the draft |
| `inReplyTo` | string | Message ID this draft replies to |
| `sendStatus` | string | Send status \(scheduled, sending, failed\) |
| `sendAt` | string | Scheduled send time |
| `createdAt` | string | Creation timestamp |
| `updatedAt` | string | Last updated timestamp |
### `agentmail_get_inbox`
Get details of a specific email inbox in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox to retrieve |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `inboxId` | string | Unique identifier for the inbox |
| `email` | string | Email address of the inbox |
| `displayName` | string | Display name of the inbox |
| `createdAt` | string | Creation timestamp |
| `updatedAt` | string | Last updated timestamp |
### `agentmail_get_message`
Get details of a specific email message in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox containing the message |
| `messageId` | string | Yes | ID of the message to retrieve |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `messageId` | string | Unique identifier for the message |
| `threadId` | string | ID of the thread this message belongs to |
| `from` | string | Sender email address |
| `to` | array | Recipient email addresses |
| `cc` | array | CC email addresses |
| `bcc` | array | BCC email addresses |
| `subject` | string | Message subject |
| `text` | string | Plain text content |
| `html` | string | HTML content |
| `createdAt` | string | Creation timestamp |
### `agentmail_get_thread`
Get details of a specific email thread including messages in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox containing the thread |
| `threadId` | string | Yes | ID of the thread to retrieve |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `threadId` | string | Unique identifier for the thread |
| `subject` | string | Thread subject |
| `senders` | array | List of sender email addresses |
| `recipients` | array | List of recipient email addresses |
| `messageCount` | number | Number of messages in the thread |
| `labels` | array | Labels assigned to the thread |
| `lastMessageAt` | string | Timestamp of last message |
| `createdAt` | string | Creation timestamp |
| `updatedAt` | string | Last updated timestamp |
| `messages` | array | Messages in the thread |
| ↳ `messageId` | string | Unique identifier for the message |
| ↳ `from` | string | Sender email address |
| ↳ `to` | array | Recipient email addresses |
| ↳ `cc` | array | CC email addresses |
| ↳ `bcc` | array | BCC email addresses |
| ↳ `subject` | string | Message subject |
| ↳ `text` | string | Plain text content |
| ↳ `html` | string | HTML content |
| ↳ `createdAt` | string | Creation timestamp |
### `agentmail_list_drafts`
List email drafts in an inbox in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox to list drafts from |
| `limit` | number | No | Maximum number of drafts to return |
| `pageToken` | string | No | Pagination token for next page of results |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `drafts` | array | List of drafts |
| ↳ `draftId` | string | Unique identifier for the draft |
| ↳ `inboxId` | string | Inbox the draft belongs to |
| ↳ `subject` | string | Draft subject |
| ↳ `to` | array | Recipient email addresses |
| ↳ `cc` | array | CC email addresses |
| ↳ `bcc` | array | BCC email addresses |
| ↳ `preview` | string | Draft preview text |
| ↳ `sendStatus` | string | Send status \(scheduled, sending, failed\) |
| ↳ `sendAt` | string | Scheduled send time |
| ↳ `createdAt` | string | Creation timestamp |
| ↳ `updatedAt` | string | Last updated timestamp |
| `count` | number | Total number of drafts |
| `nextPageToken` | string | Token for retrieving the next page |
### `agentmail_list_inboxes`
List all email inboxes in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `limit` | number | No | Maximum number of inboxes to return |
| `pageToken` | string | No | Pagination token for next page of results |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `inboxes` | array | List of inboxes |
| ↳ `inboxId` | string | Unique identifier for the inbox |
| ↳ `email` | string | Email address of the inbox |
| ↳ `displayName` | string | Display name of the inbox |
| ↳ `createdAt` | string | Creation timestamp |
| ↳ `updatedAt` | string | Last updated timestamp |
| `count` | number | Total number of inboxes |
| `nextPageToken` | string | Token for retrieving the next page |
### `agentmail_list_messages`
List messages in an inbox in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox to list messages from |
| `limit` | number | No | Maximum number of messages to return |
| `pageToken` | string | No | Pagination token for next page of results |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `messages` | array | List of messages in the inbox |
| ↳ `messageId` | string | Unique identifier for the message |
| ↳ `from` | string | Sender email address |
| ↳ `to` | array | Recipient email addresses |
| ↳ `subject` | string | Message subject |
| ↳ `preview` | string | Message preview text |
| ↳ `createdAt` | string | Creation timestamp |
| `count` | number | Total number of messages |
| `nextPageToken` | string | Token for retrieving the next page |
### `agentmail_list_threads`
List email threads in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox to list threads from |
| `limit` | number | No | Maximum number of threads to return |
| `pageToken` | string | No | Pagination token for next page of results |
| `labels` | string | No | Comma-separated labels to filter threads by |
| `before` | string | No | Filter threads before this ISO 8601 timestamp |
| `after` | string | No | Filter threads after this ISO 8601 timestamp |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `threads` | array | List of email threads |
| ↳ `threadId` | string | Unique identifier for the thread |
| ↳ `subject` | string | Thread subject |
| ↳ `senders` | array | List of sender email addresses |
| ↳ `recipients` | array | List of recipient email addresses |
| ↳ `messageCount` | number | Number of messages in the thread |
| ↳ `lastMessageAt` | string | Timestamp of last message |
| ↳ `createdAt` | string | Creation timestamp |
| ↳ `updatedAt` | string | Last updated timestamp |
| `count` | number | Total number of threads |
| `nextPageToken` | string | Token for retrieving the next page |
### `agentmail_reply_message`
Reply to an existing email message in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox to reply from |
| `messageId` | string | Yes | ID of the message to reply to |
| `text` | string | No | Plain text reply body |
| `html` | string | No | HTML reply body |
| `to` | string | No | Override recipient email addresses \(comma-separated\) |
| `cc` | string | No | CC email addresses \(comma-separated\) |
| `bcc` | string | No | BCC email addresses \(comma-separated\) |
| `replyAll` | boolean | No | Reply to all recipients of the original message |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `messageId` | string | ID of the sent reply message |
| `threadId` | string | ID of the thread |
### `agentmail_send_draft`
Send an existing email draft in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox containing the draft |
| `draftId` | string | Yes | ID of the draft to send |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `messageId` | string | ID of the sent message |
| `threadId` | string | ID of the thread |
### `agentmail_send_message`
Send an email message from an AgentMail inbox
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox to send from |
| `to` | string | Yes | Recipient email address \(comma-separated for multiple\) |
| `subject` | string | Yes | Email subject line |
| `text` | string | No | Plain text email body |
| `html` | string | No | HTML email body |
| `cc` | string | No | CC recipient email addresses \(comma-separated\) |
| `bcc` | string | No | BCC recipient email addresses \(comma-separated\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `threadId` | string | ID of the created thread |
| `messageId` | string | ID of the sent message |
| `subject` | string | Email subject line |
| `to` | string | Recipient email address |
### `agentmail_update_draft`
Update an existing email draft in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox containing the draft |
| `draftId` | string | Yes | ID of the draft to update |
| `to` | string | No | Recipient email addresses \(comma-separated\) |
| `subject` | string | No | Draft subject line |
| `text` | string | No | Plain text draft body |
| `html` | string | No | HTML draft body |
| `cc` | string | No | CC recipient email addresses \(comma-separated\) |
| `bcc` | string | No | BCC recipient email addresses \(comma-separated\) |
| `sendAt` | string | No | ISO 8601 timestamp to schedule sending |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `draftId` | string | Unique identifier for the draft |
| `inboxId` | string | Inbox the draft belongs to |
| `subject` | string | Draft subject |
| `to` | array | Recipient email addresses |
| `cc` | array | CC email addresses |
| `bcc` | array | BCC email addresses |
| `text` | string | Plain text content |
| `html` | string | HTML content |
| `preview` | string | Draft preview text |
| `labels` | array | Labels assigned to the draft |
| `inReplyTo` | string | Message ID this draft replies to |
| `sendStatus` | string | Send status \(scheduled, sending, failed\) |
| `sendAt` | string | Scheduled send time |
| `createdAt` | string | Creation timestamp |
| `updatedAt` | string | Last updated timestamp |
### `agentmail_update_inbox`
Update the display name of an email inbox in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox to update |
| `displayName` | string | Yes | New display name for the inbox |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `inboxId` | string | Unique identifier for the inbox |
| `email` | string | Email address of the inbox |
| `displayName` | string | Display name of the inbox |
| `createdAt` | string | Creation timestamp |
| `updatedAt` | string | Last updated timestamp |
### `agentmail_update_message`
Add or remove labels on an email message in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox containing the message |
| `messageId` | string | Yes | ID of the message to update |
| `addLabels` | string | No | Comma-separated labels to add to the message |
| `removeLabels` | string | No | Comma-separated labels to remove from the message |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `messageId` | string | Unique identifier for the message |
| `labels` | array | Current labels on the message |
### `agentmail_update_thread`
Add or remove labels on an email thread in AgentMail
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | AgentMail API key |
| `inboxId` | string | Yes | ID of the inbox containing the thread |
| `threadId` | string | Yes | ID of the thread to update |
| `addLabels` | string | No | Comma-separated labels to add to the thread |
| `removeLabels` | string | No | Comma-separated labels to remove from the thread |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `threadId` | string | Unique identifier for the thread |
| `labels` | array | Current labels on the thread |

View File

@@ -2,6 +2,7 @@
"pages": [
"index",
"a2a",
"agentmail",
"ahrefs",
"airtable",
"airweave",
@@ -134,6 +135,7 @@
"resend",
"revenuecat",
"rippling",
"rootly",
"s3",
"salesforce",
"search",

View File

@@ -2201,7 +2201,7 @@ Create a new object category
### `rippling_update_object_category`
Update a object category
Update an object category
#### Input
@@ -2224,7 +2224,7 @@ Update a object category
### `rippling_delete_object_category`
Delete a object category
Delete an object category
#### Input

View File

@@ -0,0 +1,891 @@
---
title: Rootly
description: Manage incidents, alerts, and on-call with Rootly
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="rootly"
color="#6C72C8"
/>
{/* MANUAL-CONTENT-START:intro */}
[Rootly](https://rootly.com/) is an incident management platform that helps teams respond to, mitigate, and learn from incidents — all without leaving Slack or your existing tools. Rootly automates on-call alerting, incident workflows, status page updates, and retrospectives so engineering teams can resolve issues faster and reduce toil.
**Why Rootly?**
- **End-to-End Incident Management:** Create, track, update, and resolve incidents with full lifecycle support — from initial triage through retrospective.
- **On-Call Alerting:** Create and manage alerts with deduplication, routing, and escalation to ensure the right people are notified immediately.
- **Timeline Events:** Add structured timeline events to incidents for clear, auditable incident narratives.
- **Service Catalog:** Maintain a catalog of services and map them to incidents for precise impact tracking.
- **Severity & Prioritization:** Use configurable severity levels to prioritize incidents and drive appropriate response urgency.
- **Retrospectives:** Access post-incident retrospectives to identify root causes, capture learnings, and drive reliability improvements.
**Using Rootly in Sim**
Sim's Rootly integration connects your agentic workflows directly to your Rootly account using an API key. With operations spanning incidents, alerts, services, severities, teams, environments, functionalities, incident types, and retrospectives, you can build powerful incident management automations without writing backend code.
**Key benefits of using Rootly in Sim:**
- **Automated incident creation:** Trigger incident creation from monitoring alerts, customer reports, or anomaly detection workflows with full metadata including severity, services, and teams.
- **Incident lifecycle automation:** Automatically update incident status, add timeline events, and attach mitigation or resolution messages as your response progresses.
- **Alert management:** Create and list alerts with deduplication support to integrate Rootly into your existing monitoring and notification pipelines.
- **Organizational awareness:** Query services, severities, teams, environments, functionalities, and incident types to build context-aware incident workflows.
- **Retrospective insights:** List and filter retrospectives to feed post-incident learnings into continuous improvement workflows.
Whether you're automating incident response, building on-call alerting pipelines, or driving post-incident learning, Rootly in Sim gives you direct, secure access to the Rootly API — no middleware required. Simply configure your API key, select the operation you need, and let Sim handle the rest.
{/* MANUAL-CONTENT-END */}
## Usage Instructions
Integrate Rootly incident management into workflows. Create and manage incidents, alerts, services, severities, and retrospectives.
## Tools
### `rootly_create_incident`
Create a new incident in Rootly with optional severity, services, and teams.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `title` | string | No | The title of the incident \(auto-generated if not provided\) |
| `summary` | string | No | A summary of the incident |
| `severityId` | string | No | Severity ID to attach to the incident |
| `status` | string | No | Incident status \(in_triage, started, detected, acknowledged, mitigated, resolved, closed, cancelled, scheduled, in_progress, completed\) |
| `kind` | string | No | Incident kind \(normal, normal_sub, test, test_sub, example, example_sub, backfilled, scheduled, scheduled_sub\) |
| `serviceIds` | string | No | Comma-separated service IDs to attach |
| `environmentIds` | string | No | Comma-separated environment IDs to attach |
| `groupIds` | string | No | Comma-separated team/group IDs to attach |
| `incidentTypeIds` | string | No | Comma-separated incident type IDs to attach |
| `functionalityIds` | string | No | Comma-separated functionality IDs to attach |
| `labels` | string | No | Labels as JSON object, e.g. \{"platform":"osx","version":"1.29"\} |
| `private` | boolean | No | Create as a private incident \(cannot be undone\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `incident` | object | The created incident |
| ↳ `id` | string | Unique incident ID |
| ↳ `sequentialId` | number | Sequential incident number |
| ↳ `title` | string | Incident title |
| ↳ `slug` | string | Incident slug |
| ↳ `kind` | string | Incident kind |
| ↳ `summary` | string | Incident summary |
| ↳ `status` | string | Incident status |
| ↳ `private` | boolean | Whether the incident is private |
| ↳ `url` | string | URL to the incident |
| ↳ `shortUrl` | string | Short URL to the incident |
| ↳ `severityName` | string | Severity name |
| ↳ `severityId` | string | Severity ID |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| ↳ `startedAt` | string | Start date |
| ↳ `mitigatedAt` | string | Mitigation date |
| ↳ `resolvedAt` | string | Resolution date |
| ↳ `closedAt` | string | Closed date |
### `rootly_get_incident`
Retrieve a single incident by ID from Rootly.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `incidentId` | string | Yes | The ID of the incident to retrieve |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `incident` | object | The incident details |
| ↳ `id` | string | Unique incident ID |
| ↳ `sequentialId` | number | Sequential incident number |
| ↳ `title` | string | Incident title |
| ↳ `slug` | string | Incident slug |
| ↳ `kind` | string | Incident kind |
| ↳ `summary` | string | Incident summary |
| ↳ `status` | string | Incident status |
| ↳ `private` | boolean | Whether the incident is private |
| ↳ `url` | string | URL to the incident |
| ↳ `shortUrl` | string | Short URL to the incident |
| ↳ `severityName` | string | Severity name |
| ↳ `severityId` | string | Severity ID |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| ↳ `startedAt` | string | Start date |
| ↳ `mitigatedAt` | string | Mitigation date |
| ↳ `resolvedAt` | string | Resolution date |
| ↳ `closedAt` | string | Closed date |
### `rootly_update_incident`
Update an existing incident in Rootly (status, severity, summary, etc.).
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `incidentId` | string | Yes | The ID of the incident to update |
| `title` | string | No | Updated incident title |
| `summary` | string | No | Updated incident summary |
| `severityId` | string | No | Updated severity ID |
| `status` | string | No | Updated status \(in_triage, started, detected, acknowledged, mitigated, resolved, closed, cancelled, scheduled, in_progress, completed\) |
| `kind` | string | No | Incident kind \(normal, normal_sub, test, test_sub, example, example_sub, backfilled, scheduled, scheduled_sub\) |
| `private` | boolean | No | Set incident as private \(cannot be undone\) |
| `serviceIds` | string | No | Comma-separated service IDs |
| `environmentIds` | string | No | Comma-separated environment IDs |
| `groupIds` | string | No | Comma-separated team/group IDs |
| `incidentTypeIds` | string | No | Comma-separated incident type IDs to attach |
| `functionalityIds` | string | No | Comma-separated functionality IDs to attach |
| `labels` | string | No | Labels as JSON object, e.g. \{"platform":"osx","version":"1.29"\} |
| `mitigationMessage` | string | No | How was the incident mitigated? |
| `resolutionMessage` | string | No | How was the incident resolved? |
| `cancellationMessage` | string | No | Why was the incident cancelled? |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `incident` | object | The updated incident |
| ↳ `id` | string | Unique incident ID |
| ↳ `sequentialId` | number | Sequential incident number |
| ↳ `title` | string | Incident title |
| ↳ `slug` | string | Incident slug |
| ↳ `kind` | string | Incident kind |
| ↳ `summary` | string | Incident summary |
| ↳ `status` | string | Incident status |
| ↳ `private` | boolean | Whether the incident is private |
| ↳ `url` | string | URL to the incident |
| ↳ `shortUrl` | string | Short URL to the incident |
| ↳ `severityName` | string | Severity name |
| ↳ `severityId` | string | Severity ID |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| ↳ `startedAt` | string | Start date |
| ↳ `mitigatedAt` | string | Mitigation date |
| ↳ `resolvedAt` | string | Resolution date |
| ↳ `closedAt` | string | Closed date |
### `rootly_list_incidents`
List incidents from Rootly with optional filtering by status, severity, and more.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `status` | string | No | Filter by status \(in_triage, started, detected, acknowledged, mitigated, resolved, closed, cancelled, scheduled, in_progress, completed\) |
| `severity` | string | No | Filter by severity slug |
| `search` | string | No | Search term to filter incidents |
| `services` | string | No | Filter by service slugs \(comma-separated\) |
| `teams` | string | No | Filter by team slugs \(comma-separated\) |
| `environments` | string | No | Filter by environment slugs \(comma-separated\) |
| `sort` | string | No | Sort order \(e.g., -created_at, created_at, -started_at\) |
| `pageSize` | number | No | Number of items per page \(default: 20\) |
| `pageNumber` | number | No | Page number for pagination |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `incidents` | array | List of incidents |
| ↳ `id` | string | Unique incident ID |
| ↳ `sequentialId` | number | Sequential incident number |
| ↳ `title` | string | Incident title |
| ↳ `slug` | string | Incident slug |
| ↳ `kind` | string | Incident kind |
| ↳ `summary` | string | Incident summary |
| ↳ `status` | string | Incident status |
| ↳ `private` | boolean | Whether the incident is private |
| ↳ `url` | string | URL to the incident |
| ↳ `shortUrl` | string | Short URL to the incident |
| ↳ `severityName` | string | Severity name |
| ↳ `severityId` | string | Severity ID |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| ↳ `startedAt` | string | Start date |
| ↳ `mitigatedAt` | string | Mitigation date |
| ↳ `resolvedAt` | string | Resolution date |
| ↳ `closedAt` | string | Closed date |
| `totalCount` | number | Total number of incidents returned |
### `rootly_create_alert`
Create a new alert in Rootly for on-call notification and routing.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `summary` | string | Yes | The summary of the alert |
| `description` | string | No | A detailed description of the alert |
| `source` | string | Yes | The source of the alert \(e.g., api, manual, datadog, pagerduty\) |
| `status` | string | No | Alert status on creation \(open, triggered\) |
| `serviceIds` | string | No | Comma-separated service IDs to attach |
| `groupIds` | string | No | Comma-separated team/group IDs to attach |
| `environmentIds` | string | No | Comma-separated environment IDs to attach |
| `externalId` | string | No | External ID for the alert |
| `externalUrl` | string | No | External URL for the alert |
| `deduplicationKey` | string | No | Alerts sharing the same deduplication key are treated as a single alert |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `alert` | object | The created alert |
| ↳ `id` | string | Unique alert ID |
| ↳ `shortId` | string | Short alert ID |
| ↳ `summary` | string | Alert summary |
| ↳ `description` | string | Alert description |
| ↳ `source` | string | Alert source |
| ↳ `status` | string | Alert status |
| ↳ `externalId` | string | External ID |
| ↳ `externalUrl` | string | External URL |
| ↳ `deduplicationKey` | string | Deduplication key |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| ↳ `startedAt` | string | Start date |
| ↳ `endedAt` | string | End date |
### `rootly_list_alerts`
List alerts from Rootly with optional filtering by status, source, and services.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `status` | string | No | Filter by status \(open, triggered, acknowledged, resolved\) |
| `source` | string | No | Filter by source \(e.g., api, datadog, pagerduty\) |
| `services` | string | No | Filter by service slugs \(comma-separated\) |
| `environments` | string | No | Filter by environment slugs \(comma-separated\) |
| `groups` | string | No | Filter by team/group slugs \(comma-separated\) |
| `pageSize` | number | No | Number of items per page \(default: 20\) |
| `pageNumber` | number | No | Page number for pagination |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `alerts` | array | List of alerts |
| ↳ `id` | string | Unique alert ID |
| ↳ `shortId` | string | Short alert ID |
| ↳ `summary` | string | Alert summary |
| ↳ `description` | string | Alert description |
| ↳ `source` | string | Alert source |
| ↳ `status` | string | Alert status |
| ↳ `externalId` | string | External ID |
| ↳ `externalUrl` | string | External URL |
| ↳ `deduplicationKey` | string | Deduplication key |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| ↳ `startedAt` | string | Start date |
| ↳ `endedAt` | string | End date |
| `totalCount` | number | Total number of alerts returned |
### `rootly_add_incident_event`
Add a timeline event to an existing incident in Rootly.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `incidentId` | string | Yes | The ID of the incident to add the event to |
| `event` | string | Yes | The summary/description of the event |
| `visibility` | string | No | Event visibility \(internal or external\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `eventId` | string | The ID of the created event |
| `event` | string | The event summary |
| `visibility` | string | Event visibility \(internal or external\) |
| `occurredAt` | string | When the event occurred |
| `createdAt` | string | Creation date |
| `updatedAt` | string | Last update date |
### `rootly_list_services`
List services from Rootly with optional search filtering.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `search` | string | No | Search term to filter services |
| `pageSize` | number | No | Number of items per page \(default: 20\) |
| `pageNumber` | number | No | Page number for pagination |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `services` | array | List of services |
| ↳ `id` | string | Unique service ID |
| ↳ `name` | string | Service name |
| ↳ `slug` | string | Service slug |
| ↳ `description` | string | Service description |
| ↳ `color` | string | Service color |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| `totalCount` | number | Total number of services returned |
### `rootly_list_severities`
List severity levels configured in Rootly.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `search` | string | No | Search term to filter severities |
| `pageSize` | number | No | Number of items per page \(default: 20\) |
| `pageNumber` | number | No | Page number for pagination |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `severities` | array | List of severity levels |
| ↳ `id` | string | Unique severity ID |
| ↳ `name` | string | Severity name |
| ↳ `slug` | string | Severity slug |
| ↳ `description` | string | Severity description |
| ↳ `severity` | string | Severity level \(critical, high, medium, low\) |
| ↳ `color` | string | Severity color |
| ↳ `position` | number | Display position |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| `totalCount` | number | Total number of severities returned |
### `rootly_list_teams`
List teams (groups) configured in Rootly.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `search` | string | No | Search term to filter teams |
| `pageSize` | number | No | Number of items per page \(default: 20\) |
| `pageNumber` | number | No | Page number for pagination |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `teams` | array | List of teams |
| ↳ `id` | string | Unique team ID |
| ↳ `name` | string | Team name |
| ↳ `slug` | string | Team slug |
| ↳ `description` | string | Team description |
| ↳ `color` | string | Team color |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| `totalCount` | number | Total number of teams returned |
### `rootly_list_environments`
List environments configured in Rootly.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `search` | string | No | Search term to filter environments |
| `pageSize` | number | No | Number of items per page \(default: 20\) |
| `pageNumber` | number | No | Page number for pagination |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `environments` | array | List of environments |
| ↳ `id` | string | Unique environment ID |
| ↳ `name` | string | Environment name |
| ↳ `slug` | string | Environment slug |
| ↳ `description` | string | Environment description |
| ↳ `color` | string | Environment color |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| `totalCount` | number | Total number of environments returned |
### `rootly_list_incident_types`
List incident types configured in Rootly.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `search` | string | No | Filter incident types by name |
| `pageSize` | number | No | Number of items per page \(default: 20\) |
| `pageNumber` | number | No | Page number for pagination |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `incidentTypes` | array | List of incident types |
| ↳ `id` | string | Unique incident type ID |
| ↳ `name` | string | Incident type name |
| ↳ `slug` | string | Incident type slug |
| ↳ `description` | string | Incident type description |
| ↳ `color` | string | Incident type color |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| `totalCount` | number | Total number of incident types returned |
### `rootly_list_functionalities`
List functionalities configured in Rootly.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `search` | string | No | Search term to filter functionalities |
| `pageSize` | number | No | Number of items per page \(default: 20\) |
| `pageNumber` | number | No | Page number for pagination |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `functionalities` | array | List of functionalities |
| ↳ `id` | string | Unique functionality ID |
| ↳ `name` | string | Functionality name |
| ↳ `slug` | string | Functionality slug |
| ↳ `description` | string | Functionality description |
| ↳ `color` | string | Functionality color |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| `totalCount` | number | Total number of functionalities returned |
### `rootly_list_retrospectives`
List incident retrospectives (post-mortems) from Rootly.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `status` | string | No | Filter by status \(draft, published\) |
| `search` | string | No | Search term to filter retrospectives |
| `pageSize` | number | No | Number of items per page \(default: 20\) |
| `pageNumber` | number | No | Page number for pagination |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `retrospectives` | array | List of retrospectives |
| ↳ `id` | string | Unique retrospective ID |
| ↳ `title` | string | Retrospective title |
| ↳ `status` | string | Status \(draft or published\) |
| ↳ `url` | string | URL to the retrospective |
| ↳ `startedAt` | string | Incident start date |
| ↳ `mitigatedAt` | string | Mitigation date |
| ↳ `resolvedAt` | string | Resolution date |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| `totalCount` | number | Total number of retrospectives returned |
### `rootly_delete_incident`
Delete an incident by ID from Rootly.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `incidentId` | string | Yes | The ID of the incident to delete |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `success` | boolean | Whether the deletion succeeded |
| `message` | string | Result message |
### `rootly_get_alert`
Retrieve a single alert by ID from Rootly.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `alertId` | string | Yes | The ID of the alert to retrieve |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `alert` | object | The alert details |
| ↳ `id` | string | Unique alert ID |
| ↳ `shortId` | string | Short alert ID |
| ↳ `summary` | string | Alert summary |
| ↳ `description` | string | Alert description |
| ↳ `source` | string | Alert source |
| ↳ `status` | string | Alert status |
| ↳ `externalId` | string | External ID |
| ↳ `externalUrl` | string | External URL |
| ↳ `deduplicationKey` | string | Deduplication key |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| ↳ `startedAt` | string | Start date |
| ↳ `endedAt` | string | End date |
### `rootly_update_alert`
Update an existing alert in Rootly.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `alertId` | string | Yes | The ID of the alert to update |
| `summary` | string | No | Updated alert summary |
| `description` | string | No | Updated alert description |
| `source` | string | No | Updated alert source |
| `serviceIds` | string | No | Comma-separated service IDs to attach |
| `groupIds` | string | No | Comma-separated team/group IDs to attach |
| `environmentIds` | string | No | Comma-separated environment IDs to attach |
| `externalId` | string | No | Updated external ID |
| `externalUrl` | string | No | Updated external URL |
| `deduplicationKey` | string | No | Updated deduplication key |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `alert` | object | The updated alert |
| ↳ `id` | string | Unique alert ID |
| ↳ `shortId` | string | Short alert ID |
| ↳ `summary` | string | Alert summary |
| ↳ `description` | string | Alert description |
| ↳ `source` | string | Alert source |
| ↳ `status` | string | Alert status |
| ↳ `externalId` | string | External ID |
| ↳ `externalUrl` | string | External URL |
| ↳ `deduplicationKey` | string | Deduplication key |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| ↳ `startedAt` | string | Start date |
| ↳ `endedAt` | string | End date |
### `rootly_acknowledge_alert`
Acknowledge an alert in Rootly.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `alertId` | string | Yes | The ID of the alert to acknowledge |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `alert` | object | The acknowledged alert |
| ↳ `id` | string | Unique alert ID |
| ↳ `shortId` | string | Short alert ID |
| ↳ `summary` | string | Alert summary |
| ↳ `description` | string | Alert description |
| ↳ `source` | string | Alert source |
| ↳ `status` | string | Alert status |
| ↳ `externalId` | string | External ID |
| ↳ `externalUrl` | string | External URL |
| ↳ `deduplicationKey` | string | Deduplication key |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| ↳ `startedAt` | string | Start date |
| ↳ `endedAt` | string | End date |
### `rootly_resolve_alert`
Resolve an alert in Rootly.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `alertId` | string | Yes | The ID of the alert to resolve |
| `resolutionMessage` | string | No | Message describing how the alert was resolved |
| `resolveRelatedIncidents` | boolean | No | Whether to also resolve related incidents |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `alert` | object | The resolved alert |
| ↳ `id` | string | Unique alert ID |
| ↳ `shortId` | string | Short alert ID |
| ↳ `summary` | string | Alert summary |
| ↳ `description` | string | Alert description |
| ↳ `source` | string | Alert source |
| ↳ `status` | string | Alert status |
| ↳ `externalId` | string | External ID |
| ↳ `externalUrl` | string | External URL |
| ↳ `deduplicationKey` | string | Deduplication key |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| ↳ `startedAt` | string | Start date |
| ↳ `endedAt` | string | End date |
### `rootly_create_action_item`
Create a new action item for an incident in Rootly.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `incidentId` | string | Yes | The ID of the incident to add the action item to |
| `summary` | string | Yes | The title of the action item |
| `description` | string | No | A detailed description of the action item |
| `kind` | string | No | The kind of action item \(task, follow_up\) |
| `priority` | string | No | Priority level \(high, medium, low\) |
| `status` | string | No | Action item status \(open, in_progress, cancelled, done\) |
| `assignedToUserId` | string | No | The user ID to assign the action item to |
| `dueDate` | string | No | Due date for the action item |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `actionItem` | object | The created action item |
| ↳ `id` | string | Unique action item ID |
| ↳ `summary` | string | Action item title |
| ↳ `description` | string | Action item description |
| ↳ `kind` | string | Action item kind \(task, follow_up\) |
| ↳ `priority` | string | Priority level |
| ↳ `status` | string | Action item status |
| ↳ `dueDate` | string | Due date |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
### `rootly_list_action_items`
List action items for an incident in Rootly.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `incidentId` | string | Yes | The ID of the incident to list action items for |
| `pageSize` | number | No | Number of items per page \(default: 20\) |
| `pageNumber` | number | No | Page number for pagination |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `actionItems` | array | List of action items |
| ↳ `id` | string | Unique action item ID |
| ↳ `summary` | string | Action item title |
| ↳ `description` | string | Action item description |
| ↳ `kind` | string | Action item kind \(task, follow_up\) |
| ↳ `priority` | string | Priority level |
| ↳ `status` | string | Action item status |
| ↳ `dueDate` | string | Due date |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| `totalCount` | number | Total number of action items returned |
### `rootly_list_users`
List users from Rootly with optional search and email filtering.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `search` | string | No | Search term to filter users |
| `email` | string | No | Filter users by email address |
| `pageSize` | number | No | Number of items per page \(default: 20\) |
| `pageNumber` | number | No | Page number for pagination |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `users` | array | List of users |
| ↳ `id` | string | Unique user ID |
| ↳ `email` | string | User email address |
| ↳ `firstName` | string | User first name |
| ↳ `lastName` | string | User last name |
| ↳ `fullName` | string | User full name |
| ↳ `timeZone` | string | User time zone |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| `totalCount` | number | Total number of users returned |
### `rootly_list_on_calls`
List current on-call entries from Rootly with optional filtering.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `scheduleIds` | string | No | Comma-separated schedule IDs to filter by |
| `escalationPolicyIds` | string | No | Comma-separated escalation policy IDs to filter by |
| `userIds` | string | No | Comma-separated user IDs to filter by |
| `serviceIds` | string | No | Comma-separated service IDs to filter by |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `onCalls` | array | List of on-call entries |
| ↳ `id` | string | Unique on-call entry ID |
| ↳ `userId` | string | ID of the on-call user |
| ↳ `userName` | string | Name of the on-call user |
| ↳ `scheduleId` | string | ID of the associated schedule |
| ↳ `scheduleName` | string | Name of the associated schedule |
| ↳ `escalationPolicyId` | string | ID of the associated escalation policy |
| ↳ `startTime` | string | On-call start time |
| ↳ `endTime` | string | On-call end time |
| `totalCount` | number | Total number of on-call entries returned |
### `rootly_list_schedules`
List on-call schedules from Rootly with optional search filtering.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `search` | string | No | Search term to filter schedules |
| `pageSize` | number | No | Number of items per page \(default: 20\) |
| `pageNumber` | number | No | Page number for pagination |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `schedules` | array | List of schedules |
| ↳ `id` | string | Unique schedule ID |
| ↳ `name` | string | Schedule name |
| ↳ `description` | string | Schedule description |
| ↳ `allTimeCoverage` | boolean | Whether schedule provides 24/7 coverage |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| `totalCount` | number | Total number of schedules returned |
### `rootly_list_escalation_policies`
List escalation policies from Rootly with optional search filtering.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `search` | string | No | Search term to filter escalation policies |
| `pageSize` | number | No | Number of items per page \(default: 20\) |
| `pageNumber` | number | No | Page number for pagination |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `escalationPolicies` | array | List of escalation policies |
| ↳ `id` | string | Unique escalation policy ID |
| ↳ `name` | string | Escalation policy name |
| ↳ `description` | string | Escalation policy description |
| ↳ `repeatCount` | number | Number of times to repeat escalation |
| ↳ `groupIds` | array | Associated group IDs |
| ↳ `serviceIds` | array | Associated service IDs |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| `totalCount` | number | Total number of escalation policies returned |
### `rootly_list_causes`
List causes from Rootly with optional search filtering.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `search` | string | No | Search term to filter causes |
| `pageSize` | number | No | Number of items per page \(default: 20\) |
| `pageNumber` | number | No | Page number for pagination |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `causes` | array | List of causes |
| ↳ `id` | string | Unique cause ID |
| ↳ `name` | string | Cause name |
| ↳ `slug` | string | Cause slug |
| ↳ `description` | string | Cause description |
| ↳ `position` | number | Cause position |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| `totalCount` | number | Total number of causes returned |
### `rootly_list_playbooks`
List playbooks from Rootly with pagination support.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Rootly API key |
| `pageSize` | number | No | Number of items per page \(default: 20\) |
| `pageNumber` | number | No | Page number for pagination |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `playbooks` | array | List of playbooks |
| ↳ `id` | string | Unique playbook ID |
| ↳ `title` | string | Playbook title |
| ↳ `summary` | string | Playbook summary |
| ↳ `externalUrl` | string | External URL |
| ↳ `createdAt` | string | Creation date |
| ↳ `updatedAt` | string | Last update date |
| `totalCount` | number | Total number of playbooks returned |

View File

@@ -166,14 +166,14 @@ export function AuditLogPreview() {
const counterRef = useRef(ENTRY_TEMPLATES.length)
const templateIndexRef = useRef(6 % ENTRY_TEMPLATES.length)
const now = Date.now()
const [entries, setEntries] = useState<LogEntry[]>(() =>
ENTRY_TEMPLATES.slice(0, 6).map((t, i) => ({
const [entries, setEntries] = useState<LogEntry[]>(() => {
const now = Date.now()
return ENTRY_TEMPLATES.slice(0, 6).map((t, i) => ({
...t,
id: i,
insertedAt: now - INITIAL_OFFSETS_MS[i],
}))
)
})
const [, tick] = useState(0)
useEffect(() => {
@@ -208,10 +208,9 @@ export function AuditLogPreview() {
exit={{ opacity: 0 }}
transition={{
layout: {
type: 'spring',
stiffness: 350,
damping: 50,
mass: 0.8,
type: 'tween',
duration: 0.32,
ease: [0.25, 0.46, 0.45, 0.94],
},
y: { duration: 0.32, ease: [0.25, 0.46, 0.45, 0.94] },
opacity: { duration: 0.25 },

View File

@@ -5,6 +5,7 @@
import type { ComponentType, SVGProps } from 'react'
import {
A2AIcon,
AgentMailIcon,
AhrefsIcon,
AirtableIcon,
AirweaveIcon,
@@ -139,6 +140,7 @@ import {
ResendIcon,
RevenueCatIcon,
RipplingIcon,
RootlyIcon,
S3Icon,
SalesforceIcon,
SearchIcon,
@@ -188,6 +190,7 @@ type IconComponent = ComponentType<SVGProps<SVGSVGElement>>
export const blockTypeToIconMap: Record<string, IconComponent> = {
a2a: A2AIcon,
agentmail: AgentMailIcon,
ahrefs: AhrefsIcon,
airtable: AirtableIcon,
airweave: AirweaveIcon,
@@ -320,6 +323,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
resend: ResendIcon,
revenuecat: RevenueCatIcon,
rippling: RipplingIcon,
rootly: RootlyIcon,
s3: S3Icon,
salesforce: SalesforceIcon,
search: SearchIcon,

View File

@@ -105,6 +105,109 @@
"integrationType": "developer-tools",
"tags": ["agentic", "automation"]
},
{
"type": "agentmail",
"slug": "agentmail",
"name": "AgentMail",
"description": "Manage email inboxes, threads, and messages with AgentMail",
"longDescription": "Integrate AgentMail into your workflow. Create and manage email inboxes, send and receive messages, reply to threads, manage drafts, and organize threads with labels. Requires API Key.",
"bgColor": "#000000",
"iconName": "AgentMailIcon",
"docsUrl": "https://docs.sim.ai/tools/agentmail",
"operations": [
{
"name": "Send Message",
"description": "Send an email message from an AgentMail inbox"
},
{
"name": "Reply to Message",
"description": "Reply to an existing email message in AgentMail"
},
{
"name": "Forward Message",
"description": "Forward an email message to new recipients in AgentMail"
},
{
"name": "List Threads",
"description": "List email threads in AgentMail"
},
{
"name": "Get Thread",
"description": "Get details of a specific email thread including messages in AgentMail"
},
{
"name": "Update Thread Labels",
"description": "Add or remove labels on an email thread in AgentMail"
},
{
"name": "Delete Thread",
"description": "Delete an email thread in AgentMail (moves to trash, or permanently deletes if already in trash)"
},
{
"name": "List Messages",
"description": "List messages in an inbox in AgentMail"
},
{
"name": "Get Message",
"description": "Get details of a specific email message in AgentMail"
},
{
"name": "Update Message Labels",
"description": "Add or remove labels on an email message in AgentMail"
},
{
"name": "Create Draft",
"description": "Create a new email draft in AgentMail"
},
{
"name": "List Drafts",
"description": "List email drafts in an inbox in AgentMail"
},
{
"name": "Get Draft",
"description": "Get details of a specific email draft in AgentMail"
},
{
"name": "Update Draft",
"description": "Update an existing email draft in AgentMail"
},
{
"name": "Delete Draft",
"description": "Delete an email draft in AgentMail"
},
{
"name": "Send Draft",
"description": "Send an existing email draft in AgentMail"
},
{
"name": "Create Inbox",
"description": "Create a new email inbox with AgentMail"
},
{
"name": "List Inboxes",
"description": "List all email inboxes in AgentMail"
},
{
"name": "Get Inbox",
"description": "Get details of a specific email inbox in AgentMail"
},
{
"name": "Update Inbox",
"description": "Update the display name of an email inbox in AgentMail"
},
{
"name": "Delete Inbox",
"description": "Delete an email inbox in AgentMail"
}
],
"operationCount": 21,
"triggers": [],
"triggerCount": 0,
"authType": "api-key",
"category": "tools",
"integrationType": "email",
"tags": ["messaging"]
},
{
"type": "ahrefs",
"slug": "ahrefs",
@@ -9625,11 +9728,11 @@
},
{
"name": "Update Object Category",
"description": "Update a object category"
"description": "Update an object category"
},
{
"name": "Delete Object Category",
"description": "Delete a object category"
"description": "Delete an object category"
},
{
"name": "Get Report Run",
@@ -9652,6 +9755,133 @@
"integrationType": "hr",
"tags": ["hiring"]
},
{
"type": "rootly",
"slug": "rootly",
"name": "Rootly",
"description": "Manage incidents, alerts, and on-call with Rootly",
"longDescription": "Integrate Rootly incident management into workflows. Create and manage incidents, alerts, services, severities, and retrospectives.",
"bgColor": "#6C72C8",
"iconName": "RootlyIcon",
"docsUrl": "https://docs.sim.ai/tools/rootly",
"operations": [
{
"name": "Create Incident",
"description": "Create a new incident in Rootly with optional severity, services, and teams."
},
{
"name": "Get Incident",
"description": "Retrieve a single incident by ID from Rootly."
},
{
"name": "Update Incident",
"description": "Update an existing incident in Rootly (status, severity, summary, etc.)."
},
{
"name": "List Incidents",
"description": "List incidents from Rootly with optional filtering by status, severity, and more."
},
{
"name": "Create Alert",
"description": "Create a new alert in Rootly for on-call notification and routing."
},
{
"name": "List Alerts",
"description": "List alerts from Rootly with optional filtering by status, source, and services."
},
{
"name": "Add Incident Event",
"description": "Add a timeline event to an existing incident in Rootly."
},
{
"name": "List Services",
"description": "List services from Rootly with optional search filtering."
},
{
"name": "List Severities",
"description": "List severity levels configured in Rootly."
},
{
"name": "List Teams",
"description": "List teams (groups) configured in Rootly."
},
{
"name": "List Environments",
"description": "List environments configured in Rootly."
},
{
"name": "List Incident Types",
"description": "List incident types configured in Rootly."
},
{
"name": "List Functionalities",
"description": "List functionalities configured in Rootly."
},
{
"name": "List Retrospectives",
"description": "List incident retrospectives (post-mortems) from Rootly."
},
{
"name": "Delete Incident",
"description": "Delete an incident by ID from Rootly."
},
{
"name": "Get Alert",
"description": "Retrieve a single alert by ID from Rootly."
},
{
"name": "Update Alert",
"description": "Update an existing alert in Rootly."
},
{
"name": "Acknowledge Alert",
"description": "Acknowledge an alert in Rootly."
},
{
"name": "Resolve Alert",
"description": "Resolve an alert in Rootly."
},
{
"name": "Create Action Item",
"description": "Create a new action item for an incident in Rootly."
},
{
"name": "List Action Items",
"description": "List action items for an incident in Rootly."
},
{
"name": "List Users",
"description": "List users from Rootly with optional search and email filtering."
},
{
"name": "List On-Calls",
"description": "List current on-call entries from Rootly with optional filtering."
},
{
"name": "List Schedules",
"description": "List on-call schedules from Rootly with optional search filtering."
},
{
"name": "List Escalation Policies",
"description": "List escalation policies from Rootly with optional search filtering."
},
{
"name": "List Causes",
"description": "List causes from Rootly with optional search filtering."
},
{
"name": "List Playbooks",
"description": "List playbooks from Rootly with pagination support."
}
],
"operationCount": 27,
"triggers": [],
"triggerCount": 0,
"authType": "api-key",
"category": "tools",
"integrationType": "developer-tools",
"tags": ["incident-management", "monitoring"]
},
{
"type": "s3",
"slug": "s3",

View File

@@ -4,7 +4,7 @@ import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { recordUsage } from '@/lib/billing/core/usage-log'
import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing'
import { checkInternalApiKey } from '@/lib/copilot/utils'
import { checkInternalApiKey } from '@/lib/copilot/request/http'
import { isBillingEnabled } from '@/lib/core/config/feature-flags'
import { generateRequestId } from '@/lib/core/utils/request'

View File

@@ -2,7 +2,7 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { checkServerSideUsageLimits } from '@/lib/billing/calculations/usage-monitor'
import { checkInternalApiKey } from '@/lib/copilot/utils'
import { checkInternalApiKey } from '@/lib/copilot/request/http'
const logger = createLogger('CopilotApiKeysValidate')

View File

@@ -1,10 +1,12 @@
import { createLogger } from '@sim/logger'
import { NextResponse } from 'next/server'
import { getLatestRunForStream } from '@/lib/copilot/async-runs/repository'
import { abortActiveStream, waitForPendingChatStream } from '@/lib/copilot/chat-streaming'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http'
import { abortActiveStream } from '@/lib/copilot/request/session/abort'
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotChatAbortAPI')
const GO_EXPLICIT_ABORT_TIMEOUT_MS = 3000
export async function POST(request: Request) {
@@ -15,7 +17,12 @@ export async function POST(request: Request) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const body = await request.json().catch(() => ({}))
const body = await request.json().catch((err) => {
logger.warn('Abort request body parse failed; continuing with empty object', {
error: err instanceof Error ? err.message : String(err),
})
return {}
})
const streamId = typeof body.streamId === 'string' ? body.streamId : ''
let chatId = typeof body.chatId === 'string' ? body.chatId : ''
@@ -24,7 +31,13 @@ export async function POST(request: Request) {
}
if (!chatId) {
const run = await getLatestRunForStream(streamId, authenticatedUserId).catch(() => null)
const run = await getLatestRunForStream(streamId, authenticatedUserId).catch((err) => {
logger.warn('getLatestRunForStream failed while resolving chatId for abort', {
streamId,
error: err instanceof Error ? err.message : String(err),
})
return null
})
if (run?.chatId) {
chatId = run.chatId
}
@@ -50,15 +63,13 @@ export async function POST(request: Request) {
if (!response.ok) {
throw new Error(`Explicit abort marker request failed: ${response.status}`)
}
} catch {
// best effort: local abort should still proceed even if Go marker fails
} catch (err) {
logger.warn('Explicit abort marker request failed; proceeding with local abort', {
streamId,
error: err instanceof Error ? err.message : String(err),
})
}
const aborted = await abortActiveStream(streamId)
if (chatId) {
await waitForPendingChatStream(chatId, GO_EXPLICIT_ABORT_TIMEOUT_MS + 1000, streamId).catch(
() => false
)
}
return NextResponse.json({ aborted })
}

View File

@@ -36,11 +36,11 @@ vi.mock('drizzle-orm', () => ({
eq: vi.fn((field: unknown, value: unknown) => ({ field, value, type: 'eq' })),
}))
vi.mock('@/lib/copilot/chat-lifecycle', () => ({
vi.mock('@/lib/copilot/chat/lifecycle', () => ({
getAccessibleCopilotChat: mockGetAccessibleCopilotChat,
}))
vi.mock('@/lib/copilot/task-events', () => ({
vi.mock('@/lib/copilot/tasks', () => ({
taskPubSub: { publishStatusChanged: vi.fn() },
}))

View File

@@ -5,8 +5,8 @@ import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat-lifecycle'
import { taskPubSub } from '@/lib/copilot/task-events'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import { taskPubSub } from '@/lib/copilot/tasks'
const logger = createLogger('DeleteChatAPI')

View File

@@ -0,0 +1,119 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, desc, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request/http'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { assertActiveWorkspaceAccess } from '@/lib/workspaces/permissions/utils'
const logger = createLogger('CopilotChatAPI')
/**
 * Shapes a copilot chat DB row into the API response payload.
 *
 * Normalizes `messages` to an array and derives `messageCount` from it, and
 * coerces falsy `planArtifact`/`config` to null. The `activeStreamId`
 * (mapped from the `conversationId` column) and `resources` keys are emitted
 * only when the corresponding property exists on the input — i.e. only when
 * the caller's query actually selected those columns.
 */
function transformChat(chat: {
  id: string
  title: string | null
  model: string | null
  messages: unknown
  planArtifact?: unknown
  config?: unknown
  conversationId?: string | null
  resources?: unknown
  createdAt: Date | null
  updatedAt: Date | null
}) {
  // Evaluate the messages normalization once and reuse it for the count.
  const messageList = Array.isArray(chat.messages) ? chat.messages : []
  // These spreads keep the key entirely absent (not just undefined) when the
  // source column was not selected by the caller.
  const streamFields =
    'conversationId' in chat ? { activeStreamId: chat.conversationId || null } : {}
  const resourceFields =
    'resources' in chat ? { resources: Array.isArray(chat.resources) ? chat.resources : [] } : {}
  return {
    id: chat.id,
    title: chat.title,
    model: chat.model,
    messages: messageList,
    messageCount: messageList.length,
    planArtifact: chat.planArtifact || null,
    config: chat.config || null,
    ...streamFields,
    ...resourceFields,
    createdAt: chat.createdAt,
    updatedAt: chat.updatedAt,
  }
}
/**
 * GET /api/copilot/chat
 *
 * Fetches copilot chats for the authenticated user, scoped one of three ways:
 * - `chatId`: return that single chat (404 if not found / not accessible)
 * - `workflowId`: return the user's chats for that workflow
 * - `workspaceId`: return the user's chats for that workspace
 * At least one of the three query params is required (400 otherwise).
 * Returns 401 when the session is missing or an authorization check fails,
 * and 500 on any unexpected error.
 */
export async function GET(req: NextRequest) {
  try {
    const { searchParams } = new URL(req.url)
    const workflowId = searchParams.get('workflowId')
    const workspaceId = searchParams.get('workspaceId')
    const chatId = searchParams.get('chatId')
    // Session-only auth — this route has no API-key fallback.
    const { userId: authenticatedUserId, isAuthenticated } =
      await authenticateCopilotRequestSessionOnly()
    if (!isAuthenticated || !authenticatedUserId) {
      return createUnauthorizedResponse()
    }
    // Single-chat lookup takes precedence over the list scopes below.
    if (chatId) {
      // getAccessibleCopilotChat presumably enforces per-user access, so a
      // miss maps to 404 without leaking whether the chat exists — TODO confirm.
      const chat = await getAccessibleCopilotChat(chatId, authenticatedUserId)
      if (!chat) {
        return NextResponse.json({ success: false, error: 'Chat not found' }, { status: 404 })
      }
      logger.info(`Retrieved chat ${chatId}`)
      return NextResponse.json({ success: true, chat: transformChat(chat) })
    }
    if (!workflowId && !workspaceId) {
      return createBadRequestResponse('workflowId, workspaceId, or chatId is required')
    }
    // Authorization happens before any DB query: workspace membership first,
    // then (independently) read permission on the workflow when provided.
    if (workspaceId) {
      // NOTE(review): assumed to throw on missing access, surfacing as the
      // catch-all 500 below rather than a 401/403 — verify that is intended.
      await assertActiveWorkspaceAccess(workspaceId, authenticatedUserId)
    }
    if (workflowId) {
      const authorization = await authorizeWorkflowByWorkspacePermission({
        workflowId,
        userId: authenticatedUserId,
        action: 'read',
      })
      if (!authorization.allowed) {
        return createUnauthorizedResponse()
      }
    }
    // Workflow scope wins when both params are present. The non-null
    // assertion on workspaceId is safe: the guard above ensures at least one
    // of workflowId/workspaceId is set, and this branch runs only when
    // workflowId is falsy.
    const scopeFilter = workflowId
      ? eq(copilotChats.workflowId, workflowId)
      : eq(copilotChats.workspaceId, workspaceId!)
    // Only the user's own chats within the scope, newest first. Note this
    // selection omits conversationId/resources, so transformChat will not
    // emit activeStreamId/resources keys for list responses.
    const chats = await db
      .select({
        id: copilotChats.id,
        title: copilotChats.title,
        model: copilotChats.model,
        messages: copilotChats.messages,
        planArtifact: copilotChats.planArtifact,
        config: copilotChats.config,
        createdAt: copilotChats.createdAt,
        updatedAt: copilotChats.updatedAt,
      })
      .from(copilotChats)
      .where(and(eq(copilotChats.userId, authenticatedUserId), scopeFilter))
      .orderBy(desc(copilotChats.updatedAt))
    const scope = workflowId ? `workflow ${workflowId}` : `workspace ${workspaceId}`
    logger.info(`Retrieved ${chats.length} chats for ${scope}`)
    return NextResponse.json({
      success: true,
      chats: chats.map(transformChat),
    })
  } catch (error) {
    logger.error('Error fetching copilot chats:', error)
    return createInternalServerErrorResponse('Failed to fetch chats')
  }
}

View File

@@ -0,0 +1,65 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import { taskPubSub } from '@/lib/copilot/tasks'
const logger = createLogger('RenameChatAPI')
// Request body for renaming a chat: both fields required, title capped at 200 chars.
const RenameChatSchema = z.object({
  chatId: z.string().min(1),
  title: z.string().min(1).max(200),
})
/**
 * PATCH — rename a copilot chat owned by the session user.
 *
 * Validates the body with RenameChatSchema (400 on failure), requires an
 * active session (401), verifies the chat is accessible to the user (404),
 * updates the title, then broadcasts a 'renamed' status event for the chat's
 * workspace. Returns 500 on any other error.
 */
export async function PATCH(request: NextRequest) {
  try {
    const session = await getSession()
    if (!session?.user?.id) {
      return NextResponse.json({ success: false, error: 'Unauthorized' }, { status: 401 })
    }
    const body = await request.json()
    const { chatId, title } = RenameChatSchema.parse(body)
    // Access check before mutation; a miss is a 404, same as the update below.
    const chat = await getAccessibleCopilotChat(chatId, session.user.id)
    if (!chat) {
      return NextResponse.json({ success: false, error: 'Chat not found' }, { status: 404 })
    }
    const now = new Date()
    // lastSeenAt is bumped together with updatedAt on rename — NOTE(review):
    // confirm renaming is meant to count as "seeing" the chat.
    const [updated] = await db
      .update(copilotChats)
      .set({ title, updatedAt: now, lastSeenAt: now })
      // userId is re-checked in the WHERE clause so the update itself cannot
      // touch another user's row even if access resolution changed.
      .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, session.user.id)))
      .returning({ id: copilotChats.id, workspaceId: copilotChats.workspaceId })
    if (!updated) {
      return NextResponse.json({ success: false, error: 'Chat not found' }, { status: 404 })
    }
    logger.info('Chat renamed', { chatId, title })
    // Best-effort workspace notification; the optional chain implies
    // taskPubSub may be absent in some deployments — TODO confirm.
    if (updated.workspaceId) {
      taskPubSub?.publishStatusChanged({
        workspaceId: updated.workspaceId,
        chatId,
        type: 'renamed',
      })
    }
    return NextResponse.json({ success: true })
  } catch (error) {
    if (error instanceof z.ZodError) {
      return NextResponse.json(
        { success: false, error: 'Invalid request data', details: error.errors },
        { status: 400 }
      )
    }
    logger.error('Error renaming chat:', error)
    return NextResponse.json({ success: false, error: 'Failed to rename chat' }, { status: 500 })
  }
}

View File

@@ -10,8 +10,8 @@ import {
createInternalServerErrorResponse,
createNotFoundResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import type { ChatResource, ResourceType } from '@/lib/copilot/resources'
} from '@/lib/copilot/request/http'
import type { ChatResource, ResourceType } from '@/lib/copilot/resources/persistence'
const logger = createLogger('CopilotChatResourcesAPI')

View File

@@ -1,45 +1,45 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, desc, eq, sql } from 'drizzle-orm'
import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { createRunSegment } from '@/lib/copilot/async-runs/repository'
import { getAccessibleCopilotChat, resolveOrCreateChat } from '@/lib/copilot/chat-lifecycle'
import { buildCopilotRequestPayload } from '@/lib/copilot/chat-payload'
import { type ChatLoadResult, resolveOrCreateChat } from '@/lib/copilot/chat/lifecycle'
import { buildCopilotRequestPayload } from '@/lib/copilot/chat/payload'
import {
acquirePendingChatStream,
createSSEStream,
releasePendingChatStream,
requestChatTitle,
SSE_RESPONSE_HEADERS,
} from '@/lib/copilot/chat-streaming'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/models'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import { getStreamMeta, readStreamEvents } from '@/lib/copilot/orchestrator/stream/buffer'
import type { OrchestratorResult } from '@/lib/copilot/orchestrator/types'
import { resolveActiveResourceContext } from '@/lib/copilot/process-contents'
buildPersistedAssistantMessage,
buildPersistedUserMessage,
} from '@/lib/copilot/chat/persisted-message'
import {
processContextsServer,
resolveActiveResourceContext,
} from '@/lib/copilot/chat/process-contents'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/constants'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { createSSEStream, SSE_RESPONSE_HEADERS } from '@/lib/copilot/request/lifecycle/start'
import {
authorizeWorkflowByWorkspacePermission,
resolveWorkflowIdForUser,
} from '@/lib/workflows/utils'
import {
assertActiveWorkspaceAccess,
getUserEntityPermissions,
} from '@/lib/workspaces/permissions/utils'
acquirePendingChatStream,
getPendingChatStreamId,
releasePendingChatStream,
} from '@/lib/copilot/request/session'
import type { OrchestratorResult } from '@/lib/copilot/request/types'
import { getWorkflowById, resolveWorkflowIdForUser } from '@/lib/workflows/utils'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
import type { ChatContext } from '@/stores/panel'
export const maxDuration = 3600
const logger = createLogger('CopilotChatAPI')
// ---------------------------------------------------------------------------
// Schemas
// ---------------------------------------------------------------------------
const FileAttachmentSchema = z.object({
id: z.string(),
key: z.string(),
@@ -66,7 +66,6 @@ const ChatMessageSchema = z.object({
mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'),
prefetch: z.boolean().optional(),
createNewChat: z.boolean().optional().default(false),
stream: z.boolean().optional().default(true),
implicitFeedback: z.string().optional(),
fileAttachments: z.array(FileAttachmentSchema).optional(),
resourceAttachments: z.array(ResourceAttachmentSchema).optional(),
@@ -104,27 +103,25 @@ const ChatMessageSchema = z.object({
userTimezone: z.string().optional(),
})
/**
* POST /api/copilot/chat
* Send messages to sim agent and handle chat persistence
*/
// ---------------------------------------------------------------------------
// POST /api/copilot/chat
// ---------------------------------------------------------------------------
export async function POST(req: NextRequest) {
const tracker = createRequestTracker()
let actualChatId: string | undefined
let pendingChatStreamAcquired = false
let pendingChatStreamHandedOff = false
let pendingChatStreamID: string | undefined
let chatStreamLockAcquired = false
let userMessageIdToUse = ''
try {
// Get session to access user information including name
// 1. Auth
const session = await getSession()
if (!session?.user?.id) {
return createUnauthorizedResponse()
}
const authenticatedUserId = session.user.id
// 2. Parse & validate
const body = await req.json()
const {
message,
@@ -137,7 +134,6 @@ export async function POST(req: NextRequest) {
mode,
prefetch,
createNewChat,
stream,
implicitFeedback,
fileAttachments,
resourceAttachments,
@@ -151,17 +147,12 @@ export async function POST(req: NextRequest) {
? contexts.map((ctx) => {
if (ctx.kind !== 'blocks') return ctx
if (Array.isArray(ctx.blockIds) && ctx.blockIds.length > 0) return ctx
if (ctx.blockId) {
return {
...ctx,
blockIds: [ctx.blockId],
}
}
if (ctx.blockId) return { ...ctx, blockIds: [ctx.blockId] }
return ctx
})
: contexts
// Copilot route always requires a workflow scope
// 3. Resolve workflow & workspace
const resolved = await resolveWorkflowIdForUser(
authenticatedUserId,
providedWorkflowId,
@@ -173,48 +164,29 @@ export async function POST(req: NextRequest) {
'No workflows found. Create a workflow first or provide a valid workflowId.'
)
}
const workflowId = resolved.workflowId
const workflowResolvedName = resolved.workflowName
const { workflowId, workflowName: workflowResolvedName } = resolved
// Resolve workspace from workflow so it can be sent as implicit context to the copilot.
let resolvedWorkspaceId: string | undefined
try {
const { getWorkflowById } = await import('@/lib/workflows/utils')
const wf = await getWorkflowById(workflowId)
resolvedWorkspaceId = wf?.workspaceId ?? undefined
} catch {
logger
.withMetadata({ requestId: tracker.requestId, messageId: userMessageId })
.warn('Failed to resolve workspaceId from workflow')
logger.warn(`[${tracker.requestId}] Failed to resolve workspaceId from workflow`)
}
const userMessageIdToUse = userMessageId || crypto.randomUUID()
const reqLogger = logger.withMetadata({
requestId: tracker.requestId,
messageId: userMessageIdToUse,
})
try {
reqLogger.info('Received chat POST', {
workflowId,
hasContexts: Array.isArray(normalizedContexts),
contextsCount: Array.isArray(normalizedContexts) ? normalizedContexts.length : 0,
contextsPreview: Array.isArray(normalizedContexts)
? normalizedContexts.map((c: any) => ({
kind: c?.kind,
chatId: c?.chatId,
workflowId: c?.workflowId,
executionId: (c as any)?.executionId,
label: c?.label,
}))
: undefined,
})
} catch {}
let currentChat: any = null
let conversationHistory: any[] = []
actualChatId = chatId
userMessageIdToUse = userMessageId || crypto.randomUUID()
const selectedModel = model || 'claude-opus-4-6'
logger.info(`[${tracker.requestId}] Received chat POST`, {
workflowId,
contextsCount: Array.isArray(normalizedContexts) ? normalizedContexts.length : 0,
})
// 4. Resolve or create chat
let currentChat: ChatLoadResult['chat'] = null
let conversationHistory: unknown[] = []
actualChatId = chatId
if (chatId || createNewChat) {
const chatResult = await resolveOrCreateChat({
chatId,
@@ -233,37 +205,48 @@ export async function POST(req: NextRequest) {
}
}
if (actualChatId) {
chatStreamLockAcquired = await acquirePendingChatStream(actualChatId, userMessageIdToUse)
if (!chatStreamLockAcquired) {
const activeStreamId = await getPendingChatStreamId(actualChatId)
return NextResponse.json(
{
error: 'A response is already in progress for this chat.',
...(activeStreamId ? { activeStreamId } : {}),
},
{ status: 409 }
)
}
}
// 5. Process contexts
let agentContexts: Array<{ type: string; content: string }> = []
if (Array.isArray(normalizedContexts) && normalizedContexts.length > 0) {
try {
const { processContextsServer } = await import('@/lib/copilot/process-contents')
const processed = await processContextsServer(
normalizedContexts as any,
normalizedContexts as ChatContext[],
authenticatedUserId,
message,
resolvedWorkspaceId,
actualChatId
)
agentContexts = processed
reqLogger.info('Contexts processed for request', {
logger.info(`[${tracker.requestId}] Contexts processed`, {
processedCount: agentContexts.length,
kinds: agentContexts.map((c) => c.type),
lengthPreview: agentContexts.map((c) => c.content?.length ?? 0),
})
if (
Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 &&
agentContexts.length === 0
) {
reqLogger.warn(
'Contexts provided but none processed. Check executionId for logs contexts.'
if (agentContexts.length === 0) {
logger.warn(
`[${tracker.requestId}] Contexts provided but none processed. Check executionId for logs contexts.`
)
}
} catch (e) {
reqLogger.error('Failed to process contexts', e)
logger.error(`[${tracker.requestId}] Failed to process contexts`, e)
}
}
// 5b. Process resource attachments
if (
Array.isArray(resourceAttachments) &&
resourceAttachments.length > 0 &&
@@ -279,26 +262,30 @@ export async function POST(req: NextRequest) {
actualChatId
)
if (!ctx) return null
return {
...ctx,
tag: r.active ? '@active_tab' : '@open_tab',
}
return { ...ctx, tag: r.active ? '@active_tab' : '@open_tab' }
})
)
for (const result of results) {
if (result.status === 'fulfilled' && result.value) {
agentContexts.push(result.value)
} else if (result.status === 'rejected') {
reqLogger.error('Failed to resolve resource attachment', result.reason)
logger.error(
`[${tracker.requestId}] Failed to resolve resource attachment`,
result.reason
)
}
}
}
const effectiveMode = mode === 'agent' ? 'build' : mode
// 6. Build copilot request payload
const userPermission = resolvedWorkspaceId
? await getUserEntityPermissions(authenticatedUserId, 'workspace', resolvedWorkspaceId).catch(
() => null
(err) => {
logger.warn('Failed to load user permissions', {
error: err instanceof Error ? err.message : String(err),
})
return null
}
)
: null
@@ -322,55 +309,24 @@ export async function POST(req: NextRequest) {
userPermission: userPermission ?? undefined,
userTimezone,
},
{
selectedModel,
}
{ selectedModel }
)
try {
reqLogger.info('About to call Sim Agent', {
hasContext: agentContexts.length > 0,
contextCount: agentContexts.length,
hasFileAttachments: Array.isArray(requestPayload.fileAttachments),
messageLength: message.length,
mode: effectiveMode,
hasTools: Array.isArray(requestPayload.tools),
toolCount: Array.isArray(requestPayload.tools) ? requestPayload.tools.length : 0,
hasBaseTools: Array.isArray(requestPayload.baseTools),
baseToolCount: Array.isArray(requestPayload.baseTools)
? requestPayload.baseTools.length
: 0,
hasCredentials: !!requestPayload.credentials,
})
} catch {}
if (stream && actualChatId) {
const acquired = await acquirePendingChatStream(actualChatId, userMessageIdToUse)
if (!acquired) {
return NextResponse.json(
{
error:
'A response is already in progress for this chat. Wait for it to finish or use Stop.',
},
{ status: 409 }
)
}
pendingChatStreamAcquired = true
pendingChatStreamID = userMessageIdToUse
}
logger.info(`[${tracker.requestId}] About to call Sim Agent`, {
contextCount: agentContexts.length,
hasFileAttachments: Array.isArray(requestPayload.fileAttachments),
messageLength: message.length,
mode,
})
// 7. Persist user message
if (actualChatId) {
const userMsg = {
const userMsg = buildPersistedUserMessage({
id: userMessageIdToUse,
role: 'user' as const,
content: message,
timestamp: new Date().toISOString(),
...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }),
...(Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 && {
contexts: normalizedContexts,
}),
}
fileAttachments,
contexts: normalizedContexts,
})
const [updated] = await db
.update(copilotChats)
@@ -383,268 +339,66 @@ export async function POST(req: NextRequest) {
.returning({ messages: copilotChats.messages })
if (updated) {
const freshMessages: any[] = Array.isArray(updated.messages) ? updated.messages : []
conversationHistory = freshMessages.filter((m: any) => m.id !== userMessageIdToUse)
const freshMessages: Record<string, unknown>[] = Array.isArray(updated.messages)
? updated.messages
: []
conversationHistory = freshMessages.filter(
(m: Record<string, unknown>) => m.id !== userMessageIdToUse
)
}
}
if (stream) {
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
const sseStream = createSSEStream({
requestPayload,
userId: authenticatedUserId,
streamId: userMessageIdToUse,
executionId,
runId,
chatId: actualChatId,
currentChat,
isNewChat: conversationHistory.length === 0,
message,
titleModel: selectedModel,
titleProvider: provider,
requestId: tracker.requestId,
workspaceId: resolvedWorkspaceId,
pendingChatStreamAlreadyRegistered: Boolean(actualChatId && stream),
orchestrateOptions: {
userId: authenticatedUserId,
workflowId,
chatId: actualChatId,
executionId,
runId,
goRoute: '/api/copilot',
autoExecuteTools: true,
interactive: true,
onComplete: async (result: OrchestratorResult) => {
if (!actualChatId) return
if (!result.success) return
// 8. Create SSE stream with onComplete for assistant message persistence
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
const assistantMessage: Record<string, unknown> = {
id: crypto.randomUUID(),
role: 'assistant' as const,
content: result.content,
timestamp: new Date().toISOString(),
...(result.requestId ? { requestId: result.requestId } : {}),
}
if (result.toolCalls.length > 0) {
assistantMessage.toolCalls = result.toolCalls
}
if (result.contentBlocks.length > 0) {
assistantMessage.contentBlocks = result.contentBlocks.map((block) => {
const stored: Record<string, unknown> = { type: block.type }
if (block.content) stored.content = block.content
if (block.type === 'tool_call' && block.toolCall) {
const state =
block.toolCall.result?.success !== undefined
? block.toolCall.result.success
? 'success'
: 'error'
: block.toolCall.status
const isSubagentTool = !!block.calledBy
const isNonTerminal =
state === 'cancelled' || state === 'pending' || state === 'executing'
stored.toolCall = {
id: block.toolCall.id,
name: block.toolCall.name,
state,
...(isSubagentTool && isNonTerminal ? {} : { result: block.toolCall.result }),
...(isSubagentTool && isNonTerminal
? {}
: block.toolCall.params
? { params: block.toolCall.params }
: {}),
...(block.calledBy ? { calledBy: block.calledBy } : {}),
}
}
return stored
})
}
try {
const [row] = await db
.select({ messages: copilotChats.messages })
.from(copilotChats)
.where(eq(copilotChats.id, actualChatId))
.limit(1)
const msgs: any[] = Array.isArray(row?.messages) ? row.messages : []
const userIdx = msgs.findIndex((m: any) => m.id === userMessageIdToUse)
const alreadyHasResponse =
userIdx >= 0 &&
userIdx + 1 < msgs.length &&
(msgs[userIdx + 1] as any)?.role === 'assistant'
if (!alreadyHasResponse) {
await db
.update(copilotChats)
.set({
messages: sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb`,
conversationId: sql`CASE WHEN ${copilotChats.conversationId} = ${userMessageIdToUse} THEN NULL ELSE ${copilotChats.conversationId} END`,
updatedAt: new Date(),
})
.where(eq(copilotChats.id, actualChatId))
}
} catch (error) {
reqLogger.error('Failed to persist chat messages', {
chatId: actualChatId,
error: error instanceof Error ? error.message : 'Unknown error',
})
}
},
},
})
pendingChatStreamHandedOff = true
return new Response(sseStream, { headers: SSE_RESPONSE_HEADERS })
}
const nsExecutionId = crypto.randomUUID()
const nsRunId = crypto.randomUUID()
if (actualChatId) {
await createRunSegment({
id: nsRunId,
executionId: nsExecutionId,
chatId: actualChatId,
const sseStream = createSSEStream({
requestPayload,
userId: authenticatedUserId,
streamId: userMessageIdToUse,
executionId,
runId,
chatId: actualChatId,
currentChat,
isNewChat: conversationHistory.length === 0,
message,
titleModel: selectedModel,
titleProvider: provider,
requestId: tracker.requestId,
workspaceId: resolvedWorkspaceId,
orchestrateOptions: {
userId: authenticatedUserId,
workflowId,
streamId: userMessageIdToUse,
}).catch(() => {})
}
const nonStreamingResult = await orchestrateCopilotStream(requestPayload, {
userId: authenticatedUserId,
workflowId,
chatId: actualChatId,
executionId: nsExecutionId,
runId: nsRunId,
goRoute: '/api/copilot',
autoExecuteTools: true,
interactive: true,
})
const responseData = {
content: nonStreamingResult.content,
toolCalls: nonStreamingResult.toolCalls,
model: selectedModel,
provider: typeof requestPayload?.provider === 'string' ? requestPayload.provider : undefined,
}
reqLogger.info('Non-streaming response from orchestrator', {
hasContent: !!responseData.content,
contentLength: responseData.content?.length || 0,
model: responseData.model,
provider: responseData.provider,
toolCallsCount: responseData.toolCalls?.length || 0,
})
// Save messages if we have a chat
if (currentChat && responseData.content) {
const userMessage = {
id: userMessageIdToUse, // Consistent ID used for request and persistence
role: 'user',
content: message,
timestamp: new Date().toISOString(),
...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }),
...(Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 && {
contexts: normalizedContexts,
}),
...(Array.isArray(normalizedContexts) &&
normalizedContexts.length > 0 && {
contentBlocks: [
{ type: 'contexts', contexts: normalizedContexts as any, timestamp: Date.now() },
],
}),
}
const assistantMessage = {
id: crypto.randomUUID(),
role: 'assistant',
content: responseData.content,
timestamp: new Date().toISOString(),
}
const updatedMessages = [...conversationHistory, userMessage, assistantMessage]
// Start title generation in parallel if this is first message (non-streaming)
if (actualChatId && !currentChat.title && conversationHistory.length === 0) {
reqLogger.info('Starting title generation for non-streaming response')
requestChatTitle({ message, model: selectedModel, provider, messageId: userMessageIdToUse })
.then(async (title) => {
if (title) {
await db
.update(copilotChats)
.set({
title,
updatedAt: new Date(),
})
.where(eq(copilotChats.id, actualChatId!))
reqLogger.info(`Generated and saved title: ${title}`)
}
})
.catch((error) => {
reqLogger.error('Title generation failed', error)
})
}
// Update chat in database immediately (without blocking for title)
await db
.update(copilotChats)
.set({
messages: updatedMessages,
updatedAt: new Date(),
})
.where(eq(copilotChats.id, actualChatId!))
}
reqLogger.info('Returning non-streaming response', {
duration: tracker.getDuration(),
chatId: actualChatId,
responseLength: responseData.content?.length || 0,
})
return NextResponse.json({
success: true,
response: responseData,
chatId: actualChatId,
metadata: {
requestId: tracker.requestId,
message,
duration: tracker.getDuration(),
chatId: actualChatId,
executionId,
runId,
goRoute: '/api/copilot',
autoExecuteTools: true,
interactive: true,
onComplete: buildOnComplete(actualChatId, userMessageIdToUse, tracker.requestId),
},
})
return new Response(sseStream, { headers: SSE_RESPONSE_HEADERS })
} catch (error) {
if (
actualChatId &&
pendingChatStreamAcquired &&
!pendingChatStreamHandedOff &&
pendingChatStreamID
) {
await releasePendingChatStream(actualChatId, pendingChatStreamID).catch(() => {})
if (chatStreamLockAcquired && actualChatId && userMessageIdToUse) {
await releasePendingChatStream(actualChatId, userMessageIdToUse)
}
const duration = tracker.getDuration()
if (error instanceof z.ZodError) {
logger
.withMetadata({ requestId: tracker.requestId, messageId: pendingChatStreamID ?? undefined })
.error('Validation error', {
duration,
errors: error.errors,
})
logger.error(`[${tracker.requestId}] Validation error:`, { duration, errors: error.errors })
return NextResponse.json(
{ error: 'Invalid request data', details: error.errors },
{ status: 400 }
)
}
logger
.withMetadata({ requestId: tracker.requestId, messageId: pendingChatStreamID ?? undefined })
.error('Error handling copilot chat', {
duration,
error: error instanceof Error ? error.message : 'Unknown error',
stack: error instanceof Error ? error.stack : undefined,
})
logger.error(`[${tracker.requestId}] Error handling copilot chat:`, {
duration,
error: error instanceof Error ? error.message : 'Unknown error',
stack: error instanceof Error ? error.stack : undefined,
})
return NextResponse.json(
{ error: error instanceof Error ? error.message : 'Internal server error' },
@@ -653,132 +407,55 @@ export async function POST(req: NextRequest) {
}
}
export async function GET(req: NextRequest) {
try {
const { searchParams } = new URL(req.url)
const workflowId = searchParams.get('workflowId')
const workspaceId = searchParams.get('workspaceId')
const chatId = searchParams.get('chatId')
// ---------------------------------------------------------------------------
// onComplete: persist assistant message after streaming finishes
// ---------------------------------------------------------------------------
const { userId: authenticatedUserId, isAuthenticated } =
await authenticateCopilotRequestSessionOnly()
if (!isAuthenticated || !authenticatedUserId) {
return createUnauthorizedResponse()
}
function buildOnComplete(
chatId: string | undefined,
userMessageId: string,
requestId: string
): (result: OrchestratorResult) => Promise<void> {
return async (result) => {
if (!chatId || !result.success) return
if (chatId) {
const chat = await getAccessibleCopilotChat(chatId, authenticatedUserId)
const assistantMessage = buildPersistedAssistantMessage(result, result.requestId)
if (!chat) {
return NextResponse.json({ success: false, error: 'Chat not found' }, { status: 404 })
try {
const [row] = await db
.select({ messages: copilotChats.messages })
.from(copilotChats)
.where(eq(copilotChats.id, chatId))
.limit(1)
const msgs: Record<string, unknown>[] = Array.isArray(row?.messages) ? row.messages : []
const userIdx = msgs.findIndex((m: Record<string, unknown>) => m.id === userMessageId)
const alreadyHasResponse =
userIdx >= 0 &&
userIdx + 1 < msgs.length &&
(msgs[userIdx + 1] as Record<string, unknown>)?.role === 'assistant'
if (!alreadyHasResponse) {
await db
.update(copilotChats)
.set({
messages: sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb`,
conversationId: sql`CASE WHEN ${copilotChats.conversationId} = ${userMessageId} THEN NULL ELSE ${copilotChats.conversationId} END`,
updatedAt: new Date(),
})
.where(eq(copilotChats.id, chatId))
}
let streamSnapshot: {
events: Array<{ eventId: number; streamId: string; event: Record<string, unknown> }>
status: string
} | null = null
if (chat.conversationId) {
try {
const [meta, events] = await Promise.all([
getStreamMeta(chat.conversationId),
readStreamEvents(chat.conversationId, 0),
])
streamSnapshot = {
events: events || [],
status: meta?.status || 'unknown',
}
} catch (err) {
logger
.withMetadata({ messageId: chat.conversationId || undefined })
.warn('Failed to read stream snapshot for chat', {
chatId,
conversationId: chat.conversationId,
error: err instanceof Error ? err.message : String(err),
})
}
}
const transformedChat = {
id: chat.id,
title: chat.title,
model: chat.model,
messages: Array.isArray(chat.messages) ? chat.messages : [],
messageCount: Array.isArray(chat.messages) ? chat.messages.length : 0,
planArtifact: chat.planArtifact || null,
config: chat.config || null,
conversationId: chat.conversationId || null,
resources: Array.isArray(chat.resources) ? chat.resources : [],
createdAt: chat.createdAt,
updatedAt: chat.updatedAt,
...(streamSnapshot ? { streamSnapshot } : {}),
}
logger
.withMetadata({ messageId: chat.conversationId || undefined })
.info(`Retrieved chat ${chatId}`)
return NextResponse.json({ success: true, chat: transformedChat })
}
if (!workflowId && !workspaceId) {
return createBadRequestResponse('workflowId, workspaceId, or chatId is required')
}
if (workspaceId) {
await assertActiveWorkspaceAccess(workspaceId, authenticatedUserId)
}
if (workflowId) {
const authorization = await authorizeWorkflowByWorkspacePermission({
workflowId,
userId: authenticatedUserId,
action: 'read',
} catch (error) {
logger.error(`[${requestId}] Failed to persist chat messages`, {
chatId,
error: error instanceof Error ? error.message : 'Unknown error',
})
if (!authorization.allowed) {
return createUnauthorizedResponse()
}
}
const scopeFilter = workflowId
? eq(copilotChats.workflowId, workflowId)
: eq(copilotChats.workspaceId, workspaceId!)
const chats = await db
.select({
id: copilotChats.id,
title: copilotChats.title,
model: copilotChats.model,
messages: copilotChats.messages,
planArtifact: copilotChats.planArtifact,
config: copilotChats.config,
createdAt: copilotChats.createdAt,
updatedAt: copilotChats.updatedAt,
})
.from(copilotChats)
.where(and(eq(copilotChats.userId, authenticatedUserId), scopeFilter))
.orderBy(desc(copilotChats.updatedAt))
const transformedChats = chats.map((chat) => ({
id: chat.id,
title: chat.title,
model: chat.model,
messages: Array.isArray(chat.messages) ? chat.messages : [],
messageCount: Array.isArray(chat.messages) ? chat.messages.length : 0,
planArtifact: chat.planArtifact || null,
config: chat.config || null,
createdAt: chat.createdAt,
updatedAt: chat.updatedAt,
}))
const scope = workflowId ? `workflow ${workflowId}` : `workspace ${workspaceId}`
logger.info(`Retrieved ${transformedChats.length} chats for ${scope}`)
return NextResponse.json({
success: true,
chats: transformedChats,
})
} catch (error) {
logger.error('Error fetching copilot chats', error)
return createInternalServerErrorResponse('Failed to fetch chats')
}
}
// ---------------------------------------------------------------------------
// GET handler (read-only queries, extracted to queries.ts)
// ---------------------------------------------------------------------------
export { GET } from './queries'

View File

@@ -4,25 +4,67 @@
import { NextRequest } from 'next/server'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import {
MothershipStreamV1CompletionStatus,
MothershipStreamV1EventType,
} from '@/lib/copilot/generated/mothership-stream-v1'
const { getStreamMeta, readStreamEvents, authenticateCopilotRequestSessionOnly } = vi.hoisted(
() => ({
getStreamMeta: vi.fn(),
readStreamEvents: vi.fn(),
authenticateCopilotRequestSessionOnly: vi.fn(),
})
)
vi.mock('@/lib/copilot/orchestrator/stream/buffer', () => ({
getStreamMeta,
readStreamEvents,
const {
getLatestRunForStream,
readEvents,
checkForReplayGap,
authenticateCopilotRequestSessionOnly,
} = vi.hoisted(() => ({
getLatestRunForStream: vi.fn(),
readEvents: vi.fn(),
checkForReplayGap: vi.fn(),
authenticateCopilotRequestSessionOnly: vi.fn(),
}))
vi.mock('@/lib/copilot/request-helpers', () => ({
vi.mock('@/lib/copilot/async-runs/repository', () => ({
getLatestRunForStream,
}))
vi.mock('@/lib/copilot/request/session', () => ({
readEvents,
checkForReplayGap,
createEvent: (event: Record<string, unknown>) => ({
stream: {
streamId: event.streamId,
cursor: event.cursor,
},
seq: event.seq,
trace: { requestId: event.requestId ?? '' },
type: event.type,
payload: event.payload,
}),
encodeSSEEnvelope: (event: Record<string, unknown>) =>
new TextEncoder().encode(`data: ${JSON.stringify(event)}\n\n`),
SSE_RESPONSE_HEADERS: {
'Content-Type': 'text/event-stream',
},
}))
vi.mock('@/lib/copilot/request/http', () => ({
authenticateCopilotRequestSessionOnly,
}))
import { GET } from '@/app/api/copilot/chat/stream/route'
import { GET } from './route'
async function readAllChunks(response: Response): Promise<string[]> {
const reader = response.body?.getReader()
expect(reader).toBeTruthy()
const chunks: string[] = []
while (true) {
const { done, value } = await reader!.read()
if (done) {
break
}
chunks.push(new TextDecoder().decode(value))
}
return chunks
}
describe('copilot chat stream replay route', () => {
beforeEach(() => {
@@ -31,29 +73,54 @@ describe('copilot chat stream replay route', () => {
userId: 'user-1',
isAuthenticated: true,
})
readStreamEvents.mockResolvedValue([])
readEvents.mockResolvedValue([])
checkForReplayGap.mockResolvedValue(null)
})
it('stops replay polling when stream meta becomes cancelled', async () => {
getStreamMeta
it('stops replay polling when run becomes cancelled', async () => {
getLatestRunForStream
.mockResolvedValueOnce({
status: 'active',
userId: 'user-1',
executionId: 'exec-1',
id: 'run-1',
})
.mockResolvedValueOnce({
status: 'cancelled',
userId: 'user-1',
executionId: 'exec-1',
id: 'run-1',
})
const response = await GET(
new NextRequest('http://localhost:3000/api/copilot/chat/stream?streamId=stream-1')
new NextRequest('http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0')
)
const reader = response.body?.getReader()
expect(reader).toBeTruthy()
const chunks = await readAllChunks(response)
expect(chunks.join('')).toContain(
JSON.stringify({
status: MothershipStreamV1CompletionStatus.cancelled,
reason: 'terminal_status',
})
)
expect(getLatestRunForStream).toHaveBeenCalledTimes(2)
})
const first = await reader!.read()
expect(first.done).toBe(true)
expect(getStreamMeta).toHaveBeenCalledTimes(2)
it('emits structured terminal replay error when run metadata disappears', async () => {
getLatestRunForStream
.mockResolvedValueOnce({
status: 'active',
executionId: 'exec-1',
id: 'run-1',
})
.mockResolvedValueOnce(null)
const response = await GET(
new NextRequest('http://localhost:3000/api/copilot/chat/stream?streamId=stream-1&after=0')
)
const chunks = await readAllChunks(response)
const body = chunks.join('')
expect(body).toContain(`"type":"${MothershipStreamV1EventType.error}"`)
expect(body).toContain('"code":"resume_run_unavailable"')
expect(body).toContain(`"type":"${MothershipStreamV1EventType.complete}"`)
})
})

View File

@@ -1,12 +1,18 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { getLatestRunForStream } from '@/lib/copilot/async-runs/repository'
import {
getStreamMeta,
readStreamEvents,
type StreamMeta,
} from '@/lib/copilot/orchestrator/stream/buffer'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
MothershipStreamV1CompletionStatus,
MothershipStreamV1EventType,
} from '@/lib/copilot/generated/mothership-stream-v1'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http'
import {
checkForReplayGap,
createEvent,
encodeSSEEnvelope,
readEvents,
SSE_RESPONSE_HEADERS,
} from '@/lib/copilot/request/session'
export const maxDuration = 3600
@@ -14,8 +20,59 @@ const logger = createLogger('CopilotChatStreamAPI')
const POLL_INTERVAL_MS = 250
const MAX_STREAM_MS = 60 * 60 * 1000
function encodeEvent(event: Record<string, any>): Uint8Array {
return new TextEncoder().encode(`data: ${JSON.stringify(event)}\n\n`)
function isTerminalStatus(
status: string | null | undefined
): status is MothershipStreamV1CompletionStatus {
return (
status === MothershipStreamV1CompletionStatus.complete ||
status === MothershipStreamV1CompletionStatus.error ||
status === MothershipStreamV1CompletionStatus.cancelled
)
}
function buildResumeTerminalEnvelopes(options: {
streamId: string
afterCursor: string
status: MothershipStreamV1CompletionStatus
message?: string
code: string
reason?: string
}) {
const baseSeq = Number(options.afterCursor || '0')
const seq = Number.isFinite(baseSeq) ? baseSeq : 0
const envelopes: ReturnType<typeof createEvent>[] = []
if (options.status === MothershipStreamV1CompletionStatus.error) {
envelopes.push(
createEvent({
streamId: options.streamId,
cursor: String(seq + 1),
seq: seq + 1,
requestId: '',
type: MothershipStreamV1EventType.error,
payload: {
message: options.message || 'Stream recovery failed before completion.',
code: options.code,
},
})
)
}
envelopes.push(
createEvent({
streamId: options.streamId,
cursor: String(seq + envelopes.length + 1),
seq: seq + envelopes.length + 1,
requestId: '',
type: MothershipStreamV1EventType.complete,
payload: {
status: options.status,
...(options.reason ? { reason: options.reason } : {}),
},
})
)
return envelopes
}
export async function GET(request: NextRequest) {
@@ -28,58 +85,49 @@ export async function GET(request: NextRequest) {
const url = new URL(request.url)
const streamId = url.searchParams.get('streamId') || ''
const fromParam = url.searchParams.get('from') || '0'
const fromEventId = Number(fromParam || 0)
// If batch=true, return buffered events as JSON instead of SSE
const afterCursor = url.searchParams.get('after') || ''
const batchMode = url.searchParams.get('batch') === 'true'
const toParam = url.searchParams.get('to')
const toEventId = toParam ? Number(toParam) : undefined
const reqLogger = logger.withMetadata({ messageId: streamId || undefined })
reqLogger.info('[Resume] Received resume request', {
streamId: streamId || undefined,
fromEventId,
toEventId,
batchMode,
})
if (!streamId) {
return NextResponse.json({ error: 'streamId is required' }, { status: 400 })
}
const meta = (await getStreamMeta(streamId)) as StreamMeta | null
reqLogger.info('[Resume] Stream lookup', {
streamId,
fromEventId,
toEventId,
batchMode,
hasMeta: !!meta,
metaStatus: meta?.status,
const run = await getLatestRunForStream(streamId, authenticatedUserId).catch((err) => {
logger.warn('Failed to fetch latest run for stream', {
streamId,
error: err instanceof Error ? err.message : String(err),
})
return null
})
if (!meta) {
logger.info('[Resume] Stream lookup', {
streamId,
afterCursor,
batchMode,
hasRun: !!run,
runStatus: run?.status,
})
if (!run) {
return NextResponse.json({ error: 'Stream not found' }, { status: 404 })
}
if (meta.userId && meta.userId !== authenticatedUserId) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 403 })
}
// Batch mode: return all buffered events as JSON
if (batchMode) {
const events = await readStreamEvents(streamId, fromEventId)
const filteredEvents = toEventId ? events.filter((e) => e.eventId <= toEventId) : events
reqLogger.info('[Resume] Batch response', {
const afterSeq = afterCursor || '0'
const events = await readEvents(streamId, afterSeq)
const batchEvents = events.map((envelope) => ({
eventId: envelope.seq,
streamId: envelope.stream.streamId,
event: envelope,
}))
logger.info('[Resume] Batch response', {
streamId,
fromEventId,
toEventId,
eventCount: filteredEvents.length,
afterCursor: afterSeq,
eventCount: batchEvents.length,
runStatus: run.status,
})
return NextResponse.json({
success: true,
events: filteredEvents,
status: meta.status,
executionId: meta.executionId,
runId: meta.runId,
events: batchEvents,
status: run.status,
})
}
@@ -87,9 +135,9 @@ export async function GET(request: NextRequest) {
const stream = new ReadableStream({
async start(controller) {
let lastEventId = Number.isFinite(fromEventId) ? fromEventId : 0
let latestMeta = meta
let cursor = afterCursor || '0'
let controllerClosed = false
let sawTerminalEvent = false
const closeController = () => {
if (controllerClosed) return
@@ -97,14 +145,14 @@ export async function GET(request: NextRequest) {
try {
controller.close()
} catch {
// Controller already closed by runtime/client - treat as normal.
// Controller already closed by runtime/client
}
}
const enqueueEvent = (payload: Record<string, any>) => {
const enqueueEvent = (payload: unknown) => {
if (controllerClosed) return false
try {
controller.enqueue(encodeEvent(payload))
controller.enqueue(encodeSSEEnvelope(payload))
return true
} catch {
controllerClosed = true
@@ -118,47 +166,96 @@ export async function GET(request: NextRequest) {
request.signal.addEventListener('abort', abortListener, { once: true })
const flushEvents = async () => {
const events = await readStreamEvents(streamId, lastEventId)
const events = await readEvents(streamId, cursor)
if (events.length > 0) {
reqLogger.info('[Resume] Flushing events', {
logger.info('[Resume] Flushing events', {
streamId,
fromEventId: lastEventId,
afterCursor: cursor,
eventCount: events.length,
})
}
for (const entry of events) {
lastEventId = entry.eventId
const payload = {
...entry.event,
eventId: entry.eventId,
streamId: entry.streamId,
executionId: latestMeta?.executionId,
runId: latestMeta?.runId,
for (const envelope of events) {
cursor = envelope.stream.cursor ?? String(envelope.seq)
if (envelope.type === MothershipStreamV1EventType.complete) {
sawTerminalEvent = true
}
if (!enqueueEvent(payload)) {
if (!enqueueEvent(envelope)) {
break
}
}
}
// Pushes synthetic terminal envelopes when the replayed stream never produced
// its own `complete` event, so reconnecting clients always observe a terminal
// state. No-op once the controller is closed or a terminal event was seen.
const emitTerminalIfMissing = (
status: MothershipStreamV1CompletionStatus,
options?: { message?: string; code: string; reason?: string }
) => {
if (controllerClosed || sawTerminalEvent) {
return
}
// Build the closing envelopes starting after the last cursor this client
// received, so the synthetic tail lines up with the replayed events.
for (const envelope of buildResumeTerminalEnvelopes({
streamId,
afterCursor: cursor,
status,
message: options?.message,
code: options?.code ?? 'resume_terminal',
reason: options?.reason,
})) {
// Advance the resume cursor; fall back to the sequence number when the
// envelope carries no explicit cursor.
cursor = envelope.stream.cursor ?? String(envelope.seq)
if (envelope.type === MothershipStreamV1EventType.complete) {
sawTerminalEvent = true
}
// enqueueEvent returns false once the controller is closed - stop pushing.
if (!enqueueEvent(envelope)) {
break
}
}
}
try {
const gap = await checkForReplayGap(streamId, afterCursor)
if (gap) {
for (const envelope of gap.envelopes) {
enqueueEvent(envelope)
}
return
}
await flushEvents()
while (!controllerClosed && Date.now() - startTime < MAX_STREAM_MS) {
const currentMeta = await getStreamMeta(streamId)
if (!currentMeta) break
latestMeta = currentMeta
const currentRun = await getLatestRunForStream(streamId, authenticatedUserId).catch(
(err) => {
logger.warn('Failed to poll latest run for stream', {
streamId,
error: err instanceof Error ? err.message : String(err),
})
return null
}
)
if (!currentRun) {
emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, {
message: 'The stream could not be recovered because its run metadata is unavailable.',
code: 'resume_run_unavailable',
reason: 'run_unavailable',
})
break
}
await flushEvents()
if (controllerClosed) {
break
}
if (
currentMeta.status === 'complete' ||
currentMeta.status === 'error' ||
currentMeta.status === 'cancelled'
) {
if (isTerminalStatus(currentRun.status)) {
emitTerminalIfMissing(currentRun.status, {
message:
currentRun.status === MothershipStreamV1CompletionStatus.error
? typeof currentRun.error === 'string'
? currentRun.error
: 'The recovered stream ended with an error.'
: undefined,
code: 'resume_terminal_status',
reason: 'terminal_status',
})
break
}
@@ -169,12 +266,24 @@ export async function GET(request: NextRequest) {
await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS))
}
if (!controllerClosed && Date.now() - startTime >= MAX_STREAM_MS) {
emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, {
message: 'The stream recovery timed out before completion.',
code: 'resume_timeout',
reason: 'timeout',
})
}
} catch (error) {
if (!controllerClosed && !request.signal.aborted) {
reqLogger.warn('Stream replay failed', {
logger.warn('Stream replay failed', {
streamId,
error: error instanceof Error ? error.message : String(error),
})
emitTerminalIfMissing(MothershipStreamV1CompletionStatus.error, {
message: 'The stream replay failed before completion.',
code: 'resume_internal',
reason: 'stream_replay_failed',
})
}
} finally {
request.signal.removeEventListener('abort', abortListener)
@@ -183,5 +292,5 @@ export async function GET(request: NextRequest) {
},
})
return new Response(stream, { headers: SSE_HEADERS })
return new Response(stream, { headers: SSE_RESPONSE_HEADERS })
}

View File

@@ -327,7 +327,35 @@ describe('Copilot Chat Update Messages API Route', () => {
})
expect(mockSet).toHaveBeenCalledWith({
messages,
messages: [
{
id: 'msg-1',
role: 'user',
content: 'Hello',
timestamp: '2024-01-01T10:00:00.000Z',
},
{
id: 'msg-2',
role: 'assistant',
content: 'Hi there!',
timestamp: '2024-01-01T10:01:00.000Z',
contentBlocks: [
{
type: 'text',
content: 'Here is the weather information',
},
{
type: 'tool',
phase: 'call',
toolCall: {
id: 'tool-1',
name: 'get_weather',
state: 'pending',
},
},
],
},
],
updatedAt: expect.any(Date),
})
})

View File

@@ -4,15 +4,16 @@ import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat-lifecycle'
import { COPILOT_MODES } from '@/lib/copilot/models'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import { normalizeMessage, type PersistedMessage } from '@/lib/copilot/chat/persisted-message'
import { COPILOT_MODES } from '@/lib/copilot/constants'
import {
authenticateCopilotRequestSessionOnly,
createInternalServerErrorResponse,
createNotFoundResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
const logger = createLogger('CopilotChatUpdateAPI')
@@ -78,12 +79,15 @@ export async function POST(req: NextRequest) {
}
const { chatId, messages, planArtifact, config } = UpdateMessagesSchema.parse(body)
const normalizedMessages: PersistedMessage[] = messages.map((message) =>
normalizeMessage(message as Record<string, unknown>)
)
// Debug: Log what we're about to save
const lastMsgParsed = messages[messages.length - 1]
const lastMsgParsed = normalizedMessages[normalizedMessages.length - 1]
if (lastMsgParsed?.role === 'assistant') {
logger.info(`[${tracker.requestId}] Parsed messages to save`, {
messageCount: messages.length,
messageCount: normalizedMessages.length,
lastMsgId: lastMsgParsed.id,
lastMsgContentLength: lastMsgParsed.content?.length || 0,
lastMsgContentBlockCount: lastMsgParsed.contentBlocks?.length || 0,
@@ -99,8 +103,8 @@ export async function POST(req: NextRequest) {
}
// Update chat with new messages, plan artifact, and config
const updateData: Record<string, any> = {
messages: messages,
const updateData: Record<string, unknown> = {
messages: normalizedMessages,
updatedAt: new Date(),
}
@@ -116,14 +120,14 @@ export async function POST(req: NextRequest) {
logger.info(`[${tracker.requestId}] Successfully updated chat`, {
chatId,
newMessageCount: messages.length,
newMessageCount: normalizedMessages.length,
hasPlanArtifact: !!planArtifact,
hasConfig: !!config,
})
return NextResponse.json({
success: true,
messageCount: messages.length,
messageCount: normalizedMessages.length,
})
} catch (error) {
logger.error(`[${tracker.requestId}] Error updating chat messages:`, error)

View File

@@ -66,7 +66,7 @@ vi.mock('drizzle-orm', () => ({
sql: vi.fn(),
}))
vi.mock('@/lib/copilot/request-helpers', () => ({
vi.mock('@/lib/copilot/request/http', () => ({
authenticateCopilotRequestSessionOnly: mockAuthenticate,
createUnauthorizedResponse: mockCreateUnauthorizedResponse,
createInternalServerErrorResponse: mockCreateInternalServerErrorResponse,

View File

@@ -4,14 +4,14 @@ import { createLogger } from '@sim/logger'
import { and, desc, eq, isNull, or, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { resolveOrCreateChat } from '@/lib/copilot/chat-lifecycle'
import { resolveOrCreateChat } from '@/lib/copilot/chat/lifecycle'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import { taskPubSub } from '@/lib/copilot/task-events'
} from '@/lib/copilot/request/http'
import { taskPubSub } from '@/lib/copilot/tasks'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { assertActiveWorkspaceAccess } from '@/lib/workspaces/permissions/utils'
@@ -37,7 +37,7 @@ export async function GET(_request: NextRequest) {
title: copilotChats.title,
workflowId: copilotChats.workflowId,
workspaceId: copilotChats.workspaceId,
conversationId: copilotChats.conversationId,
activeStreamId: copilotChats.conversationId,
updatedAt: copilotChats.updatedAt,
})
.from(copilotChats)

View File

@@ -43,7 +43,7 @@ vi.mock('@/lib/workflows/utils', () => ({
authorizeWorkflowByWorkspacePermission: mockAuthorize,
}))
vi.mock('@/lib/copilot/chat-lifecycle', () => ({
vi.mock('@/lib/copilot/chat/lifecycle', () => ({
getAccessibleCopilotChat: mockGetAccessibleCopilotChat,
}))

View File

@@ -4,14 +4,14 @@ import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat-lifecycle'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import {
authenticateCopilotRequestSessionOnly,
createInternalServerErrorResponse,
createNotFoundResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { getInternalApiBaseUrl } from '@/lib/core/utils/urls'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { isUuidV4 } from '@/executor/constants'

View File

@@ -62,7 +62,7 @@ vi.mock('drizzle-orm', () => ({
desc: vi.fn((field: unknown) => ({ field, type: 'desc' })),
}))
vi.mock('@/lib/copilot/chat-lifecycle', () => ({
vi.mock('@/lib/copilot/chat/lifecycle', () => ({
getAccessibleCopilotChat: mockGetAccessibleCopilotChat,
}))

View File

@@ -4,14 +4,14 @@ import { createLogger } from '@sim/logger'
import { and, desc, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat-lifecycle'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
const logger = createLogger('WorkflowCheckpointsAPI')

View File

@@ -38,7 +38,7 @@ const {
publishToolConfirmation: vi.fn(),
}))
vi.mock('@/lib/copilot/request-helpers', () => ({
vi.mock('@/lib/copilot/request/http', () => ({
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
@@ -54,7 +54,7 @@ vi.mock('@/lib/copilot/async-runs/repository', () => ({
completeAsyncToolCall,
}))
vi.mock('@/lib/copilot/orchestrator/persistence', () => ({
vi.mock('@/lib/copilot/persistence/tool-confirm', () => ({
publishToolConfirmation,
}))

View File

@@ -1,13 +1,14 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { ASYNC_TOOL_STATUS } from '@/lib/copilot/async-runs/lifecycle'
import {
completeAsyncToolCall,
getAsyncToolCall,
getRunSegment,
upsertAsyncToolCall,
} from '@/lib/copilot/async-runs/repository'
import { publishToolConfirmation } from '@/lib/copilot/orchestrator/persistence'
import { publishToolConfirmation } from '@/lib/copilot/persistence/tool-confirm'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
@@ -16,7 +17,7 @@ import {
createRequestTracker,
createUnauthorizedResponse,
type NotificationStatus,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
const logger = createLogger('CopilotConfirmAPI')
@@ -42,17 +43,17 @@ async function updateToolCallStatus(
const toolCallId = existing.toolCallId
const durableStatus =
status === 'success'
? 'completed'
? ASYNC_TOOL_STATUS.completed
: status === 'cancelled'
? 'cancelled'
? ASYNC_TOOL_STATUS.cancelled
: status === 'error' || status === 'rejected'
? 'failed'
: 'pending'
? ASYNC_TOOL_STATUS.failed
: ASYNC_TOOL_STATUS.pending
try {
if (
durableStatus === 'completed' ||
durableStatus === 'failed' ||
durableStatus === 'cancelled'
durableStatus === ASYNC_TOOL_STATUS.completed ||
durableStatus === ASYNC_TOOL_STATUS.failed ||
durableStatus === ASYNC_TOOL_STATUS.cancelled
) {
await completeAsyncToolCall({
toolCallId,
@@ -107,13 +108,25 @@ export async function POST(req: NextRequest) {
const body = await req.json()
const { toolCallId, status, message, data } = ConfirmationSchema.parse(body)
const existing = await getAsyncToolCall(toolCallId).catch(() => null)
const existing = await getAsyncToolCall(toolCallId).catch((err) => {
logger.warn('Failed to fetch async tool call', {
toolCallId,
error: err instanceof Error ? err.message : String(err),
})
return null
})
if (!existing) {
return createNotFoundResponse('Tool call not found')
}
const run = await getRunSegment(existing.runId).catch(() => null)
const run = await getRunSegment(existing.runId).catch((err) => {
logger.warn('Failed to fetch run segment', {
runId: existing.runId,
error: err instanceof Error ? err.message : String(err),
})
return null
})
if (!run) {
return createNotFoundResponse('Tool call run not found')
}

View File

@@ -1,5 +1,5 @@
import { type NextRequest, NextResponse } from 'next/server'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http'
import { routeExecution } from '@/lib/copilot/tools/server/router'
/**

View File

@@ -57,7 +57,7 @@ vi.mock('drizzle-orm', () => ({
eq: vi.fn((field: unknown, value: unknown) => ({ field, value, type: 'eq' })),
}))
vi.mock('@/lib/copilot/request-helpers', () => ({
vi.mock('@/lib/copilot/request/http', () => ({
authenticateCopilotRequestSessionOnly: mockAuthenticate,
createUnauthorizedResponse: mockCreateUnauthorizedResponse,
createBadRequestResponse: mockCreateBadRequestResponse,

View File

@@ -10,7 +10,7 @@ import {
createInternalServerErrorResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
const logger = createLogger('CopilotFeedbackAPI')

View File

@@ -1,8 +1,14 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import type { AvailableModel } from '@/lib/copilot/types'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request/http'
interface AvailableModel {
id: string
friendlyName: string
provider: string
}
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotModelsAPI')

View File

@@ -23,7 +23,7 @@ const {
mockFetch: vi.fn(),
}))
vi.mock('@/lib/copilot/request-helpers', () => ({
vi.mock('@/lib/copilot/request/http', () => ({
authenticateCopilotRequestSessionOnly: mockAuthenticateCopilotRequestSessionOnly,
createUnauthorizedResponse: mockCreateUnauthorizedResponse,
createBadRequestResponse: mockCreateBadRequestResponse,

View File

@@ -7,7 +7,7 @@ import {
createInternalServerErrorResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { env } from '@/lib/core/config/env'
const BodySchema = z.object({

View File

@@ -4,7 +4,7 @@ import { z } from 'zod'
import {
authenticateCopilotRequestSessionOnly,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotTrainingExamplesAPI')

View File

@@ -4,7 +4,7 @@ import { z } from 'zod'
import {
authenticateCopilotRequestSessionOnly,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
} from '@/lib/copilot/request/http'
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotTrainingAPI')

View File

@@ -75,6 +75,16 @@ vi.mock('@/lib/uploads/utils/file-utils', () => ({
vi.mock('@/lib/uploads/setup.server', () => ({}))
vi.mock('@/lib/execution/doc-vm', () => ({
generatePdfFromCode: vi.fn().mockResolvedValue(Buffer.from('%PDF-compiled')),
generateDocxFromCode: vi.fn().mockResolvedValue(Buffer.from('PK\x03\x04compiled')),
generatePptxFromCode: vi.fn().mockResolvedValue(Buffer.from('PK\x03\x04compiled')),
}))
vi.mock('@/lib/uploads/contexts/workspace/workspace-file-manager', () => ({
parseWorkspaceFileKey: vi.fn().mockReturnValue(undefined),
}))
vi.mock('@/app/api/files/utils', () => ({
FileNotFoundError,
createFileResponse: mockCreateFileResponse,

View File

@@ -4,7 +4,11 @@ import { createLogger } from '@sim/logger'
import type { NextRequest } from 'next/server'
import { NextResponse } from 'next/server'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid'
import { generatePptxFromCode } from '@/lib/execution/pptx-vm'
import {
generateDocxFromCode,
generatePdfFromCode,
generatePptxFromCode,
} from '@/lib/execution/doc-vm'
import { CopilotFiles, isUsingCloudStorage } from '@/lib/uploads'
import type { StorageContext } from '@/lib/uploads/config'
import { parseWorkspaceFileKey } from '@/lib/uploads/contexts/workspace/workspace-file-manager'
@@ -22,47 +26,73 @@ import {
const logger = createLogger('FilesServeAPI')
const ZIP_MAGIC = Buffer.from([0x50, 0x4b, 0x03, 0x04])
const PDF_MAGIC = Buffer.from([0x25, 0x50, 0x44, 0x46, 0x2d]) // %PDF-
const MAX_COMPILED_PPTX_CACHE = 10
const compiledPptxCache = new Map<string, Buffer>()
function compiledCacheSet(key: string, buffer: Buffer): void {
if (compiledPptxCache.size >= MAX_COMPILED_PPTX_CACHE) {
compiledPptxCache.delete(compiledPptxCache.keys().next().value as string)
}
compiledPptxCache.set(key, buffer)
interface CompilableFormat {
magic: Buffer
compile: (code: string, workspaceId: string) => Promise<Buffer>
contentType: string
}
async function compilePptxIfNeeded(
/**
 * Document formats that may be stored as source code and compiled on demand.
 * `magic` is the file signature of an already-compiled artifact: when the
 * stored bytes start with it the file is served as-is, otherwise the stored
 * text is treated as code and passed to `compile` (see the magic-byte check
 * in the compile path below).
 */
const COMPILABLE_FORMATS: Record<string, CompilableFormat> = {
'.pptx': {
magic: ZIP_MAGIC, // OOXML containers are ZIP archives (PK\x03\x04)
compile: generatePptxFromCode,
contentType: 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
},
'.docx': {
magic: ZIP_MAGIC, // same ZIP signature as .pptx
compile: generateDocxFromCode,
contentType: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
},
'.pdf': {
magic: PDF_MAGIC, // %PDF-
compile: generatePdfFromCode,
contentType: 'application/pdf',
},
}
/** Upper bound on cached compiled documents kept in memory. */
const MAX_COMPILED_DOC_CACHE = 10

/** Compiled-document cache, keyed by content hash; FIFO-evicted at capacity. */
const compiledDocCache = new Map<string, Buffer>()

/**
 * Stores a compiled document buffer, evicting the oldest entry first once the
 * cache is at capacity.
 *
 * @param key - Content-hash cache key.
 * @param buffer - Compiled document bytes to cache.
 */
function compiledCacheSet(key: string, buffer: Buffer): void {
  if (compiledDocCache.size >= MAX_COMPILED_DOC_CACHE) {
    // Maps iterate in insertion order, so the first key is the oldest entry.
    // Guarding on undefined avoids the unchecked `as string` cast: the map is
    // provably non-empty here, but a guard is safer than an assertion.
    const oldestKey = compiledDocCache.keys().next().value
    if (oldestKey !== undefined) {
      compiledDocCache.delete(oldestKey)
    }
  }
  compiledDocCache.set(key, buffer)
}
async function compileDocumentIfNeeded(
buffer: Buffer,
filename: string,
workspaceId?: string,
raw?: boolean
): Promise<{ buffer: Buffer; contentType: string }> {
const isPptx = filename.toLowerCase().endsWith('.pptx')
if (raw || !isPptx || buffer.subarray(0, 4).equals(ZIP_MAGIC)) {
if (raw) return { buffer, contentType: getContentType(filename) }
const ext = filename.slice(filename.lastIndexOf('.')).toLowerCase()
const format = COMPILABLE_FORMATS[ext]
if (!format) return { buffer, contentType: getContentType(filename) }
const magicLen = format.magic.length
if (buffer.length >= magicLen && buffer.subarray(0, magicLen).equals(format.magic)) {
return { buffer, contentType: getContentType(filename) }
}
const code = buffer.toString('utf-8')
const cacheKey = createHash('sha256')
.update(ext)
.update(code)
.update(workspaceId ?? '')
.digest('hex')
const cached = compiledPptxCache.get(cacheKey)
const cached = compiledDocCache.get(cacheKey)
if (cached) {
return {
buffer: cached,
contentType: 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
}
return { buffer: cached, contentType: format.contentType }
}
const compiled = await generatePptxFromCode(code, workspaceId || '')
const compiled = await format.compile(code, workspaceId || '')
compiledCacheSet(cacheKey, compiled)
return {
buffer: compiled,
contentType: 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
}
return { buffer: compiled, contentType: format.contentType }
}
const STORAGE_KEY_PREFIX_RE = /^\d{13}-[a-z0-9]{7}-/
@@ -169,7 +199,7 @@ async function handleLocalFile(
const segment = filename.split('/').pop() || filename
const displayName = stripStorageKeyPrefix(segment)
const workspaceId = getWorkspaceIdForCompile(filename)
const { buffer: fileBuffer, contentType } = await compilePptxIfNeeded(
const { buffer: fileBuffer, contentType } = await compileDocumentIfNeeded(
rawBuffer,
displayName,
workspaceId,
@@ -226,7 +256,7 @@ async function handleCloudProxy(
const segment = cloudKey.split('/').pop() || 'download'
const displayName = stripStorageKeyPrefix(segment)
const workspaceId = getWorkspaceIdForCompile(cloudKey)
const { buffer: fileBuffer, contentType } = await compilePptxIfNeeded(
const { buffer: fileBuffer, contentType } = await compileDocumentIfNeeded(
rawBuffer,
displayName,
workspaceId,

View File

@@ -24,6 +24,27 @@ vi.mock('@/lib/auth/hybrid', () => ({
vi.mock('@/lib/execution/e2b', () => ({
executeInE2B: mockExecuteInE2B,
executeShellInE2B: vi.fn(),
}))
vi.mock('@/lib/copilot/request/tools/files', () => ({
FORMAT_TO_CONTENT_TYPE: {
json: 'application/json',
csv: 'text/csv',
txt: 'text/plain',
md: 'text/markdown',
html: 'text/html',
},
normalizeOutputWorkspaceFileName: vi.fn((p: string) => p.replace(/^files\//, '')),
resolveOutputFormat: vi.fn(() => 'json'),
}))
vi.mock('@/lib/uploads/contexts/workspace/workspace-file-manager', () => ({
uploadWorkspaceFile: vi.fn(),
}))
vi.mock('@/lib/workflows/utils', () => ({
getWorkflowById: vi.fn(),
}))
vi.mock('@/lib/core/config/feature-flags', () => ({
@@ -32,6 +53,7 @@ vi.mock('@/lib/core/config/feature-flags', () => ({
isProd: false,
isDev: false,
isTest: true,
isEmailVerificationEnabled: false,
}))
import { validateProxyUrl } from '@/lib/core/security/input-validation'

View File

@@ -1,11 +1,18 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { checkInternalAuth } from '@/lib/auth/hybrid'
import {
FORMAT_TO_CONTENT_TYPE,
normalizeOutputWorkspaceFileName,
resolveOutputFormat,
} from '@/lib/copilot/request/tools/files'
import { isE2bEnabled } from '@/lib/core/config/feature-flags'
import { generateRequestId } from '@/lib/core/utils/request'
import { executeInE2B } from '@/lib/execution/e2b'
import { executeInE2B, executeShellInE2B } from '@/lib/execution/e2b'
import { executeInIsolatedVM } from '@/lib/execution/isolated-vm'
import { CodeLanguage, DEFAULT_CODE_LANGUAGE, isValidCodeLanguage } from '@/lib/execution/languages'
import { uploadWorkspaceFile } from '@/lib/uploads/contexts/workspace/workspace-file-manager'
import { getWorkflowById } from '@/lib/workflows/utils'
import { escapeRegExp, normalizeName, REFERENCE } from '@/executor/constants'
import { type OutputSchema, resolveBlockReference } from '@/executor/utils/block-reference'
import { formatLiteralForCode } from '@/executor/utils/code-formatting'
@@ -580,6 +587,107 @@ function cleanStdout(stdout: string): string {
return stdout
}
/**
 * Uploads a file exported from the execution sandbox into workspace storage.
 *
 * Returns `null` when no `outputSandboxPath` was requested (caller continues
 * with its normal response); otherwise returns a terminal `NextResponse`:
 * - 400 when `outputPath` is missing or no workspace context can be resolved
 * - 500 when the sandbox file content was not captured
 * - success JSON with the uploaded file's id/name/url plus stdout
 */
async function maybeExportSandboxFileToWorkspace(args: {
authUserId: string
workflowId?: string
workspaceId?: string
outputPath?: string
outputFormat?: string
outputMimeType?: string
outputSandboxPath?: string
exportedFileContent?: string
stdout: string
executionTime: number
}) {
const {
authUserId,
workflowId,
workspaceId,
outputPath,
outputFormat,
outputMimeType,
outputSandboxPath,
exportedFileContent,
stdout,
executionTime,
} = args
// Export is opt-in: nothing to do unless a sandbox path was requested.
if (!outputSandboxPath) return null
// A destination inside the workspace is required to know where to save.
if (!outputPath) {
return NextResponse.json(
{
success: false,
error:
'outputSandboxPath requires outputPath. Set outputPath to the destination workspace file, e.g. "files/result.csv".',
output: { result: null, stdout: cleanStdout(stdout), executionTime },
},
{ status: 400 }
)
}
// Prefer an explicit workspaceId; otherwise derive it from the workflow.
const resolvedWorkspaceId =
workspaceId || (workflowId ? (await getWorkflowById(workflowId))?.workspaceId : undefined)
if (!resolvedWorkspaceId) {
return NextResponse.json(
{
success: false,
error: 'Workspace context required to save sandbox file to workspace',
output: { result: null, stdout: cleanStdout(stdout), executionTime },
},
{ status: 400 }
)
}
// `undefined` signals the sandbox file could not be read upstream.
if (exportedFileContent === undefined) {
return NextResponse.json(
{
success: false,
error: `Sandbox file "${outputSandboxPath}" was not found or could not be read`,
output: { result: null, stdout: cleanStdout(stdout), executionTime },
},
{ status: 500 }
)
}
const fileName = normalizeOutputWorkspaceFileName(outputPath)
// MIME types in FORMAT_TO_CONTENT_TYPE are treated as text; anything else is
// decoded as base64 below. NOTE(review): assumes the executor base64-encodes
// non-text exports - confirm against the sandbox export implementation.
const TEXT_MIMES = new Set(Object.values(FORMAT_TO_CONTENT_TYPE))
// Precedence: explicit MIME override, then format inferred from the file
// name/outputFormat, then a generic binary fallback.
const resolvedMimeType =
outputMimeType ||
FORMAT_TO_CONTENT_TYPE[resolveOutputFormat(fileName, outputFormat)] ||
'application/octet-stream'
const isBinary = !TEXT_MIMES.has(resolvedMimeType)
const fileBuffer = isBinary
? Buffer.from(exportedFileContent, 'base64')
: Buffer.from(exportedFileContent, 'utf-8')
const uploaded = await uploadWorkspaceFile(
resolvedWorkspaceId,
authUserId,
fileBuffer,
fileName,
resolvedMimeType
)
return NextResponse.json({
success: true,
output: {
result: {
message: `Sandbox file exported to files/${fileName}`,
fileId: uploaded.id,
fileName,
downloadUrl: uploaded.url,
sandboxPath: outputSandboxPath,
},
stdout: cleanStdout(stdout),
executionTime,
},
resources: [{ type: 'file', id: uploaded.id, title: fileName }],
})
}
export async function POST(req: NextRequest) {
const requestId = generateRequestId()
const startTime = Date.now()
@@ -603,12 +711,17 @@ export async function POST(req: NextRequest) {
params = {},
timeout = DEFAULT_EXECUTION_TIMEOUT_MS,
language = DEFAULT_CODE_LANGUAGE,
outputPath,
outputFormat,
outputMimeType,
outputSandboxPath,
envVars = {},
blockData = {},
blockNameMapping = {},
blockOutputSchemas = {},
workflowVariables = {},
workflowId,
workspaceId,
isCustomTool = false,
_sandboxFiles,
} = body
@@ -652,6 +765,83 @@ export async function POST(req: NextRequest) {
hasImports = jsImports.trim().length > 0 || hasRequireStatements
}
if (lang === CodeLanguage.Shell) {
if (!isE2bEnabled) {
throw new Error(
'Shell execution requires E2B to be enabled. Please contact your administrator to enable E2B.'
)
}
const shellEnvs: Record<string, string> = {}
for (const [k, v] of Object.entries(envVars)) {
shellEnvs[k] = String(v)
}
for (const [k, v] of Object.entries(contextVariables)) {
shellEnvs[k] = String(v)
}
logger.info(`[${requestId}] E2B shell execution`, {
enabled: isE2bEnabled,
hasApiKey: Boolean(process.env.E2B_API_KEY),
envVarCount: Object.keys(shellEnvs).length,
})
const execStart = Date.now()
const {
result: shellResult,
stdout: shellStdout,
sandboxId,
error: shellError,
exportedFileContent,
} = await executeShellInE2B({
code: resolvedCode,
envs: shellEnvs,
timeoutMs: timeout,
sandboxFiles: _sandboxFiles,
outputSandboxPath,
})
const executionTime = Date.now() - execStart
logger.info(`[${requestId}] E2B shell sandbox`, {
sandboxId,
stdoutPreview: shellStdout?.slice(0, 200),
error: shellError,
executionTime,
})
if (shellError) {
return NextResponse.json(
{
success: false,
error: shellError,
output: { result: null, stdout: cleanStdout(shellStdout), executionTime },
},
{ status: 500 }
)
}
if (outputSandboxPath) {
const fileExportResponse = await maybeExportSandboxFileToWorkspace({
authUserId: auth.userId,
workflowId,
workspaceId,
outputPath,
outputFormat,
outputMimeType,
outputSandboxPath,
exportedFileContent,
stdout: shellStdout,
executionTime,
})
if (fileExportResponse) return fileExportResponse
}
return NextResponse.json({
success: true,
output: { result: shellResult ?? null, stdout: cleanStdout(shellStdout), executionTime },
})
}
if (lang === CodeLanguage.Python && !isE2bEnabled) {
throw new Error(
'Python execution requires E2B to be enabled. Please contact your administrator to enable E2B, or use JavaScript instead.'
@@ -719,11 +909,13 @@ export async function POST(req: NextRequest) {
stdout: e2bStdout,
sandboxId,
error: e2bError,
exportedFileContent,
} = await executeInE2B({
code: codeForE2B,
language: CodeLanguage.JavaScript,
timeoutMs: timeout,
sandboxFiles: _sandboxFiles,
outputSandboxPath,
})
const executionTime = Date.now() - execStart
stdout += e2bStdout
@@ -752,6 +944,22 @@ export async function POST(req: NextRequest) {
)
}
if (outputSandboxPath) {
const fileExportResponse = await maybeExportSandboxFileToWorkspace({
authUserId: auth.userId,
workflowId,
workspaceId,
outputPath,
outputFormat,
outputMimeType,
outputSandboxPath,
exportedFileContent,
stdout,
executionTime,
})
if (fileExportResponse) return fileExportResponse
}
return NextResponse.json({
success: true,
output: { result: e2bResult ?? null, stdout: cleanStdout(stdout), executionTime },
@@ -783,11 +991,13 @@ export async function POST(req: NextRequest) {
stdout: e2bStdout,
sandboxId,
error: e2bError,
exportedFileContent,
} = await executeInE2B({
code: codeForE2B,
language: CodeLanguage.Python,
timeoutMs: timeout,
sandboxFiles: _sandboxFiles,
outputSandboxPath,
})
const executionTime = Date.now() - execStart
stdout += e2bStdout
@@ -816,6 +1026,22 @@ export async function POST(req: NextRequest) {
)
}
if (outputSandboxPath) {
const fileExportResponse = await maybeExportSandboxFileToWorkspace({
authUserId: auth.userId,
workflowId,
workspaceId,
outputPath,
outputFormat,
outputMimeType,
outputSandboxPath,
exportedFileContent,
stdout,
executionTime,
})
if (fileExportResponse) return fileExportResponse
}
return NextResponse.json({
success: true,
output: { result: e2bResult ?? null, stdout: cleanStdout(stdout), executionTime },

View File

@@ -4,19 +4,13 @@
import type { NextRequest } from 'next/server'
import { beforeEach, describe, expect, it, vi } from 'vitest'
const {
mockCheckHybridAuth,
mockGetDispatchJobRecord,
mockGetJobQueue,
mockVerifyWorkflowAccess,
mockGetWorkflowById,
} = vi.hoisted(() => ({
mockCheckHybridAuth: vi.fn(),
mockGetDispatchJobRecord: vi.fn(),
mockGetJobQueue: vi.fn(),
mockVerifyWorkflowAccess: vi.fn(),
mockGetWorkflowById: vi.fn(),
}))
const { mockCheckHybridAuth, mockGetJobQueue, mockVerifyWorkflowAccess, mockGetWorkflowById } =
vi.hoisted(() => ({
mockCheckHybridAuth: vi.fn(),
mockGetJobQueue: vi.fn(),
mockVerifyWorkflowAccess: vi.fn(),
mockGetWorkflowById: vi.fn(),
}))
vi.mock('@sim/logger', () => ({
createLogger: () => ({
@@ -32,19 +26,9 @@ vi.mock('@/lib/auth/hybrid', () => ({
}))
vi.mock('@/lib/core/async-jobs', () => ({
JOB_STATUS: {
PENDING: 'pending',
PROCESSING: 'processing',
COMPLETED: 'completed',
FAILED: 'failed',
},
getJobQueue: mockGetJobQueue,
}))
vi.mock('@/lib/core/workspace-dispatch/store', () => ({
getDispatchJobRecord: mockGetDispatchJobRecord,
}))
vi.mock('@/lib/core/utils/request', () => ({
generateRequestId: vi.fn().mockReturnValue('request-1'),
}))
@@ -89,72 +73,78 @@ describe('GET /api/jobs/[jobId]', () => {
})
})
it('returns dispatcher-aware waiting status with metadata', async () => {
mockGetDispatchJobRecord.mockResolvedValue({
id: 'dispatch-1',
workspaceId: 'workspace-1',
lane: 'runtime',
queueName: 'workflow-execution',
bullmqJobName: 'workflow-execution',
bullmqPayload: {},
metadata: {
workflowId: 'workflow-1',
},
priority: 10,
status: 'waiting',
createdAt: 1000,
admittedAt: 2000,
it('returns pending status for a queued job', async () => {
mockGetJobQueue.mockResolvedValue({
getJob: vi.fn().mockResolvedValue({
id: 'job-1',
type: 'workflow-execution',
payload: {},
status: 'pending',
createdAt: new Date('2025-01-01T00:00:00Z'),
attempts: 0,
maxAttempts: 1,
metadata: {
workflowId: 'workflow-1',
},
}),
})
const response = await GET(createMockRequest(), {
params: Promise.resolve({ jobId: 'dispatch-1' }),
params: Promise.resolve({ jobId: 'job-1' }),
})
const body = await response.json()
expect(response.status).toBe(200)
expect(body.status).toBe('waiting')
expect(body.metadata.queueName).toBe('workflow-execution')
expect(body.metadata.lane).toBe('runtime')
expect(body.metadata.workspaceId).toBe('workspace-1')
expect(body.status).toBe('pending')
})
it('returns completed output from dispatch state', async () => {
mockGetDispatchJobRecord.mockResolvedValue({
id: 'dispatch-2',
workspaceId: 'workspace-1',
lane: 'interactive',
queueName: 'workflow-execution',
bullmqJobName: 'direct-workflow-execution',
bullmqPayload: {},
metadata: {
workflowId: 'workflow-1',
},
priority: 1,
status: 'completed',
createdAt: 1000,
startedAt: 2000,
completedAt: 7000,
output: { success: true },
it('returns completed output from job', async () => {
mockGetJobQueue.mockResolvedValue({
getJob: vi.fn().mockResolvedValue({
id: 'job-2',
type: 'workflow-execution',
payload: {},
status: 'completed',
createdAt: new Date('2025-01-01T00:00:00Z'),
startedAt: new Date('2025-01-01T00:00:01Z'),
completedAt: new Date('2025-01-01T00:00:06Z'),
attempts: 1,
maxAttempts: 1,
output: { success: true },
metadata: {
workflowId: 'workflow-1',
},
}),
})
const response = await GET(createMockRequest(), {
params: Promise.resolve({ jobId: 'dispatch-2' }),
params: Promise.resolve({ jobId: 'job-2' }),
})
const body = await response.json()
expect(response.status).toBe(200)
expect(body.status).toBe('completed')
expect(body.output).toEqual({ success: true })
expect(body.metadata.duration).toBe(5000)
})
it('returns 404 when neither dispatch nor BullMQ job exists', async () => {
mockGetDispatchJobRecord.mockResolvedValue(null)
it('returns 404 when job does not exist', async () => {
const response = await GET(createMockRequest(), {
params: Promise.resolve({ jobId: 'missing-job' }),
})
expect(response.status).toBe(404)
})
it('returns 401 for unauthenticated requests', async () => {
mockCheckHybridAuth.mockResolvedValue({
success: false,
error: 'Not authenticated',
})
const response = await GET(createMockRequest(), {
params: Promise.resolve({ jobId: 'job-1' }),
})
expect(response.status).toBe(401)
})
})

View File

@@ -2,13 +2,27 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { getJobQueue } from '@/lib/core/async-jobs'
import type { Job } from '@/lib/core/async-jobs/types'
import { generateRequestId } from '@/lib/core/utils/request'
import { presentDispatchOrJobStatus } from '@/lib/core/workspace-dispatch/status'
import { getDispatchJobRecord } from '@/lib/core/workspace-dispatch/store'
import { createErrorResponse } from '@/app/api/workflows/utils'
const logger = createLogger('TaskStatusAPI')
/**
 * Shapes a queue `Job` into the public task-status response payload.
 *
 * Dates are serialized to ISO-8601 strings; `startedAt`/`completedAt` are
 * optional on `Job` and are passed through as `undefined` when absent so the
 * caller can omit them from the JSON response.
 *
 * @param job - Job record fetched from the async job queue.
 * @returns `{ status, metadata, output, error }` where `output`/`error` may be
 *          `undefined` for jobs that have not finished or did not fail.
 */
function presentJobStatus(job: Job) {
  return {
    status: job.status,
    metadata: {
      createdAt: job.createdAt.toISOString(),
      // Optional lifecycle timestamps — only set once the job has started/finished.
      startedAt: job.startedAt?.toISOString(),
      completedAt: job.completedAt?.toISOString(),
      attempts: job.attempts,
      maxAttempts: job.maxAttempts,
    },
    output: job.output,
    error: job.error,
  }
}
export async function GET(
request: NextRequest,
{ params }: { params: Promise<{ jobId: string }> }
@@ -25,15 +39,14 @@ export async function GET(
const authenticatedUserId = authResult.userId
const dispatchJob = await getDispatchJobRecord(taskId)
const jobQueue = await getJobQueue()
const job = dispatchJob ? null : await jobQueue.getJob(taskId)
const job = await jobQueue.getJob(taskId)
if (!job && !dispatchJob) {
if (!job) {
return createErrorResponse('Task not found', 404)
}
const metadataToCheck = dispatchJob?.metadata ?? job?.metadata
const metadataToCheck = job.metadata
if (metadataToCheck?.workflowId) {
const { verifyWorkflowAccess } = await import('@/socket/middleware/permissions')
@@ -61,7 +74,7 @@ export async function GET(
return createErrorResponse('Access denied', 403)
}
const presented = presentDispatchOrJobStatus(dispatchJob, job)
const presented = presentJobStatus(job)
const response: any = {
success: true,
taskId,
@@ -71,9 +84,6 @@ export async function GET(
if (presented.output !== undefined) response.output = presented.output
if (presented.error !== undefined) response.error = presented.error
if (presented.estimatedDuration !== undefined) {
response.estimatedDuration = presented.estimatedDuration
}
return NextResponse.json(response)
} catch (error: any) {

View File

@@ -18,14 +18,11 @@ import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { validateOAuthAccessToken } from '@/lib/auth/oauth-token'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { createRunSegment } from '@/lib/copilot/async-runs/repository'
import { ORCHESTRATION_TIMEOUT_MS, SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import { orchestrateSubagentStream } from '@/lib/copilot/orchestrator/subagent'
import {
executeToolServerSide,
prepareExecutionContext,
} from '@/lib/copilot/orchestrator/tool-executor'
import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run'
import { orchestrateSubagentStream } from '@/lib/copilot/request/subagent'
import { ensureHandlersRegistered, executeTool } from '@/lib/copilot/tool-executor'
import { prepareExecutionContext } from '@/lib/copilot/tools/handlers/context'
import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/definitions'
import { env } from '@/lib/core/config/env'
import { RateLimiter } from '@/lib/core/rate-limiter'
@@ -645,7 +642,8 @@ async function handleDirectToolCall(
startTime: Date.now(),
}
const result = await executeToolServerSide(toolCall, execContext)
ensureHandlersRegistered()
const result = await executeTool(toolCall.name, toolCall.params || {}, execContext)
return {
content: [
@@ -728,25 +726,10 @@ async function handleBuildToolCall(
chatId,
}
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
const messageId = requestPayload.messageId as string
await createRunSegment({
id: runId,
executionId,
chatId,
userId,
workflowId: resolved.workflowId,
streamId: messageId,
}).catch(() => {})
const result = await orchestrateCopilotStream(requestPayload, {
const result = await runCopilotLifecycle(requestPayload, {
userId,
workflowId: resolved.workflowId,
chatId,
executionId,
runId,
goRoute: '/api/mcp',
autoExecuteTools: true,
timeout: ORCHESTRATION_TIMEOUT_MS,

View File

@@ -5,18 +5,26 @@ import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { resolveOrCreateChat } from '@/lib/copilot/chat-lifecycle'
import { buildCopilotRequestPayload } from '@/lib/copilot/chat-payload'
import { resolveOrCreateChat } from '@/lib/copilot/chat/lifecycle'
import { buildCopilotRequestPayload } from '@/lib/copilot/chat/payload'
import {
buildPersistedAssistantMessage,
buildPersistedUserMessage,
} from '@/lib/copilot/chat/persisted-message'
import {
processContextsServer,
resolveActiveResourceContext,
} from '@/lib/copilot/chat/process-contents'
import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context'
import { createRequestTracker, createUnauthorizedResponse } from '@/lib/copilot/request/http'
import { createSSEStream, SSE_RESPONSE_HEADERS } from '@/lib/copilot/request/lifecycle/start'
import {
acquirePendingChatStream,
createSSEStream,
SSE_RESPONSE_HEADERS,
} from '@/lib/copilot/chat-streaming'
import type { OrchestratorResult } from '@/lib/copilot/orchestrator/types'
import { processContextsServer, resolveActiveResourceContext } from '@/lib/copilot/process-contents'
import { createRequestTracker, createUnauthorizedResponse } from '@/lib/copilot/request-helpers'
import { taskPubSub } from '@/lib/copilot/task-events'
import { generateWorkspaceContext } from '@/lib/copilot/workspace-context'
getPendingChatStreamId,
releasePendingChatStream,
} from '@/lib/copilot/request/session'
import type { OrchestratorResult } from '@/lib/copilot/request/types'
import { taskPubSub } from '@/lib/copilot/tasks'
import {
assertActiveWorkspaceAccess,
getUserEntityPermissions,
@@ -37,7 +45,6 @@ const FileAttachmentSchema = z.object({
const ResourceAttachmentSchema = z.object({
type: z.enum(['workflow', 'table', 'file', 'knowledgebase']),
id: z.string().min(1),
title: z.string().optional(),
active: z.boolean().optional(),
})
@@ -87,7 +94,9 @@ const MothershipMessageSchema = z.object({
*/
export async function POST(req: NextRequest) {
const tracker = createRequestTracker()
let userMessageIdForLogs: string | undefined
let lockChatId: string | undefined
let lockStreamId = ''
let chatStreamLockAcquired = false
try {
const session = await getSession()
@@ -110,27 +119,23 @@ export async function POST(req: NextRequest) {
} = MothershipMessageSchema.parse(body)
const userMessageId = providedMessageId || crypto.randomUUID()
userMessageIdForLogs = userMessageId
const reqLogger = logger.withMetadata({
requestId: tracker.requestId,
messageId: userMessageId,
})
lockStreamId = userMessageId
reqLogger.info('Received mothership chat start request', {
workspaceId,
chatId,
createNewChat,
hasContexts: Array.isArray(contexts) && contexts.length > 0,
contextsCount: Array.isArray(contexts) ? contexts.length : 0,
hasResourceAttachments: Array.isArray(resourceAttachments) && resourceAttachments.length > 0,
resourceAttachmentCount: Array.isArray(resourceAttachments) ? resourceAttachments.length : 0,
hasFileAttachments: Array.isArray(fileAttachments) && fileAttachments.length > 0,
fileAttachmentCount: Array.isArray(fileAttachments) ? fileAttachments.length : 0,
})
// Phase 1: workspace access + chat resolution in parallel
const [accessResult, chatResult] = await Promise.allSettled([
assertActiveWorkspaceAccess(workspaceId, authenticatedUserId),
chatId || createNewChat
? resolveOrCreateChat({
chatId,
userId: authenticatedUserId,
workspaceId,
model: 'claude-opus-4-6',
type: 'mothership',
})
: null,
])
try {
await assertActiveWorkspaceAccess(workspaceId, authenticatedUserId)
} catch {
if (accessResult.status === 'rejected') {
return NextResponse.json({ error: 'Workspace not found or access denied' }, { status: 403 })
}
@@ -138,18 +143,12 @@ export async function POST(req: NextRequest) {
let conversationHistory: any[] = []
let actualChatId = chatId
if (chatId || createNewChat) {
const chatResult = await resolveOrCreateChat({
chatId,
userId: authenticatedUserId,
workspaceId,
model: 'claude-opus-4-6',
type: 'mothership',
})
currentChat = chatResult.chat
actualChatId = chatResult.chatId || chatId
conversationHistory = Array.isArray(chatResult.conversationHistory)
? chatResult.conversationHistory
if (chatResult.status === 'fulfilled' && chatResult.value) {
const resolved = chatResult.value
currentChat = resolved.chat
actualChatId = resolved.chatId || chatId
conversationHistory = Array.isArray(resolved.conversationHistory)
? resolved.conversationHistory
: []
if (chatId && !currentChat) {
@@ -157,76 +156,73 @@ export async function POST(req: NextRequest) {
}
}
let agentContexts: Array<{ type: string; content: string }> = []
if (Array.isArray(contexts) && contexts.length > 0) {
try {
agentContexts = await processContextsServer(
contexts as any,
authenticatedUserId,
message,
workspaceId,
actualChatId
if (actualChatId) {
chatStreamLockAcquired = await acquirePendingChatStream(actualChatId, userMessageId)
if (!chatStreamLockAcquired) {
const activeStreamId = await getPendingChatStreamId(actualChatId)
return NextResponse.json(
{
error: 'A response is already in progress for this chat.',
...(activeStreamId ? { activeStreamId } : {}),
},
{ status: 409 }
)
} catch (e) {
reqLogger.error('Failed to process contexts', e)
}
lockChatId = actualChatId
}
if (Array.isArray(resourceAttachments) && resourceAttachments.length > 0) {
const results = await Promise.allSettled(
resourceAttachments.map(async (r) => {
const ctx = await resolveActiveResourceContext(
r.type,
r.id,
workspaceId,
// Phase 2: contexts + workspace context + user message persistence in parallel
const contextPromise = (async () => {
let agentCtxs: Array<{ type: string; content: string }> = []
if (Array.isArray(contexts) && contexts.length > 0) {
try {
agentCtxs = await processContextsServer(
contexts as any,
authenticatedUserId,
message,
workspaceId,
actualChatId
)
if (!ctx) return null
return {
...ctx,
tag: r.active ? '@active_tab' : '@open_tab',
}
})
)
for (const result of results) {
if (result.status === 'fulfilled' && result.value) {
agentContexts.push(result.value)
} else if (result.status === 'rejected') {
reqLogger.error('Failed to resolve resource attachment', result.reason)
} catch (e) {
logger.error(`[${tracker.requestId}] Failed to process contexts`, e)
}
}
}
if (actualChatId) {
const userMsg = {
id: userMessageId,
role: 'user' as const,
content: message,
timestamp: new Date().toISOString(),
...(fileAttachments &&
fileAttachments.length > 0 && {
fileAttachments: fileAttachments.map((f) => ({
id: f.id,
key: f.key,
filename: f.filename,
media_type: f.media_type,
size: f.size,
})),
}),
...(contexts &&
contexts.length > 0 && {
contexts: contexts.map((c) => ({
kind: c.kind,
label: c.label,
...(c.workflowId && { workflowId: c.workflowId }),
...(c.knowledgeId && { knowledgeId: c.knowledgeId }),
...(c.tableId && { tableId: c.tableId }),
...(c.fileId && { fileId: c.fileId }),
})),
}),
if (Array.isArray(resourceAttachments) && resourceAttachments.length > 0) {
const results = await Promise.allSettled(
resourceAttachments.map(async (r) => {
const ctx = await resolveActiveResourceContext(
r.type,
r.id,
workspaceId,
authenticatedUserId,
actualChatId
)
if (!ctx) return null
return { ...ctx, tag: r.active ? '@active_tab' : '@open_tab' }
})
)
for (const result of results) {
if (result.status === 'fulfilled' && result.value) {
agentCtxs.push(result.value)
} else if (result.status === 'rejected') {
logger.error(
`[${tracker.requestId}] Failed to resolve resource attachment`,
result.reason
)
}
}
}
return agentCtxs
})()
const userMsgPromise = (async () => {
if (!actualChatId) return
const userMsg = buildPersistedUserMessage({
id: userMessageId,
content: message,
fileAttachments,
contexts,
})
const [updated] = await db
.update(copilotChats)
.set({
@@ -242,11 +238,15 @@ export async function POST(req: NextRequest) {
conversationHistory = freshMessages.filter((m: any) => m.id !== userMessageId)
taskPubSub?.publishStatusChanged({ workspaceId, chatId: actualChatId, type: 'started' })
}
}
})()
const [workspaceContext, userPermission] = await Promise.all([
generateWorkspaceContext(workspaceId, authenticatedUserId),
getUserEntityPermissions(authenticatedUserId, 'workspace', workspaceId).catch(() => null),
const [agentContexts, [workspaceContext, userPermission]] = await Promise.all([
contextPromise,
Promise.all([
generateWorkspaceContext(workspaceId, authenticatedUserId),
getUserEntityPermissions(authenticatedUserId, 'workspace', workspaceId).catch(() => null),
]),
userMsgPromise,
])
const requestPayload = await buildCopilotRequestPayload(
@@ -267,19 +267,6 @@ export async function POST(req: NextRequest) {
{ selectedModel: '' }
)
if (actualChatId) {
const acquired = await acquirePendingChatStream(actualChatId, userMessageId)
if (!acquired) {
return NextResponse.json(
{
error:
'A response is already in progress for this chat. Wait for it to finish or use Stop.',
},
{ status: 409 }
)
}
}
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
const stream = createSSEStream({
@@ -295,7 +282,6 @@ export async function POST(req: NextRequest) {
titleModel: 'claude-opus-4-6',
requestId: tracker.requestId,
workspaceId,
pendingChatStreamAlreadyRegistered: Boolean(actualChatId),
orchestrateOptions: {
userId: authenticatedUserId,
workspaceId,
@@ -309,46 +295,7 @@ export async function POST(req: NextRequest) {
if (!actualChatId) return
if (!result.success) return
const assistantMessage: Record<string, unknown> = {
id: crypto.randomUUID(),
role: 'assistant' as const,
content: result.content,
timestamp: new Date().toISOString(),
...(result.requestId ? { requestId: result.requestId } : {}),
}
if (result.toolCalls.length > 0) {
assistantMessage.toolCalls = result.toolCalls
}
if (result.contentBlocks.length > 0) {
assistantMessage.contentBlocks = result.contentBlocks.map((block) => {
const stored: Record<string, unknown> = { type: block.type }
if (block.content) stored.content = block.content
if (block.type === 'tool_call' && block.toolCall) {
const state =
block.toolCall.result?.success !== undefined
? block.toolCall.result.success
? 'success'
: 'error'
: block.toolCall.status
const isSubagentTool = !!block.calledBy
const isNonTerminal =
state === 'cancelled' || state === 'pending' || state === 'executing'
stored.toolCall = {
id: block.toolCall.id,
name: block.toolCall.name,
state,
...(isSubagentTool && isNonTerminal ? {} : { result: block.toolCall.result }),
...(isSubagentTool && isNonTerminal
? {}
: block.toolCall.params
? { params: block.toolCall.params }
: {}),
...(block.calledBy ? { calledBy: block.calledBy } : {}),
}
}
return stored
})
}
const assistantMessage = buildPersistedAssistantMessage(result, result.requestId)
try {
const [row] = await db
@@ -381,7 +328,7 @@ export async function POST(req: NextRequest) {
})
}
} catch (error) {
reqLogger.error('Failed to persist chat messages', {
logger.error(`[${tracker.requestId}] Failed to persist chat messages`, {
chatId: actualChatId,
error: error instanceof Error ? error.message : 'Unknown error',
})
@@ -392,6 +339,9 @@ export async function POST(req: NextRequest) {
return new Response(stream, { headers: SSE_RESPONSE_HEADERS })
} catch (error) {
if (chatStreamLockAcquired && lockChatId && lockStreamId) {
await releasePendingChatStream(lockChatId, lockStreamId)
}
if (error instanceof z.ZodError) {
return NextResponse.json(
{ error: 'Invalid request data', details: error.errors },
@@ -399,11 +349,9 @@ export async function POST(req: NextRequest) {
)
}
logger
.withMetadata({ requestId: tracker.requestId, messageId: userMessageIdForLogs })
.error('Error handling mothership chat', {
error: error instanceof Error ? error.message : 'Unknown error',
})
logger.error(`[${tracker.requestId}] Error handling mothership chat:`, {
error: error instanceof Error ? error.message : 'Unknown error',
})
return NextResponse.json(
{ error: error instanceof Error ? error.message : 'Internal server error' },

View File

@@ -5,8 +5,9 @@ import { and, eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { releasePendingChatStream } from '@/lib/copilot/chat-streaming'
import { taskPubSub } from '@/lib/copilot/task-events'
import { normalizeMessage, type PersistedMessage } from '@/lib/copilot/chat/persisted-message'
import { releasePendingChatStream } from '@/lib/copilot/request/session'
import { taskPubSub } from '@/lib/copilot/tasks'
const logger = createLogger('MothershipChatStopAPI')
@@ -26,15 +27,25 @@ const StoredToolCallSchema = z
display: z
.object({
text: z.string().optional(),
title: z.string().optional(),
phaseLabel: z.string().optional(),
})
.optional(),
calledBy: z.string().optional(),
durationMs: z.number().optional(),
error: z.string().optional(),
})
.nullable()
const ContentBlockSchema = z.object({
type: z.string(),
lane: z.enum(['main', 'subagent']).optional(),
content: z.string().optional(),
channel: z.enum(['assistant', 'thinking']).optional(),
phase: z.enum(['call', 'args_delta', 'result']).optional(),
kind: z.enum(['subagent', 'structured_result', 'subagent_result']).optional(),
lifecycle: z.enum(['start', 'end']).optional(),
status: z.enum(['complete', 'error', 'cancelled']).optional(),
toolCall: StoredToolCallSchema.optional(),
})
@@ -70,15 +81,14 @@ export async function POST(req: NextRequest) {
const hasBlocks = Array.isArray(contentBlocks) && contentBlocks.length > 0
if (hasContent || hasBlocks) {
const assistantMessage: Record<string, unknown> = {
const normalized = normalizeMessage({
id: crypto.randomUUID(),
role: 'assistant' as const,
role: 'assistant',
content,
timestamp: new Date().toISOString(),
}
if (hasBlocks) {
assistantMessage.contentBlocks = contentBlocks
}
...(hasBlocks ? { contentBlocks } : {}),
})
const assistantMessage: PersistedMessage = normalized
setClause.messages = sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb`
}

View File

@@ -4,15 +4,15 @@ import { createLogger } from '@sim/logger'
import { and, eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat-lifecycle'
import { getStreamMeta, readStreamEvents } from '@/lib/copilot/orchestrator/stream/buffer'
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import { taskPubSub } from '@/lib/copilot/task-events'
} from '@/lib/copilot/request/http'
import { readEvents } from '@/lib/copilot/request/session/buffer'
import { taskPubSub } from '@/lib/copilot/tasks'
const logger = createLogger('MothershipChatAPI')
@@ -46,29 +46,24 @@ export async function GET(
}
let streamSnapshot: {
events: Array<{ eventId: number; streamId: string; event: Record<string, unknown> }>
events: unknown[]
status: string
} | null = null
if (chat.conversationId) {
try {
const [meta, events] = await Promise.all([
getStreamMeta(chat.conversationId),
readStreamEvents(chat.conversationId, 0),
])
const events = await readEvents(chat.conversationId, '0')
streamSnapshot = {
events: events || [],
status: meta?.status || 'unknown',
status: events.length > 0 ? 'active' : 'unknown',
}
} catch (error) {
logger
.withMetadata({ messageId: chat.conversationId || undefined })
.warn('Failed to read stream snapshot for mothership chat', {
chatId,
conversationId: chat.conversationId,
error: error instanceof Error ? error.message : String(error),
})
logger.warn('Failed to read stream snapshot for mothership chat', {
chatId,
conversationId: chat.conversationId,
error: error instanceof Error ? error.message : String(error),
})
}
}

View File

@@ -0,0 +1,43 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request/http'
const logger = createLogger('MarkTaskReadAPI')

// Request body: the copilot chat to mark as read. chatId must be non-empty.
const MarkReadSchema = z.object({
  chatId: z.string().min(1),
})

/**
 * Marks a copilot chat's task activity as read for the authenticated user.
 *
 * Bumps `lastSeenAt` to `GREATEST(updatedAt, NOW())` so the chat cannot remain
 * "unread" even if `updatedAt` is ahead of the database clock (e.g. an
 * in-flight write landing between the read and this update).
 *
 * Responses:
 *  - 200 `{ success: true }` — also returned when `chatId` matches no row for
 *    this user; the update is then a no-op. NOTE(review): confirm a silent
 *    no-op (rather than 404) is the intended contract here.
 *  - 401 when there is no authenticated session.
 *  - 400 when the body fails schema validation.
 *  - 500 on any other error.
 */
export async function POST(request: NextRequest) {
  try {
    // Session-cookie auth only; API-key auth is deliberately not accepted here.
    const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly()
    if (!isAuthenticated || !userId) {
      return createUnauthorizedResponse()
    }
    const body = await request.json()
    const { chatId } = MarkReadSchema.parse(body)
    // Scoped to the caller's own chat via the userId predicate — a user can
    // never mark another user's chat as read.
    await db
      .update(copilotChats)
      .set({ lastSeenAt: sql`GREATEST(${copilotChats.updatedAt}, NOW())` })
      .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, userId)))
    return NextResponse.json({ success: true })
  } catch (error) {
    if (error instanceof z.ZodError) {
      return createBadRequestResponse('chatId is required')
    }
    logger.error('Error marking task as read:', error)
    return createInternalServerErrorResponse('Failed to mark task as read')
  }
}

View File

@@ -9,8 +9,8 @@ import {
createBadRequestResponse,
createInternalServerErrorResponse,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import { taskPubSub } from '@/lib/copilot/task-events'
} from '@/lib/copilot/request/http'
import { taskPubSub } from '@/lib/copilot/tasks'
import { assertActiveWorkspaceAccess } from '@/lib/workspaces/permissions/utils'
const logger = createLogger('MothershipChatsAPI')
@@ -38,7 +38,7 @@ export async function GET(request: NextRequest) {
id: copilotChats.id,
title: copilotChats.title,
updatedAt: copilotChats.updatedAt,
conversationId: copilotChats.conversationId,
activeStreamId: copilotChats.conversationId,
lastSeenAt: copilotChats.lastSeenAt,
})
.from(copilotChats)

View File

@@ -7,7 +7,7 @@
* Auth is handled via session cookies (EventSource sends cookies automatically).
*/
import { taskPubSub } from '@/lib/copilot/task-events'
import { taskPubSub } from '@/lib/copilot/tasks'
import { createWorkspaceSSE } from '@/lib/events/sse-endpoint'
export const dynamic = 'force-dynamic'

View File

@@ -2,10 +2,9 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { checkInternalAuth } from '@/lib/auth/hybrid'
import { createRunSegment } from '@/lib/copilot/async-runs/repository'
import { buildIntegrationToolSchemas } from '@/lib/copilot/chat-payload'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import { generateWorkspaceContext } from '@/lib/copilot/workspace-context'
import { buildIntegrationToolSchemas } from '@/lib/copilot/chat/payload'
import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context'
import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run'
import {
assertActiveWorkspaceAccess,
getUserEntityPermissions,
@@ -72,34 +71,25 @@ export async function POST(req: NextRequest) {
...(userPermission ? { userPermission } : {}),
}
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
await createRunSegment({
id: runId,
executionId,
chatId: effectiveChatId,
userId,
workspaceId,
streamId: messageId,
}).catch(() => {})
const result = await orchestrateCopilotStream(requestPayload, {
const result = await runCopilotLifecycle(requestPayload, {
userId,
workspaceId,
chatId: effectiveChatId,
executionId,
runId,
goRoute: '/api/mothership/execute',
autoExecuteTools: true,
interactive: false,
})
if (!result.success) {
reqLogger.error('Mothership execute failed', {
error: result.error,
errors: result.errors,
})
logger.error(
messageId
? `Mothership execute failed [messageId:${messageId}]`
: 'Mothership execute failed',
{
error: result.error,
errors: result.errors,
}
)
return NextResponse.json(
{
error: result.error || 'Mothership execution failed',
@@ -135,9 +125,12 @@ export async function POST(req: NextRequest) {
)
}
logger.withMetadata({ messageId }).error('Mothership execute error', {
error: error instanceof Error ? error.message : 'Unknown error',
})
logger.error(
messageId ? `Mothership execute error [messageId:${messageId}]` : 'Mothership execute error',
{
error: error instanceof Error ? error.message : 'Unknown error',
}
)
return NextResponse.json(
{ error: error instanceof Error ? error.message : 'Internal server error' },

View File

@@ -14,7 +14,6 @@ const {
mockDbReturning,
mockDbUpdate,
mockEnqueue,
mockEnqueueWorkspaceDispatch,
mockStartJob,
mockCompleteJob,
mockMarkJobFailed,
@@ -24,7 +23,6 @@ const {
const mockDbSet = vi.fn().mockReturnValue({ where: mockDbWhere })
const mockDbUpdate = vi.fn().mockReturnValue({ set: mockDbSet })
const mockEnqueue = vi.fn().mockResolvedValue('job-id-1')
const mockEnqueueWorkspaceDispatch = vi.fn().mockResolvedValue('job-id-1')
const mockStartJob = vi.fn().mockResolvedValue(undefined)
const mockCompleteJob = vi.fn().mockResolvedValue(undefined)
const mockMarkJobFailed = vi.fn().mockResolvedValue(undefined)
@@ -42,7 +40,6 @@ const {
mockDbReturning,
mockDbUpdate,
mockEnqueue,
mockEnqueueWorkspaceDispatch,
mockStartJob,
mockCompleteJob,
mockMarkJobFailed,
@@ -75,15 +72,6 @@ vi.mock('@/lib/core/async-jobs', () => ({
shouldExecuteInline: vi.fn().mockReturnValue(false),
}))
vi.mock('@/lib/core/bullmq', () => ({
isBullMQEnabled: vi.fn().mockReturnValue(true),
createBullMQJobData: vi.fn((payload: unknown) => ({ payload })),
}))
vi.mock('@/lib/core/workspace-dispatch', () => ({
enqueueWorkspaceDispatch: mockEnqueueWorkspaceDispatch,
}))
vi.mock('@/lib/workflows/utils', () => ({
getWorkflowById: vi.fn().mockResolvedValue({
id: 'workflow-1',
@@ -246,29 +234,19 @@ describe('Scheduled Workflow Execution API Route', () => {
expect(data).toHaveProperty('executedCount', 2)
})
it('should queue mothership jobs to BullMQ when available', async () => {
it('should execute mothership jobs inline', async () => {
mockDbReturning.mockReturnValueOnce([]).mockReturnValueOnce(SINGLE_JOB)
const response = await GET(createMockRequest())
expect(response.status).toBe(200)
expect(mockEnqueueWorkspaceDispatch).toHaveBeenCalledWith(
expect(mockExecuteJobInline).toHaveBeenCalledWith(
expect.objectContaining({
workspaceId: 'workspace-1',
lane: 'runtime',
queueName: 'mothership-job-execution',
bullmqJobName: 'mothership-job-execution',
bullmqPayload: {
payload: {
scheduleId: 'job-1',
cronExpression: '0 * * * *',
failedCount: 0,
now: expect.any(String),
},
},
scheduleId: 'job-1',
cronExpression: '0 * * * *',
failedCount: 0,
})
)
expect(mockExecuteJobInline).not.toHaveBeenCalled()
})
it('should enqueue preassigned correlation metadata for schedules', async () => {
@@ -277,25 +255,23 @@ describe('Scheduled Workflow Execution API Route', () => {
const response = await GET(createMockRequest())
expect(response.status).toBe(200)
expect(mockEnqueueWorkspaceDispatch).toHaveBeenCalledWith(
expect(mockEnqueue).toHaveBeenCalledWith(
'schedule-execution',
expect.objectContaining({
id: 'schedule-execution-1',
workspaceId: 'workspace-1',
lane: 'runtime',
queueName: 'schedule-execution',
bullmqJobName: 'schedule-execution',
metadata: {
scheduleId: 'schedule-1',
workflowId: 'workflow-1',
executionId: 'schedule-execution-1',
}),
expect.objectContaining({
metadata: expect.objectContaining({
workflowId: 'workflow-1',
correlation: {
correlation: expect.objectContaining({
executionId: 'schedule-execution-1',
requestId: 'test-request-id',
source: 'schedule',
workflowId: 'workflow-1',
scheduleId: 'schedule-1',
triggerType: 'schedule',
scheduledFor: '2025-01-01T00:00:00.000Z',
},
},
}),
}),
})
)
})

View File

@@ -5,9 +5,7 @@ import { type NextRequest, NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { verifyCronAuth } from '@/lib/auth/internal'
import { getJobQueue, shouldExecuteInline } from '@/lib/core/async-jobs'
import { createBullMQJobData, isBullMQEnabled } from '@/lib/core/bullmq'
import { generateRequestId } from '@/lib/core/utils/request'
import { enqueueWorkspaceDispatch } from '@/lib/core/workspace-dispatch'
import {
executeJobInline,
executeScheduleJob,
@@ -121,38 +119,13 @@ export async function GET(request: NextRequest) {
: null
const resolvedWorkspaceId = resolvedWorkflow?.workspaceId
let jobId: string
if (isBullMQEnabled()) {
if (!resolvedWorkspaceId) {
throw new Error(
`Missing workspace for scheduled workflow ${schedule.workflowId}; refusing to bypass workspace admission`
)
}
jobId = await enqueueWorkspaceDispatch({
id: executionId,
workspaceId: resolvedWorkspaceId,
lane: 'runtime',
queueName: 'schedule-execution',
bullmqJobName: 'schedule-execution',
bullmqPayload: createBullMQJobData(payload, {
workflowId: schedule.workflowId ?? undefined,
correlation,
}),
metadata: {
workflowId: schedule.workflowId ?? undefined,
correlation,
},
})
} else {
jobId = await jobQueue.enqueue('schedule-execution', payload, {
metadata: {
workflowId: schedule.workflowId ?? undefined,
workspaceId: resolvedWorkspaceId ?? undefined,
correlation,
},
})
}
const jobId = await jobQueue.enqueue('schedule-execution', payload, {
metadata: {
workflowId: schedule.workflowId ?? undefined,
workspaceId: resolvedWorkspaceId ?? undefined,
correlation,
},
})
logger.info(
`[${requestId}] Queued schedule execution task ${jobId} for workflow ${schedule.workflowId}`
)
@@ -204,7 +177,7 @@ export async function GET(request: NextRequest) {
}
})
// Mothership jobs use BullMQ when available, otherwise direct inline execution.
// Mothership jobs execute inline directly.
const jobPromises = dueJobs.map(async (job) => {
const queueTime = job.lastQueuedAt ?? queuedAt
const payload = {
@@ -215,24 +188,7 @@ export async function GET(request: NextRequest) {
}
try {
if (isBullMQEnabled()) {
if (!job.sourceWorkspaceId || !job.sourceUserId) {
throw new Error(`Mothership job ${job.id} is missing workspace/user ownership`)
}
await enqueueWorkspaceDispatch({
workspaceId: job.sourceWorkspaceId!,
lane: 'runtime',
queueName: 'mothership-job-execution',
bullmqJobName: 'mothership-job-execution',
bullmqPayload: createBullMQJobData(payload),
metadata: {
userId: job.sourceUserId,
},
})
} else {
await executeJobInline(payload)
}
await executeJobInline(payload)
} catch (error) {
logger.error(`[${requestId}] Job execution failed for ${job.id}`, {
error: error instanceof Error ? error.message : String(error),

View File

@@ -3,7 +3,7 @@ import { templates } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { checkInternalApiKey } from '@/lib/copilot/utils'
import { checkInternalApiKey } from '@/lib/copilot/request/http'
import { generateRequestId } from '@/lib/core/utils/request'
import { sanitizeForCopilot } from '@/lib/workflows/sanitization/json-sanitizer'

View File

@@ -1,9 +1,8 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { createRunSegment } from '@/lib/copilot/async-runs/repository'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/models'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/constants'
import { runCopilotLifecycle } from '@/lib/copilot/request/lifecycle/run'
import { getWorkflowById, resolveWorkflowIdForUser } from '@/lib/workflows/utils'
import { authenticateV1Request } from '@/app/api/v1/auth'
@@ -83,15 +82,19 @@ export async function POST(req: NextRequest) {
const chatId = parsed.chatId || crypto.randomUUID()
messageId = crypto.randomUUID()
const reqLogger = logger.withMetadata({ messageId })
reqLogger.info('Received headless copilot chat start request', {
workflowId: resolved.workflowId,
workflowName: parsed.workflowName,
chatId,
mode: transportMode,
autoExecuteTools: parsed.autoExecuteTools,
timeout: parsed.timeout,
})
logger.info(
messageId
? `Received headless copilot chat start request [messageId:${messageId}]`
: 'Received headless copilot chat start request',
{
workflowId: resolved.workflowId,
workflowName: parsed.workflowName,
chatId,
mode: transportMode,
autoExecuteTools: parsed.autoExecuteTools,
timeout: parsed.timeout,
}
)
const requestPayload = {
message: parsed.message,
workflowId: resolved.workflowId,
@@ -102,24 +105,10 @@ export async function POST(req: NextRequest) {
chatId,
}
const executionId = crypto.randomUUID()
const runId = crypto.randomUUID()
await createRunSegment({
id: runId,
executionId,
chatId,
userId: auth.userId,
workflowId: resolved.workflowId,
streamId: messageId,
}).catch(() => {})
const result = await orchestrateCopilotStream(requestPayload, {
const result = await runCopilotLifecycle(requestPayload, {
userId: auth.userId,
workflowId: resolved.workflowId,
chatId,
executionId,
runId,
goRoute: '/api/mcp',
autoExecuteTools: parsed.autoExecuteTools,
timeout: parsed.timeout,
@@ -141,9 +130,14 @@ export async function POST(req: NextRequest) {
)
}
logger.withMetadata({ messageId }).error('Headless copilot request failed', {
error: error instanceof Error ? error.message : String(error),
})
logger.error(
messageId
? `Headless copilot request failed [messageId:${messageId}]`
: 'Headless copilot request failed',
{
error: error instanceof Error ? error.message : String(error),
}
)
return NextResponse.json({ success: false, error: 'Internal server error' }, { status: 500 })
}
}

View File

@@ -2,7 +2,6 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { admissionRejectedResponse, tryAdmit } from '@/lib/core/admission/gate'
import { generateRequestId } from '@/lib/core/utils/request'
import { DispatchQueueFullError } from '@/lib/core/workspace-dispatch'
import {
checkWebhookPreprocessing,
findAllWebhooksForPath,
@@ -156,29 +155,14 @@ async function handleWebhookPost(
if (shouldSkipWebhookEvent(foundWebhook, body, requestId)) {
continue
}
try {
const response = await queueWebhookExecution(foundWebhook, foundWorkflow, body, request, {
requestId,
path,
actorUserId: preprocessResult.actorUserId,
executionId: preprocessResult.executionId,
correlation: preprocessResult.correlation,
})
responses.push(response)
} catch (error) {
if (error instanceof DispatchQueueFullError) {
return NextResponse.json(
{
error: 'Service temporarily at capacity',
message: error.message,
retryAfterSeconds: 10,
},
{ status: 503, headers: { 'Retry-After': '10' } }
)
}
throw error
}
const response = await queueWebhookExecution(foundWebhook, foundWorkflow, body, request, {
requestId,
path,
actorUserId: preprocessResult.actorUserId,
executionId: preprocessResult.executionId,
correlation: preprocessResult.correlation,
})
responses.push(response)
}
if (responses.length === 0) {

View File

@@ -10,13 +10,11 @@ const {
mockAuthorizeWorkflowByWorkspacePermission,
mockPreprocessExecution,
mockEnqueue,
mockEnqueueWorkspaceDispatch,
} = vi.hoisted(() => ({
mockCheckHybridAuth: vi.fn(),
mockAuthorizeWorkflowByWorkspacePermission: vi.fn(),
mockPreprocessExecution: vi.fn(),
mockEnqueue: vi.fn().mockResolvedValue('job-123'),
mockEnqueueWorkspaceDispatch: vi.fn().mockResolvedValue('job-123'),
}))
vi.mock('@/lib/auth/hybrid', () => ({
@@ -47,16 +45,6 @@ vi.mock('@/lib/core/async-jobs', () => ({
markJobFailed: vi.fn(),
}),
shouldExecuteInline: vi.fn().mockReturnValue(false),
shouldUseBullMQ: vi.fn().mockReturnValue(true),
}))
vi.mock('@/lib/core/bullmq', () => ({
createBullMQJobData: vi.fn((payload: unknown, metadata?: unknown) => ({ payload, metadata })),
}))
vi.mock('@/lib/core/workspace-dispatch', () => ({
enqueueWorkspaceDispatch: mockEnqueueWorkspaceDispatch,
waitForDispatchJob: vi.fn(),
}))
vi.mock('@/lib/core/utils/request', () => ({
@@ -147,24 +135,20 @@ describe('workflow execute async route', () => {
expect(response.status).toBe(202)
expect(body.executionId).toBe('execution-123')
expect(body.jobId).toBe('job-123')
expect(mockEnqueueWorkspaceDispatch).toHaveBeenCalledWith(
expect(mockEnqueue).toHaveBeenCalledWith(
'workflow-execution',
expect.objectContaining({
id: 'execution-123',
workflowId: 'workflow-1',
userId: 'actor-1',
workspaceId: 'workspace-1',
lane: 'runtime',
queueName: 'workflow-execution',
bullmqJobName: 'workflow-execution',
metadata: {
executionId: 'execution-123',
}),
expect.objectContaining({
metadata: expect.objectContaining({
workflowId: 'workflow-1',
userId: 'actor-1',
correlation: {
executionId: 'execution-123',
requestId: 'req-12345678',
source: 'workflow',
workflowId: 'workflow-1',
triggerType: 'manual',
},
},
workspaceId: 'workspace-1',
}),
})
)
})

View File

@@ -4,8 +4,7 @@ import { validate as uuidValidate, v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
import { AuthType, checkHybridAuth, hasExternalApiCredentials } from '@/lib/auth/hybrid'
import { admissionRejectedResponse, tryAdmit } from '@/lib/core/admission/gate'
import { getJobQueue, shouldExecuteInline, shouldUseBullMQ } from '@/lib/core/async-jobs'
import { createBullMQJobData } from '@/lib/core/bullmq'
import { getJobQueue, shouldExecuteInline } from '@/lib/core/async-jobs'
import {
createTimeoutAbortController,
getTimeoutErrorMessage,
@@ -14,13 +13,6 @@ import {
import { generateRequestId } from '@/lib/core/utils/request'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
import { getBaseUrl } from '@/lib/core/utils/urls'
import {
DispatchQueueFullError,
enqueueWorkspaceDispatch,
type WorkspaceDispatchLane,
waitForDispatchJob,
} from '@/lib/core/workspace-dispatch'
import { createBufferedExecutionStream } from '@/lib/execution/buffered-stream'
import {
buildNextCallChain,
parseCallChain,
@@ -42,11 +34,6 @@ import {
import { executeWorkflowCore } from '@/lib/workflows/executor/execution-core'
import { type ExecutionEvent, encodeSSEEvent } from '@/lib/workflows/executor/execution-events'
import { PauseResumeManager } from '@/lib/workflows/executor/human-in-the-loop-manager'
import {
DIRECT_WORKFLOW_JOB_NAME,
type QueuedWorkflowExecutionPayload,
type QueuedWorkflowExecutionResult,
} from '@/lib/workflows/executor/queued-workflow-execution'
import {
loadDeployedWorkflowState,
loadWorkflowFromNormalizedTables,
@@ -118,8 +105,6 @@ const ExecuteWorkflowSchema = z.object({
export const runtime = 'nodejs'
export const dynamic = 'force-dynamic'
const INLINE_TRIGGER_TYPES = new Set<CoreTriggerType>(['manual', 'workflow'])
function resolveOutputIds(
selectedOutputs: string[] | undefined,
blocks: Record<string, any>
@@ -216,39 +201,19 @@ async function handleAsyncExecution(params: AsyncExecutionParams): Promise<NextR
}
try {
const useBullMQ = shouldUseBullMQ()
const jobQueue = useBullMQ ? null : await getJobQueue()
const jobId = useBullMQ
? await enqueueWorkspaceDispatch({
id: executionId,
workspaceId,
lane: 'runtime',
queueName: 'workflow-execution',
bullmqJobName: 'workflow-execution',
bullmqPayload: createBullMQJobData(payload, {
workflowId,
userId,
correlation,
}),
metadata: {
workflowId,
userId,
correlation,
},
})
: await jobQueue!.enqueue('workflow-execution', payload, {
metadata: { workflowId, workspaceId, userId, correlation },
})
const jobQueue = await getJobQueue()
const jobId = await jobQueue.enqueue('workflow-execution', payload, {
metadata: { workflowId, workspaceId, userId, correlation },
})
asyncLogger.info('Queued async workflow execution', { jobId })
if (shouldExecuteInline() && jobQueue) {
const inlineJobQueue = jobQueue
if (shouldExecuteInline()) {
void (async () => {
try {
await inlineJobQueue.startJob(jobId)
await jobQueue.startJob(jobId)
const output = await executeWorkflowJob(payload)
await inlineJobQueue.completeJob(jobId, output)
await jobQueue.completeJob(jobId, output)
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error)
asyncLogger.error('Async workflow execution failed', {
@@ -256,7 +221,7 @@ async function handleAsyncExecution(params: AsyncExecutionParams): Promise<NextR
error: errorMessage,
})
try {
await inlineJobQueue.markJobFailed(jobId, errorMessage)
await jobQueue.markJobFailed(jobId, errorMessage)
} catch (markFailedError) {
asyncLogger.error('Failed to mark job as failed', {
jobId,
@@ -282,17 +247,6 @@ async function handleAsyncExecution(params: AsyncExecutionParams): Promise<NextR
{ status: 202 }
)
} catch (error: any) {
if (error instanceof DispatchQueueFullError) {
return NextResponse.json(
{
error: 'Service temporarily at capacity',
message: error.message,
retryAfterSeconds: 10,
},
{ status: 503, headers: { 'Retry-After': '10' } }
)
}
asyncLogger.error('Failed to queue async execution', error)
return NextResponse.json(
{ error: `Failed to queue async execution: ${error.message}` },
@@ -301,31 +255,6 @@ async function handleAsyncExecution(params: AsyncExecutionParams): Promise<NextR
}
}
async function enqueueDirectWorkflowExecution(
payload: QueuedWorkflowExecutionPayload,
priority: number,
lane: WorkspaceDispatchLane
) {
return enqueueWorkspaceDispatch({
id: payload.metadata.executionId,
workspaceId: payload.metadata.workspaceId,
lane,
queueName: 'workflow-execution',
bullmqJobName: DIRECT_WORKFLOW_JOB_NAME,
bullmqPayload: createBullMQJobData(payload, {
workflowId: payload.metadata.workflowId,
userId: payload.metadata.userId,
correlation: payload.metadata.correlation,
}),
metadata: {
workflowId: payload.metadata.workflowId,
userId: payload.metadata.userId,
correlation: payload.metadata.correlation,
},
priority,
})
}
/**
* POST /api/workflows/[id]/execute
*
@@ -793,92 +722,6 @@ async function handleExecutePost(
const executionVariables = cachedWorkflowData?.variables ?? workflow.variables ?? {}
if (shouldUseBullMQ() && !INLINE_TRIGGER_TYPES.has(triggerType)) {
try {
const dispatchJobId = await enqueueDirectWorkflowExecution(
{
workflow,
metadata,
input: processedInput,
variables: executionVariables,
selectedOutputs,
includeFileBase64,
base64MaxBytes,
stopAfterBlockId,
timeoutMs: preprocessResult.executionTimeout?.sync,
runFromBlock: resolvedRunFromBlock,
},
5,
'interactive'
)
const resultRecord = await waitForDispatchJob(
dispatchJobId,
(preprocessResult.executionTimeout?.sync ?? 300000) + 30000
)
if (resultRecord.status === 'failed') {
return NextResponse.json(
{
success: false,
executionId,
error: resultRecord.error ?? 'Workflow execution failed',
},
{ status: 500 }
)
}
const result = resultRecord.output as QueuedWorkflowExecutionResult
const resultForResponseBlock = {
success: result.success,
logs: result.logs,
output: result.output,
}
if (
auth.authType !== AuthType.INTERNAL_JWT &&
workflowHasResponseBlock(resultForResponseBlock)
) {
return createHttpResponseFromBlock(resultForResponseBlock)
}
return NextResponse.json(
{
success: result.success,
executionId,
output: result.output,
error: result.error,
metadata: result.metadata,
},
{ status: result.statusCode ?? 200 }
)
} catch (error: unknown) {
if (error instanceof DispatchQueueFullError) {
return NextResponse.json(
{
error: 'Service temporarily at capacity',
message: error.message,
retryAfterSeconds: 10,
},
{ status: 503, headers: { 'Retry-After': '10' } }
)
}
const errorMessage = error instanceof Error ? error.message : 'Unknown error'
reqLogger.error(`Queued non-SSE execution failed: ${errorMessage}`)
return NextResponse.json(
{
success: false,
error: errorMessage,
},
{ status: 500 }
)
}
}
const timeoutController = createTimeoutAbortController(
preprocessResult.executionTimeout?.sync
)
@@ -993,53 +836,6 @@ async function handleExecutePost(
}
if (shouldUseDraftState) {
const shouldDispatchViaQueue = shouldUseBullMQ() && !INLINE_TRIGGER_TYPES.has(triggerType)
if (shouldDispatchViaQueue) {
const metadata: ExecutionMetadata = {
requestId,
executionId,
workflowId,
workspaceId,
userId: actorUserId,
sessionUserId: isClientSession ? userId : undefined,
workflowUserId: workflow.userId,
triggerType,
useDraftState: shouldUseDraftState,
startTime: new Date().toISOString(),
isClientSession,
enforceCredentialAccess: useAuthenticatedUserAsActor,
workflowStateOverride: effectiveWorkflowStateOverride,
callChain,
}
const executionVariables = cachedWorkflowData?.variables ?? workflow.variables ?? {}
await enqueueDirectWorkflowExecution(
{
workflow,
metadata,
input: processedInput,
variables: executionVariables,
selectedOutputs,
includeFileBase64,
base64MaxBytes,
stopAfterBlockId,
timeoutMs: preprocessResult.executionTimeout?.sync,
runFromBlock: resolvedRunFromBlock,
streamEvents: true,
},
1,
'interactive'
)
return new NextResponse(createBufferedExecutionStream(executionId), {
headers: {
...SSE_HEADERS,
'X-Execution-Id': executionId,
},
})
}
reqLogger.info('Using SSE console log streaming (manual execution)')
} else {
reqLogger.info('Using streaming API response')
@@ -1505,17 +1301,6 @@ async function handleExecutePost(
},
})
} catch (error: any) {
if (error instanceof DispatchQueueFullError) {
return NextResponse.json(
{
error: 'Service temporarily at capacity',
message: error.message,
retryAfterSeconds: 10,
},
{ status: 503, headers: { 'Retry-After': '10' } }
)
}
reqLogger.error('Failed to start workflow execution:', error)
return NextResponse.json(
{ error: error.message || 'Failed to start workflow execution' },

View File

@@ -1,7 +1,7 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { generatePptxFromCode } from '@/lib/execution/pptx-vm'
import { generatePptxFromCode } from '@/lib/execution/doc-vm'
import { verifyWorkspaceMembership } from '@/app/api/workflows/utils'
export const dynamic = 'force-dynamic'

View File

@@ -44,7 +44,7 @@ const TEXT_EDITABLE_EXTENSIONS = new Set([
'svg',
])
const IFRAME_PREVIEWABLE_MIME_TYPES = new Set(['application/pdf'])
const IFRAME_PREVIEWABLE_MIME_TYPES = new Set(['application/pdf', 'text/x-pdflibjs'])
const IFRAME_PREVIEWABLE_EXTENSIONS = new Set(['pdf'])
const IMAGE_PREVIEWABLE_MIME_TYPES = new Set(['image/png', 'image/jpeg', 'image/gif', 'image/webp'])
@@ -52,26 +52,36 @@ const IMAGE_PREVIEWABLE_EXTENSIONS = new Set(['png', 'jpg', 'jpeg', 'gif', 'webp
const PPTX_PREVIEWABLE_MIME_TYPES = new Set([
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'text/x-pptxgenjs',
])
const PPTX_PREVIEWABLE_EXTENSIONS = new Set(['pptx'])
const DOCX_PREVIEWABLE_MIME_TYPES = new Set([
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'text/x-docxjs',
])
const DOCX_PREVIEWABLE_EXTENSIONS = new Set(['docx'])
type FileCategory =
| 'text-editable'
| 'iframe-previewable'
| 'image-previewable'
| 'pptx-previewable'
| 'docx-previewable'
| 'unsupported'
function resolveFileCategory(mimeType: string | null, filename: string): FileCategory {
if (mimeType && TEXT_EDITABLE_MIME_TYPES.has(mimeType)) return 'text-editable'
if (mimeType && IFRAME_PREVIEWABLE_MIME_TYPES.has(mimeType)) return 'iframe-previewable'
if (mimeType && IMAGE_PREVIEWABLE_MIME_TYPES.has(mimeType)) return 'image-previewable'
if (mimeType && DOCX_PREVIEWABLE_MIME_TYPES.has(mimeType)) return 'docx-previewable'
if (mimeType && PPTX_PREVIEWABLE_MIME_TYPES.has(mimeType)) return 'pptx-previewable'
const ext = getFileExtension(filename)
if (TEXT_EDITABLE_EXTENSIONS.has(ext)) return 'text-editable'
if (IFRAME_PREVIEWABLE_EXTENSIONS.has(ext)) return 'iframe-previewable'
if (IMAGE_PREVIEWABLE_EXTENSIONS.has(ext)) return 'image-previewable'
if (DOCX_PREVIEWABLE_EXTENSIONS.has(ext)) return 'docx-previewable'
if (PPTX_PREVIEWABLE_EXTENSIONS.has(ext)) return 'pptx-previewable'
return 'unsupported'
@@ -131,13 +141,17 @@ export function FileViewer({
}
if (category === 'iframe-previewable') {
return <IframePreview file={file} />
return <IframePreview file={file} workspaceId={workspaceId} />
}
if (category === 'image-previewable') {
return <ImagePreview file={file} />
}
if (category === 'docx-previewable') {
return <DocxPreview file={file} workspaceId={workspaceId} />
}
if (category === 'pptx-previewable') {
return <PptxPreview file={file} workspaceId={workspaceId} streamingContent={streamingContent} />
}
@@ -181,7 +195,14 @@ function TextEditor({
isLoading,
error,
dataUpdatedAt,
} = useWorkspaceFileContent(workspaceId, file.id, file.key, file.type === 'text/x-pptxgenjs')
} = useWorkspaceFileContent(
workspaceId,
file.id,
file.key,
file.type === 'text/x-pptxgenjs' ||
file.type === 'text/x-docxjs' ||
file.type === 'text/x-pdflibjs'
)
const updateContent = useUpdateWorkspaceFileContent()
const updateContentRef = useRef(updateContent)
@@ -416,13 +437,36 @@ function TextEditor({
)
}
const IframePreview = memo(function IframePreview({ file }: { file: WorkspaceFileRecord }) {
const serveUrl = `/api/files/serve/${encodeURIComponent(file.key)}?context=workspace`
const IframePreview = memo(function IframePreview({
file,
workspaceId,
}: {
file: WorkspaceFileRecord
workspaceId: string
}) {
const { data: fileData, isLoading } = useWorkspaceFileBinary(workspaceId, file.id, file.key)
const [blobUrl, setBlobUrl] = useState<string | null>(null)
useEffect(() => {
if (!fileData) return
const blob = new Blob([fileData], { type: 'application/pdf' })
const url = URL.createObjectURL(blob)
setBlobUrl(url)
return () => URL.revokeObjectURL(url)
}, [fileData])
if (isLoading || !blobUrl) {
return (
<div className='flex h-full items-center justify-center'>
<Skeleton className='h-[200px] w-[80%]' />
</div>
)
}
return (
<div className='flex flex-1 overflow-hidden'>
<iframe
src={serveUrl}
src={blobUrl}
className='h-full w-full border-0'
title={file.name}
onError={() => {
@@ -551,6 +595,71 @@ const ImagePreview = memo(function ImagePreview({ file }: { file: WorkspaceFileR
)
})
const DocxPreview = memo(function DocxPreview({
file,
workspaceId,
}: {
file: WorkspaceFileRecord
workspaceId: string
}) {
const containerRef = useRef<HTMLDivElement>(null)
const {
data: fileData,
isLoading,
error: fetchError,
} = useWorkspaceFileBinary(workspaceId, file.id, file.key)
const [renderError, setRenderError] = useState<string | null>(null)
useEffect(() => {
if (!containerRef.current || !fileData) return
let cancelled = false
async function render() {
try {
const { renderAsync } = await import('docx-preview')
if (cancelled || !containerRef.current) return
containerRef.current.innerHTML = ''
await renderAsync(fileData, containerRef.current, undefined, {
inWrapper: true,
ignoreWidth: false,
ignoreHeight: false,
})
} catch (err) {
if (!cancelled) {
const msg = err instanceof Error ? err.message : 'Failed to render document'
logger.error('DOCX render failed', { error: msg })
setRenderError(msg)
}
}
}
render()
return () => {
cancelled = true
}
}, [fileData])
if (isLoading) {
return (
<div className='flex h-full items-center justify-center'>
<Skeleton className='h-[200px] w-[80%]' />
</div>
)
}
if (fetchError || renderError) {
return (
<div className='flex h-full flex-col items-center justify-center gap-2 text-[var(--text-muted)]'>
<p className='text-[13px]'>Failed to preview document</p>
<p className='text-[11px]'>{renderError || 'Could not load file'}</p>
</div>
)
}
return <div ref={containerRef} className='h-full w-full overflow-auto bg-white' />
})
const pptxSlideCache = new Map<string, string[]>()
function pptxCacheKey(fileId: string, dataUpdatedAt: number, byteLength: number): string {

View File

@@ -1,21 +1,17 @@
'use client'
import type { AgentGroupItem } from '@/app/workspace/[workspaceId]/home/components/message-content/components'
import {
AgentGroup,
ChatContent,
CircleStop,
Options,
PendingTagIndicator,
} from '@/app/workspace/[workspaceId]/home/components/message-content/components'
import type {
ContentBlock,
MothershipToolName,
OptionItem,
SubagentName,
ToolCallData,
} from '@/app/workspace/[workspaceId]/home/types'
import { SUBAGENT_LABELS, TOOL_UI_METADATA } from '@/app/workspace/[workspaceId]/home/types'
FileWrite,
Read as ReadTool,
ToolSearchToolRegex,
WorkspaceFile,
} from '@/lib/copilot/generated/tool-catalog-v1'
import { resolveToolDisplay } from '@/lib/copilot/tools/client/store-utils'
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
import type { ContentBlock, OptionItem, ToolCallData } from '../../types'
import { SUBAGENT_LABELS, TOOL_UI_METADATA } from '../../types'
import type { AgentGroupItem } from './components'
import { AgentGroup, ChatContent, CircleStop, Options, PendingTagIndicator } from './components'
interface TextSegment {
type: 'text'
@@ -52,11 +48,19 @@ const SUBAGENT_KEYS = new Set(Object.keys(SUBAGENT_LABELS))
* group is absorbed so it doesn't render as a separate Mothership entry.
*/
const SUBAGENT_DISPATCH_TOOLS: Record<string, string> = {
file_write: 'workspace_file',
[FileWrite.id]: WorkspaceFile.id,
}
function formatToolName(name: string): string {
return name
.replace(/_v\d+$/, '')
.split('_')
.map((w) => w.charAt(0).toUpperCase() + w.slice(1))
.join(' ')
}
function resolveAgentLabel(key: string): string {
return SUBAGENT_LABELS[key as SubagentName] ?? key
return SUBAGENT_LABELS[key] ?? formatToolName(key)
}
function isToolDone(status: ToolCallData['status']): boolean {
@@ -67,12 +71,41 @@ function isDelegatingTool(tc: NonNullable<ContentBlock['toolCall']>): boolean {
return tc.status === 'executing'
}
function mapToolStatusToClientState(
status: ContentBlock['toolCall'] extends { status: infer T } ? T : string
) {
switch (status) {
case 'success':
return ClientToolCallState.success
case 'error':
return ClientToolCallState.error
case 'cancelled':
return ClientToolCallState.cancelled
default:
return ClientToolCallState.executing
}
}
function getOverrideDisplayTitle(tc: NonNullable<ContentBlock['toolCall']>): string | undefined {
if (tc.name === ReadTool.id || tc.name.endsWith('_respond')) {
return resolveToolDisplay(tc.name, mapToolStatusToClientState(tc.status), tc.id, tc.params)
?.text
}
return undefined
}
function toToolData(tc: NonNullable<ContentBlock['toolCall']>): ToolCallData {
const overrideDisplayTitle = getOverrideDisplayTitle(tc)
const displayTitle =
overrideDisplayTitle ||
tc.displayTitle ||
TOOL_UI_METADATA[tc.name as keyof typeof TOOL_UI_METADATA]?.title ||
formatToolName(tc.name)
return {
id: tc.id,
toolName: tc.name,
displayTitle:
tc.displayTitle ?? TOOL_UI_METADATA[tc.name as MothershipToolName]?.title ?? tc.name,
displayTitle,
status: tc.status,
params: tc.params,
result: tc.result,
@@ -172,7 +205,7 @@ function parseBlocks(blocks: ContentBlock[]): MessageSegment[] {
if (block.type === 'tool_call') {
if (!block.toolCall) continue
const tc = block.toolCall
if (tc.name === 'tool_search_tool_regex') continue
if (tc.name === ToolSearchToolRegex.id) continue
const isDispatch = SUBAGENT_KEYS.has(tc.name) && !tc.calledBy
if (isDispatch) {
@@ -312,7 +345,7 @@ export function MessageContent({
if (segments.length === 0) {
if (isStreaming) {
return (
<div className='space-y-2.5'>
<div className='space-y-[10px]'>
<PendingTagIndicator />
</div>
)
@@ -341,7 +374,7 @@ export function MessageContent({
)?.id
return (
<div className='space-y-2.5'>
<div className='space-y-[10px]'>
{segments.map((segment, i) => {
switch (segment.type) {
case 'text':
@@ -384,9 +417,11 @@ export function MessageContent({
)
case 'stopped':
return (
<div key={`stopped-${i}`} className='flex items-center gap-2'>
<div key={`stopped-${i}`} className='flex items-center gap-[8px]'>
<CircleStop className='h-[16px] w-[16px] flex-shrink-0 text-[var(--text-icon)]' />
<span className='font-base text-[var(--text-body)] text-sm'>Stopped by user</span>
<span className='font-base text-[14px] text-[var(--text-body)]'>
Stopped by user
</span>
</div>
)
}

View File

@@ -23,96 +23,33 @@ import {
} from '@/components/emcn'
import { Table as TableIcon } from '@/components/emcn/icons'
import { AgentIcon } from '@/components/icons'
import type { MothershipToolName, SubagentName } from '@/app/workspace/[workspaceId]/home/types'
export type IconComponent = ComponentType<SVGProps<SVGSVGElement>>
const TOOL_ICONS: Record<MothershipToolName | SubagentName | 'mothership', IconComponent> = {
const TOOL_ICONS: Record<string, IconComponent> = {
mothership: Blimp,
// Workspace
glob: FolderCode,
grep: Search,
read: File,
// Search
search_online: Search,
scrape_page: Search,
get_page_contents: Search,
search_library_docs: Library,
crawl_website: Search,
// Execution
function_execute: TerminalWindow,
superagent: Blimp,
run_workflow: PlayOutline,
run_block: PlayOutline,
run_from_block: PlayOutline,
run_workflow_until_block: PlayOutline,
complete_job: PlayOutline,
get_execution_summary: ClipboardList,
get_job_logs: ClipboardList,
get_workflow_logs: ClipboardList,
get_workflow_data: Layout,
get_block_outputs: ClipboardList,
get_block_upstream_references: ClipboardList,
get_deployed_workflow_state: Rocket,
check_deployment_status: Rocket,
// Workflows & folders
create_workflow: Layout,
delete_workflow: Layout,
edit_workflow: Pencil,
rename_workflow: Pencil,
move_workflow: Layout,
create_folder: FolderCode,
delete_folder: FolderCode,
move_folder: FolderCode,
list_folders: FolderCode,
list_user_workspaces: Layout,
revert_to_version: Rocket,
get_deployment_version: Rocket,
open_resource: Eye,
// Files
workspace_file: File,
download_to_workspace_file: File,
materialize_file: File,
generate_image: File,
generate_visualization: File,
// Tables & knowledge
user_table: TableIcon,
knowledge_base: Database,
// Jobs
create_job: Calendar,
manage_job: Calendar,
update_job_history: Calendar,
job_respond: Calendar,
// Management
manage_mcp_tool: Settings,
manage_skill: Asterisk,
manage_credential: Integration,
manage_custom_tool: Wrench,
update_workspace_mcp_server: Settings,
delete_workspace_mcp_server: Settings,
create_workspace_mcp_server: Settings,
list_workspace_mcp_servers: Settings,
oauth_get_auth_link: Integration,
oauth_request_access: Integration,
set_environment_variables: Settings,
set_global_workflow_variables: Settings,
get_platform_actions: Settings,
search_documentation: Library,
search_patterns: Search,
deploy_api: Rocket,
deploy_chat: Rocket,
deploy_mcp: Rocket,
redeploy: Rocket,
generate_api_key: Asterisk,
user_memory: Database,
context_write: Pencil,
context_compaction: Asterisk,
// Subagents
function_execute: TerminalWindow,
superagent: Blimp,
user_table: TableIcon,
workspace_file: File,
create_workflow: Layout,
edit_workflow: Pencil,
build: Hammer,
run: PlayOutline,
deploy: Rocket,
auth: Integration,
knowledge: Database,
knowledge_base: Database,
table: TableIcon,
job: Calendar,
agent: AgentIcon,
@@ -122,6 +59,8 @@ const TOOL_ICONS: Record<MothershipToolName | SubagentName | 'mothership', IconC
debug: Bug,
edit: Pencil,
fast_edit: Pencil,
context_compaction: Asterisk,
open_resource: Eye,
file_write: File,
}

View File

@@ -10,7 +10,7 @@ import {
cancelRunToolExecution,
markRunToolManuallyStopped,
reportManualRunToolStop,
} from '@/lib/copilot/client-sse/run-tool-execution'
} from '@/lib/copilot/tools/client/run-tool-execution'
import {
downloadWorkspaceFile,
getFileExtension,
@@ -83,7 +83,12 @@ export const ResourceContent = memo(function ResourceContent({
}, [streamingFile])
const syntheticFile = useMemo(() => {
const ext = getFileExtension(streamFileName)
const type = ext === 'pptx' ? 'text/x-pptxgenjs' : getMimeTypeFromExtension(ext)
const SOURCE_MIME_MAP: Record<string, string> = {
pptx: 'text/x-pptxgenjs',
docx: 'text/x-docxjs',
pdf: 'text/x-pdflibjs',
}
const type = SOURCE_MIME_MAP[ext] ?? getMimeTypeFromExtension(ext)
return {
id: 'streaming-file',
workspaceId,

View File

@@ -9,7 +9,7 @@ import {
} from 'react'
import { Button, Tooltip } from '@/components/emcn'
import { Columns3, Eye, PanelLeft, Pencil } from '@/components/emcn/icons'
import { isEphemeralResource } from '@/lib/copilot/resource-extraction'
import { isEphemeralResource } from '@/lib/copilot/resources/types'
import { cn } from '@/lib/core/utils/cn'
import type { PreviewMode } from '@/app/workspace/[workspaceId]/files/components/file-viewer'
import { AddResourceDropdown } from '@/app/workspace/[workspaceId]/home/components/mothership-view/components/add-resource-dropdown'

View File

@@ -369,7 +369,7 @@ export function Home({ chatId }: HomeProps = {}) {
onCollapse={collapseResource}
isCollapsed={isResourceCollapsed}
streamingFile={streamingFile}
genericResourceData={genericResourceData}
genericResourceData={genericResourceData ?? undefined}
className={skipResourceTransition ? '!transition-none' : undefined}
/>

File diff suppressed because it is too large Load Diff

View File

@@ -1,10 +1,39 @@
import type { MothershipResourceType } from '@/lib/copilot/resource-types'
import {
Agent,
Auth,
Build,
CreateWorkflow,
Debug,
Deploy,
EditWorkflow,
FunctionExecute,
GetPageContents,
Glob,
Grep,
Job,
Knowledge,
KnowledgeBase,
ManageMcpTool,
ManageSkill,
OpenResource,
Read as ReadTool,
Research,
Run,
ScrapePage,
SearchLibraryDocs,
SearchOnline,
Superagent,
Table,
UserMemory,
UserTable,
WorkspaceFile,
} from '@/lib/copilot/generated/tool-catalog-v1'
import type { ChatContext } from '@/stores/panel'
export type {
MothershipResource,
MothershipResourceType,
} from '@/lib/copilot/resource-types'
} from '@/lib/copilot/resources/types'
export interface FileAttachmentForApi {
id: string
@@ -21,169 +50,34 @@ export interface QueuedMessage {
contexts?: ChatContext[]
}
/**
* SSE event types emitted by the Go orchestrator backend.
*
* @example
* ```json
* { "type": "content", "data": "Hello world" }
* { "type": "tool_call", "state": "executing", "toolCallId": "toolu_...", "toolName": "glob", "ui": { "title": "..." } }
* { "type": "subagent_start", "subagent": "build" }
* ```
*/
export type SSEEventType =
| 'chat_id'
| 'request_id'
| 'title_updated'
| 'content'
| 'reasoning' // openai reasoning - render as thinking text
| 'tool_call' // tool call name
| 'tool_call_delta' // chunk of tool call
| 'tool_generating' // start a tool call
| 'tool_result' // tool call result
| 'tool_error' // tool call error
| 'resource_added' // add a resource to the chat
| 'resource_deleted' // delete a resource from the chat
| 'subagent_start' // start a subagent
| 'subagent_end' // end a subagent
| 'structured_result' // structured result from a tool call
| 'subagent_result' // result from a subagent
| 'done' // end of the chat
| 'context_compaction_start' // context compaction started
| 'context_compaction' // conversation context was compacted
| 'error' // error in the chat
| 'start' // start of the chat
/**
* All tool names observed in the mothership SSE stream, grouped by phase.
*
* @example
* ```json
* { "type": "tool_generating", "toolName": "glob" }
* { "type": "tool_call", "toolName": "function_execute", "ui": { "title": "Running code", "icon": "code" } }
* { "type": "tool", "phase": "call", "toolName": "glob" }
* { "type": "tool", "phase": "call", "toolName": "function_execute", "ui": { "title": "Running code", "icon": "code" } }
* ```
* Stream `type` is `MothershipStreamV1EventType.tool` (`mothership-stream-v1`) with `phase: 'call'`.
*/
export type MothershipToolName =
| 'glob'
| 'grep'
| 'read'
| 'search_online'
| 'scrape_page'
| 'get_page_contents'
| 'search_library_docs'
| 'manage_mcp_tool'
| 'manage_skill'
| 'manage_credential'
| 'manage_custom_tool'
| 'manage_job'
| 'user_memory'
| 'function_execute'
| 'superagent'
| 'user_table'
| 'workspace_file'
| 'create_workflow'
| 'delete_workflow'
| 'edit_workflow'
| 'rename_workflow'
| 'move_workflow'
| 'run_workflow'
| 'run_block'
| 'run_from_block'
| 'run_workflow_until_block'
| 'create_folder'
| 'delete_folder'
| 'move_folder'
| 'list_folders'
| 'list_user_workspaces'
| 'create_job'
| 'complete_job'
| 'update_job_history'
| 'job_respond'
| 'download_to_workspace_file'
| 'materialize_file'
| 'context_write'
| 'generate_image'
| 'generate_visualization'
| 'crawl_website'
| 'get_execution_summary'
| 'get_job_logs'
| 'get_deployment_version'
| 'revert_to_version'
| 'check_deployment_status'
| 'get_deployed_workflow_state'
| 'get_workflow_data'
| 'get_workflow_logs'
| 'get_block_outputs'
| 'get_block_upstream_references'
| 'set_global_workflow_variables'
| 'set_environment_variables'
| 'get_platform_actions'
| 'search_documentation'
| 'search_patterns'
| 'update_workspace_mcp_server'
| 'delete_workspace_mcp_server'
| 'create_workspace_mcp_server'
| 'list_workspace_mcp_servers'
| 'deploy_api'
| 'deploy_chat'
| 'deploy_mcp'
| 'redeploy'
| 'generate_api_key'
| 'oauth_get_auth_link'
| 'oauth_request_access'
| 'build'
| 'run'
| 'deploy'
| 'auth'
| 'knowledge'
| 'knowledge_base'
| 'table'
| 'job'
| 'agent'
| 'custom_tool'
| 'research'
| 'plan'
| 'debug'
| 'edit'
| 'fast_edit'
| 'open_resource'
| 'context_compaction'
/**
* Subagent identifiers dispatched via `subagent_start` SSE events.
*
* @example
* ```json
* { "type": "subagent_start", "subagent": "build" }
* ```
*/
export type SubagentName =
| 'build'
| 'deploy'
| 'auth'
| 'research'
| 'knowledge'
| 'table'
| 'custom_tool'
| 'superagent'
| 'plan'
| 'debug'
| 'edit'
| 'fast_edit'
| 'run'
| 'agent'
| 'job'
| 'file_write'
export const ToolPhase = {
workspace: 'workspace',
search: 'search',
management: 'management',
execution: 'execution',
resource: 'resource',
subagent: 'subagent',
} as const
export type ToolPhase = (typeof ToolPhase)[keyof typeof ToolPhase]
export type ToolPhase =
| 'workspace'
| 'search'
| 'management'
| 'execution'
| 'resource'
| 'subagent'
export type ToolCallStatus = 'executing' | 'success' | 'error' | 'cancelled'
export const ToolCallStatus = {
executing: 'executing',
success: 'success',
error: 'error',
cancelled: 'cancelled',
} as const
export type ToolCallStatus = (typeof ToolCallStatus)[keyof typeof ToolCallStatus]
export interface ToolCallResult {
success: boolean
@@ -191,7 +85,6 @@ export interface ToolCallResult {
error?: string
}
/** A single tool call result entry in the generic Results resource tab. */
export interface GenericResourceEntry {
toolCallId: string
toolName: string
@@ -202,7 +95,6 @@ export interface GenericResourceEntry {
result?: ToolCallResult
}
/** Accumulated feed of tool call results shown in the generic Results tab. */
export interface GenericResourceData {
entries: GenericResourceEntry[]
}
@@ -225,7 +117,7 @@ export interface ToolCallInfo {
phaseLabel?: string
params?: Record<string, unknown>
calledBy?: string
result?: { success: boolean; output?: unknown; error?: string }
result?: ToolCallResult
streamingArgs?: string
}
@@ -234,14 +126,16 @@ export interface OptionItem {
label: string
}
export type ContentBlockType =
| 'text'
| 'tool_call'
| 'subagent'
| 'subagent_end'
| 'subagent_text'
| 'options'
| 'stopped'
export const ContentBlockType = {
text: 'text',
tool_call: 'tool_call',
subagent: 'subagent',
subagent_end: 'subagent_end',
subagent_text: 'subagent_text',
options: 'options',
stopped: 'stopped',
} as const
export type ContentBlockType = (typeof ContentBlockType)[keyof typeof ContentBlockType]
export interface ContentBlock {
type: ContentBlockType
@@ -278,7 +172,7 @@ export interface ChatMessage {
requestId?: string
}
export const SUBAGENT_LABELS: Record<SubagentName, string> = {
export const SUBAGENT_LABELS: Record<string, string> = {
build: 'Build agent',
deploy: 'Deploy agent',
auth: 'Integration agent',
@@ -304,206 +198,130 @@ export interface ToolUIMetadata {
}
/**
* Primary UI metadata for tools observed in the SSE stream.
* Maps tool IDs to human-readable display names shown in the chat.
* This is the single source of truth — server-sent `ui.title` values are not used.
* Default UI metadata for tools observed in the SSE stream.
* The backend may send `ui` on some `MothershipStreamV1EventType.tool` payloads (`phase: 'call'`);
* this map provides fallback metadata when `ui` is absent.
*/
export const TOOL_UI_METADATA: Record<MothershipToolName, ToolUIMetadata> = {
// Workspace
glob: { title: 'Searching workspace', phaseLabel: 'Workspace', phase: 'workspace' },
grep: { title: 'Searching workspace', phaseLabel: 'Workspace', phase: 'workspace' },
read: { title: 'Reading file', phaseLabel: 'Workspace', phase: 'workspace' },
// Search
search_online: { title: 'Searching online', phaseLabel: 'Search', phase: 'search' },
scrape_page: { title: 'Reading webpage', phaseLabel: 'Search', phase: 'search' },
get_page_contents: { title: 'Reading page', phaseLabel: 'Search', phase: 'search' },
search_library_docs: { title: 'Searching docs', phaseLabel: 'Search', phase: 'search' },
crawl_website: { title: 'Browsing website', phaseLabel: 'Search', phase: 'search' },
// Execution
function_execute: { title: 'Running code', phaseLabel: 'Code', phase: 'execution' },
superagent: { title: 'Taking action', phaseLabel: 'Action', phase: 'execution' },
run_workflow: { title: 'Running workflow', phaseLabel: 'Execution', phase: 'execution' },
run_block: { title: 'Running block', phaseLabel: 'Execution', phase: 'execution' },
run_from_block: { title: 'Running from block', phaseLabel: 'Execution', phase: 'execution' },
run_workflow_until_block: {
title: 'Running partial workflow',
phaseLabel: 'Execution',
export const TOOL_UI_METADATA: Record<string, ToolUIMetadata> = {
[Glob.id]: {
title: 'Searching files',
phaseLabel: 'Workspace',
phase: 'workspace',
},
[Grep.id]: {
title: 'Searching code',
phaseLabel: 'Workspace',
phase: 'workspace',
},
[ReadTool.id]: { title: 'Reading file', phaseLabel: 'Workspace', phase: 'workspace' },
[SearchOnline.id]: {
title: 'Searching online',
phaseLabel: 'Search',
phase: 'search',
},
[ScrapePage.id]: {
title: 'Scraping page',
phaseLabel: 'Search',
phase: 'search',
},
[GetPageContents.id]: {
title: 'Getting page contents',
phaseLabel: 'Search',
phase: 'search',
},
[SearchLibraryDocs.id]: {
title: 'Searching library docs',
phaseLabel: 'Search',
phase: 'search',
},
[ManageMcpTool.id]: {
title: 'Managing MCP tool',
phaseLabel: 'Management',
phase: 'management',
},
[ManageSkill.id]: {
title: 'Managing skill',
phaseLabel: 'Management',
phase: 'management',
},
[UserMemory.id]: {
title: 'Accessing memory',
phaseLabel: 'Management',
phase: 'management',
},
[FunctionExecute.id]: {
title: 'Running code',
phaseLabel: 'Code',
phase: 'execution',
},
complete_job: { title: 'Completing job', phaseLabel: 'Execution', phase: 'execution' },
get_execution_summary: { title: 'Checking results', phaseLabel: 'Execution', phase: 'execution' },
get_job_logs: { title: 'Checking logs', phaseLabel: 'Execution', phase: 'execution' },
get_workflow_logs: { title: 'Checking logs', phaseLabel: 'Execution', phase: 'execution' },
get_workflow_data: { title: 'Loading workflow', phaseLabel: 'Execution', phase: 'execution' },
get_block_outputs: {
title: 'Checking block outputs',
phaseLabel: 'Execution',
[Superagent.id]: {
title: 'Executing action',
phaseLabel: 'Action',
phase: 'execution',
},
get_block_upstream_references: {
title: 'Checking references',
phaseLabel: 'Execution',
phase: 'execution',
},
get_deployed_workflow_state: {
title: 'Checking deployment',
phaseLabel: 'Execution',
phase: 'execution',
},
check_deployment_status: {
title: 'Checking deployment',
phaseLabel: 'Execution',
phase: 'execution',
},
// Workflows & folders
create_workflow: { title: 'Creating workflow', phaseLabel: 'Resource', phase: 'resource' },
delete_workflow: { title: 'Deleting workflow', phaseLabel: 'Resource', phase: 'resource' },
edit_workflow: { title: 'Editing workflow', phaseLabel: 'Resource', phase: 'resource' },
rename_workflow: { title: 'Renaming workflow', phaseLabel: 'Resource', phase: 'resource' },
move_workflow: { title: 'Moving workflow', phaseLabel: 'Resource', phase: 'resource' },
create_folder: { title: 'Creating folder', phaseLabel: 'Resource', phase: 'resource' },
delete_folder: { title: 'Deleting folder', phaseLabel: 'Resource', phase: 'resource' },
move_folder: { title: 'Moving folder', phaseLabel: 'Resource', phase: 'resource' },
list_folders: { title: 'Browsing folders', phaseLabel: 'Resource', phase: 'resource' },
list_user_workspaces: { title: 'Browsing workspaces', phaseLabel: 'Resource', phase: 'resource' },
revert_to_version: { title: 'Restoring version', phaseLabel: 'Resource', phase: 'resource' },
get_deployment_version: {
title: 'Checking deployment',
[UserTable.id]: {
title: 'Managing table',
phaseLabel: 'Resource',
phase: 'resource',
},
open_resource: { title: 'Opening resource', phaseLabel: 'Resource', phase: 'resource' },
// Files
workspace_file: { title: 'Working with files', phaseLabel: 'Resource', phase: 'resource' },
download_to_workspace_file: {
title: 'Downloading file',
[WorkspaceFile.id]: {
title: 'Managing file',
phaseLabel: 'Resource',
phase: 'resource',
},
materialize_file: { title: 'Saving file', phaseLabel: 'Resource', phase: 'resource' },
generate_image: { title: 'Generating image', phaseLabel: 'Resource', phase: 'resource' },
generate_visualization: {
title: 'Generating visualization',
[CreateWorkflow.id]: {
title: 'Creating workflow',
phaseLabel: 'Resource',
phase: 'resource',
},
// Tables & knowledge
user_table: { title: 'Editing table', phaseLabel: 'Resource', phase: 'resource' },
knowledge_base: { title: 'Updating knowledge base', phaseLabel: 'Resource', phase: 'resource' },
// Jobs
create_job: { title: 'Creating job', phaseLabel: 'Resource', phase: 'resource' },
manage_job: { title: 'Updating job', phaseLabel: 'Management', phase: 'management' },
update_job_history: { title: 'Updating job', phaseLabel: 'Management', phase: 'management' },
job_respond: { title: 'Explaining job scheduled', phaseLabel: 'Execution', phase: 'execution' },
// Management
manage_mcp_tool: { title: 'Updating integration', phaseLabel: 'Management', phase: 'management' },
manage_skill: { title: 'Updating skill', phaseLabel: 'Management', phase: 'management' },
manage_credential: { title: 'Connecting account', phaseLabel: 'Management', phase: 'management' },
manage_custom_tool: { title: 'Updating tool', phaseLabel: 'Management', phase: 'management' },
update_workspace_mcp_server: {
title: 'Updating MCP server',
phaseLabel: 'Management',
phase: 'management',
[EditWorkflow.id]: {
title: 'Editing workflow',
phaseLabel: 'Resource',
phase: 'resource',
},
delete_workspace_mcp_server: {
title: 'Removing MCP server',
phaseLabel: 'Management',
phase: 'management',
[Build.id]: { title: 'Building', phaseLabel: 'Build', phase: 'subagent' },
[Run.id]: { title: 'Running', phaseLabel: 'Run', phase: 'subagent' },
[Deploy.id]: { title: 'Deploying', phaseLabel: 'Deploy', phase: 'subagent' },
[Auth.id]: {
title: 'Connecting credentials',
phaseLabel: 'Auth',
phase: 'subagent',
},
create_workspace_mcp_server: {
title: 'Creating MCP server',
phaseLabel: 'Management',
phase: 'management',
[Knowledge.id]: {
title: 'Managing knowledge',
phaseLabel: 'Knowledge',
phase: 'subagent',
},
list_workspace_mcp_servers: {
title: 'Browsing MCP servers',
phaseLabel: 'Management',
phase: 'management',
[KnowledgeBase.id]: {
title: 'Managing knowledge base',
phaseLabel: 'Resource',
phase: 'resource',
},
oauth_get_auth_link: {
title: 'Connecting account',
phaseLabel: 'Management',
phase: 'management',
[Table.id]: { title: 'Managing tables', phaseLabel: 'Table', phase: 'subagent' },
[Job.id]: { title: 'Managing jobs', phaseLabel: 'Job', phase: 'subagent' },
[Agent.id]: { title: 'Agent action', phaseLabel: 'Agent', phase: 'subagent' },
custom_tool: {
title: 'Creating tool',
phaseLabel: 'Tool',
phase: 'subagent',
},
oauth_request_access: {
title: 'Connecting account',
phaseLabel: 'Management',
phase: 'management',
},
set_environment_variables: {
title: 'Updating environment',
phaseLabel: 'Management',
phase: 'management',
},
set_global_workflow_variables: {
title: 'Updating variables',
phaseLabel: 'Management',
phase: 'management',
},
get_platform_actions: { title: 'Loading actions', phaseLabel: 'Management', phase: 'management' },
search_documentation: { title: 'Searching docs', phaseLabel: 'Search', phase: 'search' },
search_patterns: { title: 'Searching patterns', phaseLabel: 'Search', phase: 'search' },
deploy_api: { title: 'Deploying API', phaseLabel: 'Deploy', phase: 'management' },
deploy_chat: { title: 'Deploying chat', phaseLabel: 'Deploy', phase: 'management' },
deploy_mcp: { title: 'Deploying MCP', phaseLabel: 'Deploy', phase: 'management' },
redeploy: { title: 'Redeploying', phaseLabel: 'Deploy', phase: 'management' },
generate_api_key: { title: 'Generating API key', phaseLabel: 'Deploy', phase: 'management' },
user_memory: { title: 'Updating memory', phaseLabel: 'Management', phase: 'management' },
context_write: { title: 'Writing notes', phaseLabel: 'Management', phase: 'management' },
context_compaction: {
title: 'Optimizing context',
phaseLabel: 'Management',
phase: 'management',
},
// Subagents
build: { title: 'Building', phaseLabel: 'Build', phase: 'subagent' },
run: { title: 'Running', phaseLabel: 'Run', phase: 'subagent' },
deploy: { title: 'Deploying', phaseLabel: 'Deploy', phase: 'subagent' },
auth: { title: 'Connecting integration', phaseLabel: 'Auth', phase: 'subagent' },
knowledge: { title: 'Working with knowledge', phaseLabel: 'Knowledge', phase: 'subagent' },
table: { title: 'Working with tables', phaseLabel: 'Table', phase: 'subagent' },
job: { title: 'Working with jobs', phaseLabel: 'Job', phase: 'subagent' },
agent: { title: 'Taking action', phaseLabel: 'Agent', phase: 'subagent' },
custom_tool: { title: 'Creating tool', phaseLabel: 'Tool', phase: 'subagent' },
research: { title: 'Researching', phaseLabel: 'Research', phase: 'subagent' },
[Research.id]: { title: 'Researching', phaseLabel: 'Research', phase: 'subagent' },
plan: { title: 'Planning', phaseLabel: 'Plan', phase: 'subagent' },
debug: { title: 'Debugging', phaseLabel: 'Debug', phase: 'subagent' },
[Debug.id]: { title: 'Debugging', phaseLabel: 'Debug', phase: 'subagent' },
edit: { title: 'Editing workflow', phaseLabel: 'Edit', phase: 'subagent' },
fast_edit: { title: 'Editing workflow', phaseLabel: 'Edit', phase: 'subagent' },
}
export interface SSEPayloadUI {
hidden?: boolean
title?: string
phaseLabel?: string
icon?: string
internal?: boolean
clientExecutable?: boolean
}
export interface SSEPayloadData {
name?: string
ui?: SSEPayloadUI
id?: string
agent?: string
partial?: boolean
arguments?: Record<string, unknown>
input?: Record<string, unknown>
result?: unknown
error?: string
}
export interface SSEPayload {
type: SSEEventType | (string & {})
chatId?: string
data?: string | SSEPayloadData
content?: string
toolCallId?: string
toolName?: string
ui?: SSEPayloadUI
success?: boolean
result?: unknown
error?: string
subagent?: string
resource?: { type: MothershipResourceType; id: string; title: string }
fast_edit: {
title: 'Editing workflow',
phaseLabel: 'Edit',
phase: 'subagent',
},
[OpenResource.id]: {
title: 'Opening resource',
phaseLabel: 'Resource',
phase: 'resource',
},
context_compaction: {
title: 'Compacted context',
phaseLabel: 'Context',
phase: 'management',
},
}

View File

@@ -21,7 +21,7 @@ import { useWorkflowStore } from '@/stores/workflows/workflow/store'
/**
* Constants for ComboBox component behavior
*/
const DEFAULT_MODEL = 'claude-sonnet-4-5'
const DEFAULT_MODEL = 'claude-sonnet-4-6'
const ZOOM_FACTOR_BASE = 0.96
const MIN_ZOOM = 0.1
const MAX_ZOOM = 1
@@ -234,7 +234,7 @@ export const ComboBox = memo(function ComboBox({
/**
* Determines the default option value to use.
* Priority: explicit defaultValue > claude-sonnet-4-5 for model field > first option
* Priority: explicit defaultValue > claude-sonnet-4-6 for model field > first option
*/
const defaultOptionValue = useMemo(() => {
if (defaultValue !== undefined) {
@@ -246,11 +246,13 @@ export const ComboBox = memo(function ComboBox({
// Default not available (e.g. provider disabled) — fall through to other fallbacks
}
// For model field, default to claude-sonnet-4-5 if available
// For model field, default to claude-sonnet-4-6 if available
if (subBlockId === 'model') {
const claudeSonnet45 = evaluatedOptions.find((opt) => getOptionValue(opt) === DEFAULT_MODEL)
if (claudeSonnet45) {
return getOptionValue(claudeSonnet45)
const defaultModelOption = evaluatedOptions.find(
(opt) => getOptionValue(opt) === DEFAULT_MODEL
)
if (defaultModelOption) {
return getOptionValue(defaultModelOption)
}
}

View File

@@ -218,7 +218,7 @@ export const Panel = memo(function Panel({ workspaceId: propWorkspaceId }: Panel
const [copilotChatId, setCopilotChatId] = useState<string | undefined>(undefined)
const [copilotChatTitle, setCopilotChatTitle] = useState<string | null>(null)
const [copilotChatList, setCopilotChatList] = useState<
{ id: string; title: string | null; updatedAt: string; conversationId: string | null }[]
{ id: string; title: string | null; updatedAt: string; activeStreamId: string | null }[]
>([])
const [isCopilotHistoryOpen, setIsCopilotHistoryOpen] = useState(false)
@@ -238,7 +238,7 @@ export const Panel = memo(function Panel({ workspaceId: propWorkspaceId }: Panel
id: string
title: string | null
updatedAt: string
conversationId: string | null
activeStreamId: string | null
}>
setCopilotChatList(filtered)
@@ -784,7 +784,7 @@ export const Panel = memo(function Panel({ workspaceId: propWorkspaceId }: Panel
>
<ConversationListItem
title={chat.title || 'New Chat'}
isActive={Boolean(chat.conversationId)}
isActive={Boolean(chat.activeStreamId)}
titleClassName='text-[13px]'
actions={
<div

View File

@@ -1,5 +1,5 @@
import { createHmac } from 'crypto'
import { db, workflowExecutionLogs } from '@sim/db'
import { db } from '@sim/db'
import {
account,
workspaceNotificationDelivery,
@@ -17,14 +17,11 @@ import {
import { checkUsageStatus } from '@/lib/billing/calculations/usage-monitor'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { dollarsToCredits } from '@/lib/billing/credits/conversion'
import { createBullMQJobData, isBullMQEnabled } from '@/lib/core/bullmq'
import { acquireLock } from '@/lib/core/config/redis'
import { RateLimiter } from '@/lib/core/rate-limiter'
import { decryptSecret } from '@/lib/core/security/encryption'
import { secureFetchWithValidation } from '@/lib/core/security/input-validation.server'
import { formatDuration } from '@/lib/core/utils/formatting'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { enqueueWorkspaceDispatch } from '@/lib/core/workspace-dispatch'
import type { TraceSpan, WorkflowExecutionLog } from '@/lib/logs/types'
import { sendEmail } from '@/lib/messaging/email/mailer'
import type { AlertConfig } from '@/lib/notifications/alert-rules'
@@ -35,7 +32,6 @@ const logger = createLogger('WorkspaceNotificationDelivery')
const MAX_ATTEMPTS = 5
const RETRY_DELAYS = [5 * 1000, 15 * 1000, 60 * 1000, 3 * 60 * 1000, 10 * 60 * 1000]
const NOTIFICATION_DISPATCH_LOCK_TTL_SECONDS = 3
function getRetryDelayWithJitter(baseDelay: number): number {
const jitter = Math.random() * 0.1 * baseDelay
@@ -500,157 +496,6 @@ export type NotificationDeliveryResult =
| { status: 'success' | 'skipped' | 'failed' }
| { status: 'retry'; retryDelayMs: number }
async function buildRetryLog(params: NotificationDeliveryParams): Promise<WorkflowExecutionLog> {
const conditions = [eq(workflowExecutionLogs.executionId, params.log.executionId)]
if (params.log.workflowId) {
conditions.push(eq(workflowExecutionLogs.workflowId, params.log.workflowId))
}
const [storedLog] = await db
.select()
.from(workflowExecutionLogs)
.where(and(...conditions))
.limit(1)
if (storedLog) {
return storedLog as unknown as WorkflowExecutionLog
}
const now = new Date().toISOString()
return {
id: `retry_log_${params.deliveryId}`,
workflowId: params.log.workflowId,
executionId: params.log.executionId,
stateSnapshotId: '',
level: 'info',
trigger: 'system',
startedAt: now,
endedAt: now,
totalDurationMs: 0,
executionData: {},
cost: { total: 0 },
createdAt: now,
}
}
export async function enqueueNotificationDeliveryDispatch(
params: NotificationDeliveryParams
): Promise<boolean> {
if (!isBullMQEnabled()) {
return false
}
const lockAcquired = await acquireLock(
`workspace-notification-dispatch:${params.deliveryId}`,
params.deliveryId,
NOTIFICATION_DISPATCH_LOCK_TTL_SECONDS
)
if (!lockAcquired) {
return false
}
await enqueueWorkspaceDispatch({
workspaceId: params.workspaceId,
lane: 'lightweight',
queueName: 'workspace-notification-delivery',
bullmqJobName: 'workspace-notification-delivery',
bullmqPayload: createBullMQJobData(params),
metadata: {
workflowId: params.log.workflowId ?? undefined,
},
})
return true
}
const STUCK_IN_PROGRESS_THRESHOLD_MS = 5 * 60 * 1000
export async function sweepPendingNotificationDeliveries(limit = 50): Promise<number> {
if (!isBullMQEnabled()) {
return 0
}
const stuckThreshold = new Date(Date.now() - STUCK_IN_PROGRESS_THRESHOLD_MS)
await db
.update(workspaceNotificationDelivery)
.set({
status: 'pending',
updatedAt: new Date(),
})
.where(
and(
eq(workspaceNotificationDelivery.status, 'in_progress'),
lte(workspaceNotificationDelivery.lastAttemptAt, stuckThreshold)
)
)
const dueDeliveries = await db
.select({
deliveryId: workspaceNotificationDelivery.id,
subscriptionId: workspaceNotificationDelivery.subscriptionId,
workflowId: workspaceNotificationDelivery.workflowId,
executionId: workspaceNotificationDelivery.executionId,
workspaceId: workspaceNotificationSubscription.workspaceId,
alertConfig: workspaceNotificationSubscription.alertConfig,
notificationType: workspaceNotificationSubscription.notificationType,
})
.from(workspaceNotificationDelivery)
.innerJoin(
workspaceNotificationSubscription,
eq(workspaceNotificationDelivery.subscriptionId, workspaceNotificationSubscription.id)
)
.where(
and(
eq(workspaceNotificationDelivery.status, 'pending'),
or(
isNull(workspaceNotificationDelivery.nextAttemptAt),
lte(workspaceNotificationDelivery.nextAttemptAt, new Date())
)
)
)
.limit(limit)
let enqueued = 0
for (const delivery of dueDeliveries) {
const params: NotificationDeliveryParams = {
deliveryId: delivery.deliveryId,
subscriptionId: delivery.subscriptionId,
workspaceId: delivery.workspaceId,
notificationType: delivery.notificationType,
log: await buildRetryLog({
deliveryId: delivery.deliveryId,
subscriptionId: delivery.subscriptionId,
workspaceId: delivery.workspaceId,
notificationType: delivery.notificationType,
log: {
id: '',
workflowId: delivery.workflowId,
executionId: delivery.executionId,
stateSnapshotId: '',
level: 'info',
trigger: 'system',
startedAt: '',
endedAt: '',
totalDurationMs: 0,
executionData: {},
cost: { total: 0 },
createdAt: '',
},
alertConfig: (delivery.alertConfig as AlertConfig | null) ?? undefined,
}),
alertConfig: (delivery.alertConfig as AlertConfig | null) ?? undefined,
}
if (await enqueueNotificationDeliveryDispatch(params)) {
enqueued += 1
}
}
return enqueued
}
export async function executeNotificationDelivery(
params: NotificationDeliveryParams
): Promise<NotificationDeliveryResult> {

View File

@@ -337,7 +337,7 @@ describe.concurrent('Blocks Module', () => {
expect(modelSubBlock).toBeDefined()
expect(modelSubBlock?.type).toBe('combobox')
expect(modelSubBlock?.required).toBe(true)
expect(modelSubBlock?.defaultValue).toBe('claude-sonnet-4-5')
expect(modelSubBlock?.defaultValue).toBe('claude-sonnet-4-6')
})
it('should have LLM tool access', () => {

View File

@@ -130,7 +130,7 @@ Return ONLY the JSON array.`,
type: 'combobox',
placeholder: 'Type or select a model...',
required: true,
defaultValue: 'claude-sonnet-4-5',
defaultValue: 'claude-sonnet-4-6',
options: getModelOptions,
},
{
@@ -458,7 +458,7 @@ Return ONLY the JSON array.`,
],
config: {
tool: (params: Record<string, any>) => {
const model = params.model || 'claude-sonnet-4-5'
const model = params.model || 'claude-sonnet-4-6'
if (!model) {
throw new Error('No model selected')
}

View File

@@ -0,0 +1,621 @@
import { AgentMailIcon } from '@/components/icons'
import type { BlockConfig } from '@/blocks/types'
import { AuthMode, IntegrationType } from '@/blocks/types'
export const AgentMailBlock: BlockConfig = {
type: 'agentmail',
name: 'AgentMail',
description: 'Manage email inboxes, threads, and messages with AgentMail',
longDescription:
'Integrate AgentMail into your workflow. Create and manage email inboxes, send and receive messages, reply to threads, manage drafts, and organize threads with labels. Requires API Key.',
docsLink: 'https://docs.sim.ai/tools/agentmail',
category: 'tools',
integrationType: IntegrationType.Email,
tags: ['messaging'],
bgColor: '#000000',
icon: AgentMailIcon,
authMode: AuthMode.ApiKey,
subBlocks: [
{
id: 'operation',
title: 'Operation',
type: 'dropdown',
options: [
{ label: 'Send Message', id: 'send_message' },
{ label: 'Reply to Message', id: 'reply_message' },
{ label: 'Forward Message', id: 'forward_message' },
{ label: 'List Threads', id: 'list_threads' },
{ label: 'Get Thread', id: 'get_thread' },
{ label: 'Update Thread Labels', id: 'update_thread' },
{ label: 'Delete Thread', id: 'delete_thread' },
{ label: 'List Messages', id: 'list_messages' },
{ label: 'Get Message', id: 'get_message' },
{ label: 'Update Message Labels', id: 'update_message' },
{ label: 'Create Draft', id: 'create_draft' },
{ label: 'List Drafts', id: 'list_drafts' },
{ label: 'Get Draft', id: 'get_draft' },
{ label: 'Update Draft', id: 'update_draft' },
{ label: 'Delete Draft', id: 'delete_draft' },
{ label: 'Send Draft', id: 'send_draft' },
{ label: 'Create Inbox', id: 'create_inbox' },
{ label: 'List Inboxes', id: 'list_inboxes' },
{ label: 'Get Inbox', id: 'get_inbox' },
{ label: 'Update Inbox', id: 'update_inbox' },
{ label: 'Delete Inbox', id: 'delete_inbox' },
],
value: () => 'send_message',
},
{
id: 'apiKey',
title: 'API Key',
type: 'short-input',
placeholder: 'Enter your AgentMail API key',
required: true,
password: true,
},
// Send Message fields
{
id: 'inboxId',
title: 'Inbox ID',
type: 'short-input',
placeholder: 'Inbox ID',
condition: {
field: 'operation',
value: [
'send_message',
'reply_message',
'forward_message',
'list_threads',
'get_thread',
'update_thread',
'delete_thread',
'list_messages',
'get_message',
'update_message',
'create_draft',
'list_drafts',
'get_draft',
'update_draft',
'delete_draft',
'send_draft',
],
},
required: {
field: 'operation',
value: [
'send_message',
'reply_message',
'forward_message',
'list_threads',
'get_thread',
'update_thread',
'delete_thread',
'list_messages',
'get_message',
'update_message',
'create_draft',
'list_drafts',
'get_draft',
'update_draft',
'delete_draft',
'send_draft',
],
},
},
{
id: 'to',
title: 'To',
type: 'short-input',
placeholder: 'recipient@example.com',
condition: {
field: 'operation',
value: ['send_message', 'forward_message', 'create_draft', 'update_draft'],
},
required: { field: 'operation', value: ['send_message', 'forward_message'] },
},
{
id: 'subject',
title: 'Subject',
type: 'short-input',
placeholder: 'Email subject',
condition: {
field: 'operation',
value: ['send_message', 'forward_message', 'create_draft', 'update_draft'],
},
required: { field: 'operation', value: 'send_message' },
wandConfig: {
enabled: true,
prompt:
'Generate a compelling email subject line based on the description. Keep it concise. Return ONLY the subject line.',
placeholder: 'Describe the email topic...',
},
},
{
id: 'text',
title: 'Text',
type: 'long-input',
placeholder: 'Plain text email body',
condition: {
field: 'operation',
value: ['send_message', 'reply_message', 'forward_message', 'create_draft', 'update_draft'],
},
wandConfig: {
enabled: true,
prompt:
'Generate email content based on the description. Use clear formatting with short paragraphs. Return ONLY the email body.',
placeholder: 'Describe the email content...',
},
},
{
id: 'html',
title: 'HTML',
type: 'long-input',
placeholder: '<p>HTML email body</p>',
condition: {
field: 'operation',
value: ['send_message', 'reply_message', 'forward_message', 'create_draft', 'update_draft'],
},
mode: 'advanced',
},
{
id: 'cc',
title: 'CC',
type: 'short-input',
placeholder: 'cc@example.com',
condition: {
field: 'operation',
value: ['send_message', 'reply_message', 'forward_message', 'create_draft', 'update_draft'],
},
mode: 'advanced',
},
{
id: 'bcc',
title: 'BCC',
type: 'short-input',
placeholder: 'bcc@example.com',
condition: {
field: 'operation',
value: ['send_message', 'reply_message', 'forward_message', 'create_draft', 'update_draft'],
},
mode: 'advanced',
},
// Reply to Message fields
{
id: 'replyMessageId',
title: 'Message ID to Reply To',
type: 'short-input',
placeholder: 'Message ID',
condition: { field: 'operation', value: 'reply_message' },
required: { field: 'operation', value: 'reply_message' },
},
{
id: 'replyTo',
title: 'Override To',
type: 'short-input',
placeholder: 'Override recipient (optional)',
condition: { field: 'operation', value: 'reply_message' },
mode: 'advanced',
},
{
id: 'replyAll',
title: 'Reply All',
type: 'dropdown',
options: [
{ label: 'No', id: 'false' },
{ label: 'Yes', id: 'true' },
],
value: () => 'false',
condition: { field: 'operation', value: 'reply_message' },
mode: 'advanced',
},
// Thread ID fields (shared across thread operations)
{
id: 'threadId',
title: 'Thread ID',
type: 'short-input',
placeholder: 'Thread ID',
condition: {
field: 'operation',
value: ['get_thread', 'update_thread', 'delete_thread'],
},
required: {
field: 'operation',
value: ['get_thread', 'update_thread', 'delete_thread'],
},
},
// Update Thread Labels fields
{
id: 'addLabels',
title: 'Add Labels',
type: 'short-input',
placeholder: 'important, follow-up',
condition: { field: 'operation', value: 'update_thread' },
},
{
id: 'removeLabels',
title: 'Remove Labels',
type: 'short-input',
placeholder: 'inbox, unread',
condition: { field: 'operation', value: 'update_thread' },
},
// Delete Thread fields
{
id: 'permanent',
title: 'Permanent Delete',
type: 'dropdown',
options: [
{ label: 'No (move to trash)', id: 'false' },
{ label: 'Yes (permanent)', id: 'true' },
],
value: () => 'false',
condition: { field: 'operation', value: 'delete_thread' },
mode: 'advanced',
},
// Forward Message fields
{
id: 'forwardMessageId',
title: 'Message ID to Forward',
type: 'short-input',
placeholder: 'Message ID',
condition: { field: 'operation', value: 'forward_message' },
required: { field: 'operation', value: 'forward_message' },
},
// Update Message Labels fields
{
id: 'updateMessageId',
title: 'Message ID',
type: 'short-input',
placeholder: 'Message ID',
condition: { field: 'operation', value: 'update_message' },
required: { field: 'operation', value: 'update_message' },
},
{
id: 'msgAddLabels',
title: 'Add Labels',
type: 'short-input',
placeholder: 'important, follow-up',
condition: { field: 'operation', value: 'update_message' },
},
{
id: 'msgRemoveLabels',
title: 'Remove Labels',
type: 'short-input',
placeholder: 'inbox, unread',
condition: { field: 'operation', value: 'update_message' },
},
// Get Message fields
{
id: 'messageId',
title: 'Message ID',
type: 'short-input',
placeholder: 'Message ID',
condition: { field: 'operation', value: 'get_message' },
required: { field: 'operation', value: 'get_message' },
},
// Draft ID fields (shared across draft operations)
{
id: 'draftId',
title: 'Draft ID',
type: 'short-input',
placeholder: 'Draft ID',
condition: {
field: 'operation',
value: ['get_draft', 'update_draft', 'delete_draft', 'send_draft'],
},
required: {
field: 'operation',
value: ['get_draft', 'update_draft', 'delete_draft', 'send_draft'],
},
},
// Create/Update Draft fields
{
id: 'draftInReplyTo',
title: 'In Reply To',
type: 'short-input',
placeholder: 'Message ID this draft replies to',
condition: { field: 'operation', value: 'create_draft' },
mode: 'advanced',
},
{
id: 'sendAt',
title: 'Schedule Send',
type: 'short-input',
placeholder: 'ISO 8601 timestamp to schedule sending',
condition: { field: 'operation', value: ['create_draft', 'update_draft'] },
mode: 'advanced',
wandConfig: {
enabled: true,
generationType: 'timestamp',
prompt: 'Generate an ISO 8601 timestamp. Return ONLY the timestamp string.',
placeholder: 'Describe when to send (e.g., "tomorrow at 9am")...',
},
},
// Create Inbox fields
{
id: 'username',
title: 'Username',
type: 'short-input',
placeholder: 'Optional username for email address',
condition: { field: 'operation', value: 'create_inbox' },
},
{
id: 'domain',
title: 'Domain',
type: 'short-input',
placeholder: 'Optional domain for email address',
condition: { field: 'operation', value: 'create_inbox' },
mode: 'advanced',
},
{
id: 'displayName',
title: 'Display Name',
type: 'short-input',
placeholder: 'Inbox display name',
condition: { field: 'operation', value: ['create_inbox', 'update_inbox'] },
required: { field: 'operation', value: 'update_inbox' },
},
// Inbox ID for get/update/delete inbox
{
id: 'inboxIdParam',
title: 'Inbox ID',
type: 'short-input',
placeholder: 'Inbox ID',
condition: {
field: 'operation',
value: ['get_inbox', 'update_inbox', 'delete_inbox'],
},
required: {
field: 'operation',
value: ['get_inbox', 'update_inbox', 'delete_inbox'],
},
},
// Pagination fields (advanced)
{
id: 'limit',
title: 'Limit',
type: 'short-input',
placeholder: 'Max results to return',
condition: {
field: 'operation',
value: ['list_inboxes', 'list_threads', 'list_messages', 'list_drafts'],
},
mode: 'advanced',
},
{
id: 'pageToken',
title: 'Page Token',
type: 'short-input',
placeholder: 'Pagination token',
condition: {
field: 'operation',
value: ['list_inboxes', 'list_threads', 'list_messages', 'list_drafts'],
},
mode: 'advanced',
},
// List Threads filters (advanced)
{
id: 'labels',
title: 'Labels Filter',
type: 'short-input',
placeholder: 'Filter by labels (comma-separated)',
condition: { field: 'operation', value: 'list_threads' },
mode: 'advanced',
},
{
id: 'before',
title: 'Before',
type: 'short-input',
placeholder: 'Filter threads before this date',
condition: { field: 'operation', value: 'list_threads' },
mode: 'advanced',
wandConfig: {
enabled: true,
generationType: 'timestamp',
prompt: 'Generate an ISO 8601 timestamp. Return ONLY the timestamp string.',
placeholder: 'Describe the date (e.g., "yesterday")...',
},
},
{
id: 'after',
title: 'After',
type: 'short-input',
placeholder: 'Filter threads after this date',
condition: { field: 'operation', value: 'list_threads' },
mode: 'advanced',
wandConfig: {
enabled: true,
generationType: 'timestamp',
prompt: 'Generate an ISO 8601 timestamp. Return ONLY the timestamp string.',
placeholder: 'Describe the date (e.g., "last week")...',
},
},
],
tools: {
access: [
'agentmail_create_draft',
'agentmail_create_inbox',
'agentmail_delete_draft',
'agentmail_delete_inbox',
'agentmail_delete_thread',
'agentmail_forward_message',
'agentmail_get_draft',
'agentmail_get_inbox',
'agentmail_get_message',
'agentmail_get_thread',
'agentmail_list_drafts',
'agentmail_list_inboxes',
'agentmail_list_messages',
'agentmail_list_threads',
'agentmail_reply_message',
'agentmail_send_draft',
'agentmail_send_message',
'agentmail_update_draft',
'agentmail_update_inbox',
'agentmail_update_message',
'agentmail_update_thread',
],
config: {
tool: (params) => `agentmail_${params.operation || 'send_message'}`,
// Build the final tool-call parameters from the raw sub-block values.
// Several sub-blocks use operation-specific ids (replyMessageId,
// forwardMessageId, msgAddLabels, ...) so they don't collide in the UI;
// they are destructured off here and re-mapped onto the canonical tool
// parameter names only when the matching operation is selected.
params: (params) => {
  const {
    operation,
    inboxIdParam,
    permanent,
    replyMessageId,
    replyTo,
    replyAll,
    forwardMessageId,
    updateMessageId,
    msgAddLabels,
    msgRemoveLabels,
    addLabels,
    removeLabels,
    draftInReplyTo,
    ...rest
  } = params
  // get/update/delete inbox use a dedicated sub-block id; map it back to inboxId.
  if (['get_inbox', 'update_inbox', 'delete_inbox'].includes(operation) && inboxIdParam) {
    rest.inboxId = inboxIdParam
  }
  // Dropdown values arrive as the strings 'true'/'false'; tools expect booleans.
  if (operation === 'delete_thread' && permanent !== undefined) {
    rest.permanent = permanent === 'true'
  }
  if (operation === 'reply_message' && replyAll !== undefined) {
    rest.replyAll = replyAll === 'true'
  }
  if (operation === 'reply_message' && replyMessageId) {
    rest.messageId = replyMessageId
  }
  if (operation === 'reply_message' && replyTo) {
    rest.to = replyTo
  } else if (operation === 'reply_message') {
    // No explicit override: clear `to` so the send-message field doesn't
    // leak into the reply call.
    rest.to = undefined
  }
  if (operation === 'forward_message' && forwardMessageId) {
    rest.messageId = forwardMessageId
  }
  if (operation === 'update_message' && updateMessageId) {
    rest.messageId = updateMessageId
  }
  if (operation === 'update_message' && msgAddLabels) {
    rest.addLabels = msgAddLabels
  }
  if (operation === 'update_message' && msgRemoveLabels) {
    rest.removeLabels = msgRemoveLabels
  }
  if (operation === 'update_thread' && addLabels) {
    rest.addLabels = addLabels
  }
  if (operation === 'update_thread' && removeLabels) {
    rest.removeLabels = removeLabels
  }
  if (operation === 'create_draft' && draftInReplyTo) {
    rest.inReplyTo = draftInReplyTo
  }
  // limit comes from a short-input (string); tools expect a number.
  if (rest.limit) {
    rest.limit = Number(rest.limit)
  }
  return rest
},
},
},
inputs: {
operation: { type: 'string', description: 'Operation to perform' },
apiKey: { type: 'string', description: 'AgentMail API key' },
inboxId: { type: 'string', description: 'Inbox ID' },
inboxIdParam: {
type: 'string',
description: 'Inbox ID for get/update/delete inbox operations',
},
to: { type: 'string', description: 'Recipient email address' },
subject: { type: 'string', description: 'Email subject' },
text: { type: 'string', description: 'Plain text email body' },
html: { type: 'string', description: 'HTML email body' },
cc: { type: 'string', description: 'CC email addresses' },
bcc: { type: 'string', description: 'BCC email addresses' },
replyMessageId: { type: 'string', description: 'Message ID to reply to' },
replyTo: { type: 'string', description: 'Override recipient for reply' },
replyAll: { type: 'string', description: 'Reply to all recipients' },
forwardMessageId: { type: 'string', description: 'Message ID to forward' },
updateMessageId: { type: 'string', description: 'Message ID to update labels on' },
msgAddLabels: { type: 'string', description: 'Labels to add to message' },
msgRemoveLabels: { type: 'string', description: 'Labels to remove from message' },
threadId: { type: 'string', description: 'Thread ID' },
addLabels: { type: 'string', description: 'Labels to add to thread (comma-separated)' },
removeLabels: { type: 'string', description: 'Labels to remove from thread (comma-separated)' },
permanent: { type: 'string', description: 'Whether to permanently delete' },
messageId: { type: 'string', description: 'Message ID' },
draftId: { type: 'string', description: 'Draft ID' },
draftInReplyTo: { type: 'string', description: 'Message ID this draft replies to' },
sendAt: { type: 'string', description: 'ISO 8601 timestamp to schedule sending' },
username: { type: 'string', description: 'Username for new inbox' },
domain: { type: 'string', description: 'Domain for new inbox' },
displayName: { type: 'string', description: 'Display name for inbox' },
limit: { type: 'string', description: 'Max results to return' },
pageToken: { type: 'string', description: 'Pagination token' },
labels: { type: 'string', description: 'Labels filter for threads' },
before: { type: 'string', description: 'Filter threads before this date' },
after: { type: 'string', description: 'Filter threads after this date' },
},
outputs: {
inboxId: { type: 'string', description: 'Inbox ID' },
email: { type: 'string', description: 'Inbox email address' },
displayName: { type: 'string', description: 'Inbox display name' },
threadId: { type: 'string', description: 'Thread ID' },
messageId: { type: 'string', description: 'Message ID' },
draftId: { type: 'string', description: 'Draft ID' },
subject: { type: 'string', description: 'Email subject' },
to: { type: 'string', description: 'Recipient email address' },
from: { type: 'string', description: 'Sender email address' },
text: { type: 'string', description: 'Plain text content' },
html: { type: 'string', description: 'HTML content' },
preview: { type: 'string', description: 'Message or draft preview text' },
senders: { type: 'json', description: 'List of sender email addresses' },
recipients: { type: 'json', description: 'List of recipient email addresses' },
labels: { type: 'json', description: 'Thread or draft labels' },
messages: { type: 'json', description: 'List of messages' },
threads: { type: 'json', description: 'List of threads' },
inboxes: { type: 'json', description: 'List of inboxes' },
drafts: { type: 'json', description: 'List of drafts' },
messageCount: { type: 'number', description: 'Number of messages in thread' },
count: { type: 'number', description: 'Total number of results' },
nextPageToken: { type: 'string', description: 'Token for next page of results' },
deleted: { type: 'boolean', description: 'Whether the resource was deleted' },
sendStatus: { type: 'string', description: 'Draft send status' },
sendAt: { type: 'string', description: 'Scheduled send time' },
inReplyTo: { type: 'string', description: 'Message ID this draft replies to' },
createdAt: { type: 'string', description: 'Creation timestamp' },
updatedAt: { type: 'string', description: 'Last updated timestamp' },
},
}

View File

@@ -177,7 +177,7 @@ export const EvaluatorBlock: BlockConfig<EvaluatorResponse> = {
type: 'combobox',
placeholder: 'Type or select a model...',
required: true,
defaultValue: 'claude-sonnet-4-5',
defaultValue: 'claude-sonnet-4-6',
options: getModelOptions,
},
...getProviderCredentialSubBlocks(),

File diff suppressed because it is too large Load Diff

View File

@@ -171,7 +171,7 @@ export const RouterBlock: BlockConfig<RouterResponse> = {
type: 'combobox',
placeholder: 'Type or select a model...',
required: true,
defaultValue: 'claude-sonnet-4-5',
defaultValue: 'claude-sonnet-4-6',
options: getModelOptions,
},
...getProviderCredentialSubBlocks(),
@@ -298,7 +298,7 @@ export const RouterV2Block: BlockConfig<RouterV2Response> = {
type: 'combobox',
placeholder: 'Type or select a model...',
required: true,
defaultValue: 'claude-sonnet-4-5',
defaultValue: 'claude-sonnet-4-6',
options: getModelOptions,
},
...getProviderCredentialSubBlocks(),

View File

@@ -1,5 +1,6 @@
import { A2ABlock } from '@/blocks/blocks/a2a'
import { AgentBlock } from '@/blocks/blocks/agent'
import { AgentMailBlock } from '@/blocks/blocks/agentmail'
import { AhrefsBlock } from '@/blocks/blocks/ahrefs'
import { AirtableBlock } from '@/blocks/blocks/airtable'
import { AirweaveBlock } from '@/blocks/blocks/airweave'
@@ -151,6 +152,7 @@ import { ResendBlock } from '@/blocks/blocks/resend'
import { ResponseBlock } from '@/blocks/blocks/response'
import { RevenueCatBlock } from '@/blocks/blocks/revenuecat'
import { RipplingBlock } from '@/blocks/blocks/rippling'
import { RootlyBlock } from '@/blocks/blocks/rootly'
import { RouterBlock, RouterV2Block } from '@/blocks/blocks/router'
import { RssBlock } from '@/blocks/blocks/rss'
import { S3Block } from '@/blocks/blocks/s3'
@@ -216,6 +218,7 @@ import type { BlockConfig } from '@/blocks/types'
export const registry: Record<string, BlockConfig> = {
a2a: A2ABlock,
agent: AgentBlock,
agentmail: AgentMailBlock,
ahrefs: AhrefsBlock,
airtable: AirtableBlock,
airweave: AirweaveBlock,
@@ -382,6 +385,7 @@ export const registry: Record<string, BlockConfig> = {
response: ResponseBlock,
revenuecat: RevenueCatBlock,
rippling: RipplingBlock,
rootly: RootlyBlock,
router: RouterBlock,
router_v2: RouterV2Block,
rss: RssBlock,

View File

@@ -1,6 +1,33 @@
import type { SVGProps } from 'react'
import { useId } from 'react'
// AgentMail brand mark rendered as an inline SVG. Fill uses `currentColor`
// so the icon inherits the surrounding text color; extra SVG props (size,
// className, ...) are spread onto the root element.
export function AgentMailIcon(props: SVGProps<SVGSVGElement>) {
  return (
    <svg {...props} viewBox='0 0 350 363' fill='none' xmlns='http://www.w3.org/2000/svg'>
      <path
        d='M318.029 88.3407C196.474 115.33 153.48 115.321 33.9244 88.3271C30.6216 87.5814 27.1432 88.9727 25.3284 91.8313L1.24109 129.774C-1.76483 134.509 0.965276 140.798 6.46483 141.898C152.613 171.13 197.678 171.182 343.903 141.835C349.304 140.751 352.064 134.641 349.247 129.907L326.719 92.0479C324.95 89.0744 321.407 87.5907 318.029 88.3407Z'
        fill='currentColor'
      />
      <path
        d='M75.9931 246.6L149.939 311.655C151.973 313.444 151.633 316.969 149.281 318.48L119.141 337.84C117.283 339.034 114.951 338.412 113.933 336.452L70.1276 252.036C68.0779 248.086 72.7553 243.751 75.9931 246.6Z'
        fill='currentColor'
      />
      <path
        d='M274.025 246.6L200.08 311.655C198.046 313.444 198.385 316.969 200.737 318.48L230.877 337.84C232.736 339.034 235.068 338.412 236.085 336.452L279.891 252.036C281.941 248.086 277.263 243.751 274.025 246.6Z'
        fill='currentColor'
      />
      <path
        d='M138.75 198.472L152.436 192.983C155.238 191.918 157.77 191.918 158.574 191.918C164.115 192.126 169.564 192.232 175.009 192.235C180.454 192.232 185.904 192.126 191.444 191.918C192.248 191.918 194.78 191.918 197.583 192.983L211.269 198.472C212.645 199.025 214.082 199.382 215.544 199.448C218.585 199.587 221.733 199.464 224.63 198.811C225.706 198.568 226.728 198.103 227.704 197.545L243.046 188.784C244.81 187.777 246.726 187.138 248.697 186.9L258.276 185.5H259.242H263.556L262.713 190.965L256.679 234.22C255.957 238.31 254.25 242.328 250.443 245.834L187.376 299.258C184.555 301.648 181.107 302.942 177.562 302.942H175.009H172.457C168.911 302.942 165.464 301.648 162.643 299.258L99.5761 245.834C95.7684 242.328 94.0614 238.31 93.3393 234.22L87.3059 190.965L86.4624 185.5H90.7771H91.7429L101.322 186.9C103.293 187.138 105.208 187.777 106.972 188.784L122.314 197.545C123.291 198.103 124.313 198.568 125.389 198.811C128.286 199.464 131.434 199.587 134.474 199.448C135.936 199.382 137.373 199.025 138.75 198.472Z'
        fill='currentColor'
      />
      <path
        d='M102.47 0.847827C205.434 44.796 156.456 42.1015 248.434 1.63153C252.885 -1.09955 258.353 1.88915 259.419 7.69219L269.235 61.1686L270.819 69.7893L263.592 71.8231L263.582 71.8259C190.588 92.3069 165.244 92.0078 86.7576 71.7428L79.1971 69.7905L80.9925 60.8681L91.8401 6.91975C92.9559 1.3706 98.105 -1.55777 102.47 0.847827Z'
        fill='currentColor'
      />
    </svg>
  )
}
export function SearchIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg
@@ -6387,6 +6414,41 @@ export function RipplingIcon(props: SVGProps<SVGSVGElement>) {
)
}
// Rootly brand mark rendered as an inline SVG. Fill uses `currentColor`
// so the icon inherits the surrounding text color; extra SVG props are
// spread onto the root element.
export function RootlyIcon(props: SVGProps<SVGSVGElement>) {
  return (
    <svg {...props} viewBox='0 0 250 217' fill='none' xmlns='http://www.w3.org/2000/svg'>
      <path
        d='m124.8 5.21c-9.62 11.52-15.84 24.61-15.84 35.75 0 11.65 7.22 21.11 15.56 21.11 8.72 0 15.81-10.18 15.81-21.25 0-11.06-6.44-24.29-15.53-35.61z'
        fill='currentColor'
      />
      <path
        d='m124.7 84.29c-9.76 11.45-16.05 23.67-16.05 34.88 0 10.99 7.15 20.82 15.74 20.51 8.72-0.34 16.25-10.31 16.04-21.37-0.27-11.06-6.58-22.64-15.73-34.02z'
        fill='currentColor'
      />
      <path
        d='m48.81 48.5c5.82 18.47 16.5 35.38 33.97 36.06 10.99 0.4 15.38-7.12 15.31-12.52-0.13-9.19-8.14-24.76-36.9-24.76-4.74 0-8.26 0.34-12.38 1.22z'
        fill='currentColor'
      />
      <path
        d='m18.92 99.03c9.83 15.7 22.58 26.25 36.07 26.39 9.9 0 18.18-5.68 18.12-14.34-0.07-7.92-8.35-18.84-25.25-18.84-9.69 0-17.77 2.61-28.94 6.79z'
        fill='currentColor'
      />
      <path
        d='m200.1 48.43c-4.18-1.01-7.63-1.29-13.32-1.29-21.73 0-36.35 9.91-36.69 24.7-0.2 7.52 6.17 12.78 15.83 12.78 14.48 0 26.89-14.79 34.18-36.19z'
        fill='currentColor'
      />
      <path
        d='m230.6 98.96c-9.9-4.58-18.55-6.72-28.77-6.72-15.59 0-26.14 10.72-26.07 19.38 0.07 7.71 7.73 13.53 17.13 13.53 12.34 0 25.23-9.81 37.71-26.19z'
        fill='currentColor'
      />
      <path
        d='m6.12 146.9 3.65 24.48c10.99-2.34 21.41-3.21 34.17-3.21 38.03 0 63.94 13.69 66.15 41.52h28.83c2.69-26.48 24.99-41.52 66.67-41.52 11.62 0 22.37 1.15 34.32 3.21l4.05-24.34c-10.99-1.8-20.72-2.41-32.73-2.41-38.44 0-68.07 10.32-86.55 31.79-16.25-19.98-42.03-31.79-84.53-31.79-12.01 0-23.36 0.61-34.03 2.27z'
        fill='currentColor'
      />
    </svg>
  )
}
export function HexIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 1450.3 600'>

View File

@@ -190,7 +190,7 @@ export const HTTP = {
} as const
export const AGENT = {
DEFAULT_MODEL: 'claude-sonnet-4-5',
DEFAULT_MODEL: 'claude-sonnet-4-6',
get DEFAULT_FUNCTION_TIMEOUT() {
return getMaxExecutionTimeout()
},
@@ -225,13 +225,13 @@ export const MEMORY = {
} as const
export const ROUTER = {
DEFAULT_MODEL: 'claude-sonnet-4-5',
DEFAULT_MODEL: 'claude-sonnet-4-6',
DEFAULT_TEMPERATURE: 0,
INFERENCE_TEMPERATURE: 0.1,
} as const
export const EVALUATOR = {
DEFAULT_MODEL: 'claude-sonnet-4-5',
DEFAULT_MODEL: 'claude-sonnet-4-6',
DEFAULT_TEMPERATURE: 0.1,
RESPONSE_SCHEMA_NAME: 'evaluation_response',
JSON_INDENT: 2,

View File

@@ -463,7 +463,7 @@ describe('EvaluatorBlockHandler', () => {
json: () =>
Promise.resolve({
content: JSON.stringify({ score: 7 }),
model: 'claude-sonnet-4-5',
model: 'claude-sonnet-4-6',
tokens: {},
cost: 0,
timing: {},
@@ -476,6 +476,6 @@ describe('EvaluatorBlockHandler', () => {
const fetchCallArgs = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCallArgs[1].body)
expect(requestBody.model).toBe('claude-sonnet-4-5')
expect(requestBody.model).toBe('claude-sonnet-4-6')
})
})

View File

@@ -220,12 +220,12 @@ describe('RouterBlockHandler', () => {
await handler.execute(mockContext, mockBlock, inputs)
expect(mockGetProviderFromModel).toHaveBeenCalledWith('claude-sonnet-4-5')
expect(mockGetProviderFromModel).toHaveBeenCalledWith('claude-sonnet-4-6')
const fetchCallArgs = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCallArgs[1].body)
expect(requestBody).toMatchObject({
model: 'claude-sonnet-4-5',
model: 'claude-sonnet-4-6',
temperature: 0.1,
})
})

View File

@@ -1,4 +1,6 @@
import { keepPreviousData, useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
import type { PersistedMessage } from '@/lib/copilot/chat/persisted-message'
import { normalizeMessage } from '@/lib/copilot/chat/persisted-message'
import type { MothershipResource } from '@/app/workspace/[workspaceId]/home/types'
export interface TaskMetadata {
@@ -9,70 +11,13 @@ export interface TaskMetadata {
isUnread: boolean
}
export interface StreamSnapshot {
events: Array<{ eventId: number; streamId: string; event: Record<string, unknown> }>
status: string
}
export interface TaskChatHistory {
id: string
title: string | null
messages: TaskStoredMessage[]
messages: PersistedMessage[]
activeStreamId: string | null
resources: MothershipResource[]
streamSnapshot?: StreamSnapshot | null
}
export interface TaskStoredToolCall {
id: string
name: string
status: string
params?: Record<string, unknown>
result?: unknown
error?: string
durationMs?: number
}
export interface TaskStoredFileAttachment {
id: string
key: string
filename: string
media_type: string
size: number
}
export interface TaskStoredMessageContext {
kind: string
label: string
workflowId?: string
knowledgeId?: string
tableId?: string
fileId?: string
}
export interface TaskStoredMessage {
id: string
role: 'user' | 'assistant'
content: string
requestId?: string
toolCalls?: TaskStoredToolCall[]
contentBlocks?: TaskStoredContentBlock[]
fileAttachments?: TaskStoredFileAttachment[]
contexts?: TaskStoredMessageContext[]
}
export interface TaskStoredContentBlock {
type: string
content?: string
toolCall?: {
id?: string
name?: string
state?: string
params?: Record<string, unknown>
result?: { success: boolean; output?: unknown; error?: string }
display?: { text?: string }
calledBy?: string
} | null
streamSnapshot?: { events: unknown[]; status: string } | null
}
export const taskKeys = {
@@ -87,7 +32,7 @@ interface TaskResponse {
id: string
title: string | null
updatedAt: string
conversationId: string | null
activeStreamId: string | null
lastSeenAt: string | null
}
@@ -97,9 +42,9 @@ function mapTask(chat: TaskResponse): TaskMetadata {
id: chat.id,
name: chat.title ?? 'New task',
updatedAt,
isActive: chat.conversationId !== null,
isActive: chat.activeStreamId !== null,
isUnread:
chat.conversationId === null &&
chat.activeStreamId === null &&
(chat.lastSeenAt === null || updatedAt > new Date(chat.lastSeenAt)),
}
}
@@ -159,10 +104,11 @@ export async function fetchChatHistory(
return {
id: chat.id,
title: chat.title,
messages: Array.isArray(chat.messages) ? chat.messages : [],
activeStreamId: chat.conversationId || null,
messages: Array.isArray(chat.messages)
? chat.messages.map((m: Record<string, unknown>) => normalizeMessage(m))
: [],
activeStreamId: chat.activeStreamId || null,
resources: Array.isArray(chat.resources) ? chat.resources : [],
streamSnapshot: chat.streamSnapshot || null,
}
}

View File

@@ -17,20 +17,10 @@ export const enterpriseSubscriptionMetadataSchema = z.object({
monthlyPrice: z.coerce.number().positive(),
// Number of seats for invitation limits (not for billing)
seats: z.coerce.number().int().positive(),
// Optional custom workspace concurrency limit for enterprise workspaces
workspaceConcurrencyLimit: z.coerce.number().int().positive().optional(),
})
export type EnterpriseSubscriptionMetadata = z.infer<typeof enterpriseSubscriptionMetadataSchema>
const enterpriseWorkspaceConcurrencyMetadataSchema = z.object({
workspaceConcurrencyLimit: z.coerce.number().int().positive().optional(),
})
export type EnterpriseWorkspaceConcurrencyMetadata = z.infer<
typeof enterpriseWorkspaceConcurrencyMetadataSchema
>
export function parseEnterpriseSubscriptionMetadata(
value: unknown
): EnterpriseSubscriptionMetadata | null {
@@ -38,13 +28,6 @@ export function parseEnterpriseSubscriptionMetadata(
return result.success ? result.data : null
}
export function parseEnterpriseWorkspaceConcurrencyMetadata(
value: unknown
): EnterpriseWorkspaceConcurrencyMetadata | null {
const result = enterpriseWorkspaceConcurrencyMetadataSchema.safeParse(value)
return result.success ? result.data : null
}
export interface UsageData {
currentUsage: number
limit: number

View File

@@ -1,146 +0,0 @@
/**
* @vitest-environment node
*/
import { beforeEach, describe, expect, it, vi } from 'vitest'
const {
mockGetHighestPrioritySubscription,
mockGetWorkspaceBilledAccountUserId,
mockFeatureFlags,
mockRedisGet,
mockRedisSet,
mockRedisDel,
mockRedisKeys,
mockGetRedisClient,
} = vi.hoisted(() => ({
mockGetHighestPrioritySubscription: vi.fn(),
mockGetWorkspaceBilledAccountUserId: vi.fn(),
mockFeatureFlags: {
isBillingEnabled: true,
},
mockRedisGet: vi.fn(),
mockRedisSet: vi.fn(),
mockRedisDel: vi.fn(),
mockRedisKeys: vi.fn(),
mockGetRedisClient: vi.fn(),
}))
vi.mock('@sim/logger', () => ({
createLogger: () => ({
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
}),
}))
vi.mock('@/lib/billing/core/plan', () => ({
getHighestPrioritySubscription: mockGetHighestPrioritySubscription,
}))
vi.mock('@/lib/workspaces/utils', () => ({
getWorkspaceBilledAccountUserId: mockGetWorkspaceBilledAccountUserId,
}))
vi.mock('@/lib/core/config/redis', () => ({
getRedisClient: mockGetRedisClient,
}))
vi.mock('@/lib/core/config/feature-flags', () => mockFeatureFlags)
import {
getWorkspaceConcurrencyLimit,
resetWorkspaceConcurrencyLimitCache,
} from '@/lib/billing/workspace-concurrency'
// Unit tests for the workspace-concurrency billing module: the plan -> limit
// mapping plus the Redis-backed cache in front of it. All collaborators
// (subscription lookup, workspace billing, redis client, feature flags) are
// mocked at module level above.
describe('workspace concurrency billing', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Billing is enabled by default; individual tests flip it off.
    mockFeatureFlags.isBillingEnabled = true
    // Default to a Redis cache miss so each test exercises the resolution path.
    mockRedisGet.mockResolvedValue(null)
    mockRedisSet.mockResolvedValue('OK')
    mockRedisDel.mockResolvedValue(1)
    mockRedisKeys.mockResolvedValue([])
    mockGetRedisClient.mockReturnValue({
      get: mockRedisGet,
      set: mockRedisSet,
      del: mockRedisDel,
      keys: mockRedisKeys,
    })
  })
  it('returns free tier when no billed account exists', async () => {
    mockGetWorkspaceBilledAccountUserId.mockResolvedValue(null)
    await expect(getWorkspaceConcurrencyLimit('workspace-1')).resolves.toBe(5)
  })
  it('returns pro limit for pro billing accounts', async () => {
    mockGetWorkspaceBilledAccountUserId.mockResolvedValue('user-1')
    mockGetHighestPrioritySubscription.mockResolvedValue({
      plan: 'pro_6000',
      metadata: null,
    })
    await expect(getWorkspaceConcurrencyLimit('workspace-1')).resolves.toBe(50)
  })
  it('returns max limit for max plan tiers', async () => {
    mockGetWorkspaceBilledAccountUserId.mockResolvedValue('user-1')
    mockGetHighestPrioritySubscription.mockResolvedValue({
      plan: 'pro_25000',
      metadata: null,
    })
    await expect(getWorkspaceConcurrencyLimit('workspace-1')).resolves.toBe(200)
  })
  it('returns max limit for legacy team plans', async () => {
    mockGetWorkspaceBilledAccountUserId.mockResolvedValue('user-1')
    mockGetHighestPrioritySubscription.mockResolvedValue({
      plan: 'team',
      metadata: null,
    })
    await expect(getWorkspaceConcurrencyLimit('workspace-1')).resolves.toBe(200)
  })
  it('returns enterprise metadata override when present', async () => {
    mockGetWorkspaceBilledAccountUserId.mockResolvedValue('user-1')
    mockGetHighestPrioritySubscription.mockResolvedValue({
      plan: 'enterprise',
      // String value on purpose: the metadata schema coerces to number.
      metadata: {
        workspaceConcurrencyLimit: '350',
      },
    })
    await expect(getWorkspaceConcurrencyLimit('workspace-1')).resolves.toBe(350)
  })
  it('uses free-tier limit when billing is disabled', async () => {
    mockFeatureFlags.isBillingEnabled = false
    mockGetWorkspaceBilledAccountUserId.mockResolvedValue('user-1')
    mockGetHighestPrioritySubscription.mockResolvedValue({
      plan: 'pro_25000',
      metadata: {
        workspaceConcurrencyLimit: 999,
      },
    })
    await expect(getWorkspaceConcurrencyLimit('workspace-1')).resolves.toBe(5)
  })
  it('uses redis cache when available', async () => {
    // A cache hit must short-circuit before any billing lookups happen.
    mockRedisGet.mockResolvedValueOnce('123')
    await expect(getWorkspaceConcurrencyLimit('workspace-1')).resolves.toBe(123)
    expect(mockGetWorkspaceBilledAccountUserId).not.toHaveBeenCalled()
  })
  it('can clear a specific workspace cache entry', async () => {
    await resetWorkspaceConcurrencyLimitCache('workspace-1')
    expect(mockRedisDel).toHaveBeenCalledWith('workspace-concurrency-limit:workspace-1')
  })
})

View File

@@ -1,170 +0,0 @@
import { createLogger } from '@sim/logger'
import { getHighestPrioritySubscription } from '@/lib/billing/core/plan'
import { getPlanTierCredits, isEnterprise, isPro, isTeam } from '@/lib/billing/plan-helpers'
import { parseEnterpriseWorkspaceConcurrencyMetadata } from '@/lib/billing/types'
import { env } from '@/lib/core/config/env'
import { isBillingEnabled } from '@/lib/core/config/feature-flags'
import { getRedisClient } from '@/lib/core/config/redis'
import { getWorkspaceBilledAccountUserId } from '@/lib/workspaces/utils'
const logger = createLogger('WorkspaceConcurrencyBilling')
const CACHE_TTL_MS = 60_000
const CACHE_TTL_SECONDS = Math.floor(CACHE_TTL_MS / 1000)
// One cached limit for the in-memory fallback (used when Redis is unavailable).
interface CacheEntry {
  value: number
  // Absolute epoch-ms deadline; entries past this are treated as misses.
  expiresAt: number
}
// Process-local fallback cache, keyed by workspace id.
const inMemoryConcurrencyCache = new Map<string, CacheEntry>()
/** Cache key (Redis and in-memory) for a workspace's concurrency limit. */
function cacheKey(workspaceId: string): string {
  return 'workspace-concurrency-limit:' + workspaceId
}
/**
 * Parse an arbitrary (cached or metadata) value into a positive integer limit.
 *
 * Finite numbers are floored; strings are parsed as base-10 integers. Any
 * value that does not yield an integer >= 1 returns null so callers fall
 * back to recomputing the limit.
 */
function parsePositiveLimit(value: unknown): number | null {
  if (typeof value === 'number' && Number.isFinite(value)) {
    const floored = Math.floor(value)
    // Check positivity AFTER flooring: inputs in (0, 1) used to pass the
    // `value > 0` guard and then floor to 0, leaking a non-positive "limit".
    return floored > 0 ? floored : null
  }
  if (typeof value === 'string') {
    const parsed = Number.parseInt(value, 10)
    if (Number.isFinite(parsed) && parsed > 0) {
      return parsed
    }
  }
  return null
}
// Per-plan concurrency limits. Each is configurable via environment, with a
// hard-coded fallback when the variable is unset or not a parseable number
// (Number.parseInt yields NaN, which is falsy, so `||` applies the default).
function getFreeConcurrencyLimit(): number {
  return Number.parseInt(env.WORKSPACE_CONCURRENCY_FREE, 10) || 5
}
function getProConcurrencyLimit(): number {
  return Number.parseInt(env.WORKSPACE_CONCURRENCY_PRO, 10) || 50
}
function getTeamConcurrencyLimit(): number {
  return Number.parseInt(env.WORKSPACE_CONCURRENCY_TEAM, 10) || 200
}
// Default for enterprise plans that carry no per-contract metadata override.
function getEnterpriseDefaultConcurrencyLimit(): number {
  return Number.parseInt(env.WORKSPACE_CONCURRENCY_ENTERPRISE, 10) || 200
}
/**
 * Concurrency limit for an enterprise subscription: a per-contract override
 * from the subscription metadata when present, otherwise the enterprise default.
 */
function getEnterpriseConcurrencyLimit(metadata: unknown): number {
  const parsed = parseEnterpriseWorkspaceConcurrencyMetadata(metadata)
  const override = parsed?.workspaceConcurrencyLimit
  return override == null ? getEnterpriseDefaultConcurrencyLimit() : override
}
function getPlanConcurrencyLimit(plan: string | null | undefined, metadata: unknown): number {
if (!isBillingEnabled) {
return getFreeConcurrencyLimit()
}
if (!plan) {
return getFreeConcurrencyLimit()
}
if (isEnterprise(plan)) {
return getEnterpriseConcurrencyLimit(metadata)
}
if (isTeam(plan)) {
return getTeamConcurrencyLimit()
}
const credits = getPlanTierCredits(plan)
if (credits >= 25_000) {
return getTeamConcurrencyLimit()
}
if (isPro(plan)) {
return getProConcurrencyLimit()
}
return getFreeConcurrencyLimit()
}
export async function getWorkspaceConcurrencyLimit(workspaceId: string): Promise<number> {
const redis = getRedisClient()
if (redis) {
const cached = await redis.get(cacheKey(workspaceId))
const cachedValue = parsePositiveLimit(cached)
if (cachedValue !== null) {
return cachedValue
}
} else {
const cached = inMemoryConcurrencyCache.get(workspaceId)
if (cached && cached.expiresAt > Date.now()) {
return cached.value
}
}
try {
const billedAccountUserId = await getWorkspaceBilledAccountUserId(workspaceId)
if (!billedAccountUserId) {
if (redis) {
await redis.set(
cacheKey(workspaceId),
String(getFreeConcurrencyLimit()),
'EX',
CACHE_TTL_SECONDS
)
} else {
inMemoryConcurrencyCache.set(workspaceId, {
value: getFreeConcurrencyLimit(),
expiresAt: Date.now() + CACHE_TTL_MS,
})
}
return getFreeConcurrencyLimit()
}
const subscription = await getHighestPrioritySubscription(billedAccountUserId)
const limit = getPlanConcurrencyLimit(subscription?.plan, subscription?.metadata)
if (redis) {
await redis.set(cacheKey(workspaceId), String(limit), 'EX', CACHE_TTL_SECONDS)
} else {
inMemoryConcurrencyCache.set(workspaceId, {
value: limit,
expiresAt: Date.now() + CACHE_TTL_MS,
})
}
return limit
} catch (error) {
logger.error('Failed to resolve workspace concurrency limit, using free tier', {
workspaceId,
error,
})
return getFreeConcurrencyLimit()
}
}
/**
 * Invalidate cached concurrency limits.
 *
 * With a workspaceId, only that workspace's entry is dropped; without one,
 * every entry is flushed from both the in-memory cache and Redis.
 */
export async function resetWorkspaceConcurrencyLimitCache(workspaceId?: string): Promise<void> {
  if (workspaceId) {
    inMemoryConcurrencyCache.delete(workspaceId)
  } else {
    inMemoryConcurrencyCache.clear()
  }
  const redis = getRedisClient()
  if (!redis) {
    return
  }
  if (workspaceId) {
    await redis.del(cacheKey(workspaceId))
  } else {
    const staleKeys = await redis.keys('workspace-concurrency-limit:*')
    if (staleKeys.length > 0) {
      await redis.del(...staleKeys)
    }
  }
}

View File

@@ -44,7 +44,7 @@ export const mdxComponents: MDXRemoteProps['components'] = {
<p
{...props}
style={{ fontSize: '19px', marginBottom: '1.5rem', fontWeight: '400' }}
className={clsx('text-[var(--text-subtle)] leading-relaxed', props.className)}
className={clsx('text-[var(--landing-text-muted)] leading-relaxed', props.className)}
/>
),
ul: (props: any) => (
@@ -52,7 +52,7 @@ export const mdxComponents: MDXRemoteProps['components'] = {
{...props}
style={{ fontSize: '19px', marginBottom: '1rem', fontWeight: '400' }}
className={clsx(
'list-outside list-disc pl-6 text-[var(--text-subtle)] leading-relaxed',
'list-outside list-disc pl-6 text-[var(--landing-text-muted)] leading-relaxed',
props.className
)}
/>
@@ -62,7 +62,7 @@ export const mdxComponents: MDXRemoteProps['components'] = {
{...props}
style={{ fontSize: '19px', marginBottom: '1rem', fontWeight: '400' }}
className={clsx(
'list-outside list-decimal pl-6 text-[var(--text-subtle)] leading-relaxed',
'list-outside list-decimal pl-6 text-[var(--landing-text-muted)] leading-relaxed',
props.className
)}
/>
@@ -140,7 +140,7 @@ export const mdxComponents: MDXRemoteProps['components'] = {
<code
{...props}
className={clsx(
'rounded bg-[var(--surface-4)] px-1.5 py-0.5 font-mono font-normal text-[0.9em] text-[var(--landing-text)]',
'rounded bg-[var(--landing-bg-elevated)] px-1.5 py-0.5 font-mono font-normal text-[0.9em] text-[var(--landing-text)]',
props.className
)}
style={{ fontWeight: 400 }}

View File

@@ -1,13 +1,7 @@
import type { CopilotAsyncToolStatus } from '@sim/db/schema'
import { MothershipStreamV1AsyncToolRecordStatus } from '@/lib/copilot/generated/mothership-stream-v1'
export const ASYNC_TOOL_STATUS = {
pending: 'pending',
running: 'running',
completed: 'completed',
failed: 'failed',
cancelled: 'cancelled',
delivered: 'delivered',
} as const
export const ASYNC_TOOL_STATUS = MothershipStreamV1AsyncToolRecordStatus
export type AsyncLifecycleStatus =
| typeof ASYNC_TOOL_STATUS.pending

View File

@@ -1,53 +0,0 @@
import { createLogger } from '@sim/logger'
import { CopilotFiles } from '@/lib/uploads'
import { createFileContent } from '@/lib/uploads/utils/file-utils'
const logger = createLogger('CopilotChatContext')
/**
 * Raw attachment descriptor accepted from the client payload.
 * Name and mime type each arrive under one of two historical key spellings
 * (`name`/`filename`, `mimeType`/`media_type`), so both are optional here.
 */
export interface FileAttachmentInput {
  id: string
  // Storage key used to fetch the attachment bytes.
  key: string
  name?: string
  filename?: string
  mimeType?: string
  media_type?: string
  // Size in bytes — TODO confirm unit against the upload pipeline.
  size: number
}
/**
 * A single content entry for the outgoing payload. Shape beyond `type` is
 * determined by `createFileContent` for the attachment's media type.
 */
export interface FileContent {
  type: string
  [key: string]: unknown
}
/**
 * Process file attachments into content for the payload.
 *
 * Downloads each attachment via `CopilotFiles.processCopilotAttachments`,
 * converts the bytes into payload content blocks, and tags every block with
 * its original filename. Attachments whose media type yields no content are
 * skipped silently.
 */
export async function processFileAttachments(
  fileAttachments: FileAttachmentInput[],
  userId: string
): Promise<FileContent[]> {
  if (!Array.isArray(fileAttachments) || fileAttachments.length === 0) return []
  // Correlates the batch in downstream logs.
  const requestId = `copilot-${userId}-${Date.now()}`
  const prepared = await CopilotFiles.processCopilotAttachments(
    fileAttachments as Parameters<typeof CopilotFiles.processCopilotAttachments>[0],
    requestId
  )
  const contents: FileContent[] = []
  for (const { buffer, attachment } of prepared) {
    const content = createFileContent(buffer, attachment.media_type)
    if (!content) continue
    contents.push({ ...content, filename: attachment.filename })
  }
  logger.debug('Processed file attachments for payload', {
    userId,
    inputCount: fileAttachments.length,
    outputCount: contents.length,
  })
  return contents
}

View File

@@ -1,140 +0,0 @@
/**
 * @vitest-environment node
 */
import { beforeEach, describe, expect, it, vi } from 'vitest'
// vi.hoisted runs before the hoisted vi.mock factories below, so these spies
// exist when the factories are evaluated.
const {
  orchestrateCopilotStream,
  createRunSegment,
  updateRunStatus,
  resetStreamBuffer,
  setStreamMeta,
  createStreamEventWriter,
} = vi.hoisted(() => ({
  orchestrateCopilotStream: vi.fn(),
  createRunSegment: vi.fn(),
  updateRunStatus: vi.fn(),
  resetStreamBuffer: vi.fn(),
  setStreamMeta: vi.fn(),
  createStreamEventWriter: vi.fn(),
}))
vi.mock('@/lib/copilot/orchestrator', () => ({
  orchestrateCopilotStream,
}))
vi.mock('@/lib/copilot/async-runs/repository', () => ({
  createRunSegment,
  updateRunStatus,
}))
vi.mock('@/lib/copilot/orchestrator/stream/buffer', () => ({
  createStreamEventWriter,
  resetStreamBuffer,
  setStreamMeta,
}))
// Stubbed drizzle chain: update(...).set(...).where(...) — all no-ops.
vi.mock('@sim/db', () => ({
  db: {
    update: vi.fn(() => ({
      set: vi.fn(() => ({
        where: vi.fn(),
      })),
    })),
  },
}))
vi.mock('@/lib/copilot/task-events', () => ({
  taskPubSub: null,
}))
// Imported after the mock declarations so the module under test resolves them.
import { createSSEStream } from '@/lib/copilot/chat-streaming'
// Read the SSE stream to completion, discarding every chunk.
async function drainStream(stream: ReadableStream) {
  const reader = stream.getReader()
  while (true) {
    const { done } = await reader.read()
    if (done) break
  }
}
describe('createSSEStream terminal error handling', () => {
  const write = vi.fn().mockResolvedValue({ eventId: 1, streamId: 'stream-1', event: {} })
  const flush = vi.fn().mockResolvedValue(undefined)
  const close = vi.fn().mockResolvedValue(undefined)
  beforeEach(() => {
    vi.clearAllMocks()
    // clearAllMocks wipes resolved values, so re-arm every stub per test.
    write.mockResolvedValue({ eventId: 1, streamId: 'stream-1', event: {} })
    flush.mockResolvedValue(undefined)
    close.mockResolvedValue(undefined)
    createStreamEventWriter.mockReturnValue({ write, flush, close })
    resetStreamBuffer.mockResolvedValue(undefined)
    setStreamMeta.mockResolvedValue(undefined)
    createRunSegment.mockResolvedValue(null)
    updateRunStatus.mockResolvedValue(null)
  })
  it('writes a terminal error event before close when orchestration returns success=false', async () => {
    orchestrateCopilotStream.mockResolvedValue({
      success: false,
      error: 'resume failed',
      content: '',
      contentBlocks: [],
      toolCalls: [],
    })
    const stream = createSSEStream({
      requestPayload: { message: 'hello' },
      userId: 'user-1',
      streamId: 'stream-1',
      executionId: 'exec-1',
      runId: 'run-1',
      currentChat: null,
      isNewChat: false,
      message: 'hello',
      titleModel: 'gpt-5.4',
      requestId: 'req-1',
      orchestrateOptions: {},
    })
    await drainStream(stream)
    expect(write).toHaveBeenCalledWith(
      expect.objectContaining({
        type: 'error',
        error: 'resume failed',
      })
    )
    // Replay durability: the error must be persisted before the writer closes.
    expect(write.mock.invocationCallOrder.at(-1)).toBeLessThan(close.mock.invocationCallOrder[0])
  })
  it('writes the thrown terminal error event before close for replay durability', async () => {
    orchestrateCopilotStream.mockRejectedValue(new Error('kaboom'))
    const stream = createSSEStream({
      requestPayload: { message: 'hello' },
      userId: 'user-1',
      streamId: 'stream-1',
      executionId: 'exec-1',
      runId: 'run-1',
      currentChat: null,
      isNewChat: false,
      message: 'hello',
      titleModel: 'gpt-5.4',
      requestId: 'req-1',
      orchestrateOptions: {},
    })
    await drainStream(stream)
    expect(write).toHaveBeenCalledWith(
      expect.objectContaining({
        type: 'error',
        error: 'kaboom',
      })
    )
    expect(write.mock.invocationCallOrder.at(-1)).toBeLessThan(close.mock.invocationCallOrder[0])
  })
})

View File

@@ -1,579 +0,0 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { createRunSegment, updateRunStatus } from '@/lib/copilot/async-runs/repository'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import type { OrchestrateStreamOptions } from '@/lib/copilot/orchestrator'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import {
createStreamEventWriter,
getStreamMeta,
resetStreamBuffer,
setStreamMeta,
} from '@/lib/copilot/orchestrator/stream/buffer'
import { taskPubSub } from '@/lib/copilot/task-events'
import { env } from '@/lib/core/config/env'
import { acquireLock, getRedisClient, releaseLock } from '@/lib/core/config/redis'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
const logger = createLogger('CopilotChatStreaming')
// Max lifetime of a chat's distributed stream lock before Redis expires it.
const CHAT_STREAM_LOCK_TTL_SECONDS = 2 * 60 * 60
// TTL for the per-stream distributed abort flag in Redis.
const STREAM_ABORT_TTL_SECONDS = 10 * 60
// How often a live stream polls Redis for its abort flag.
const STREAM_ABORT_POLL_MS = 1000
// Abort handles for a stream owned by this process: `abortController` is the
// hard abort, `userStopController` marks an explicit user-initiated stop.
interface ActiveStreamEntry {
  abortController: AbortController
  userStopController: AbortController
}
// In-process registry of live streams keyed by streamId.
const activeStreams = new Map<string, ActiveStreamEntry>()
// Tracks in-flight streams by chatId so that a subsequent request for the
// same chat can force-abort the previous stream and wait for it to settle
// before forwarding to Go.
const pendingChatStreams = new Map<
  string,
  { promise: Promise<void>; resolve: () => void; streamId: string }
>()
/**
 * Record an in-flight stream for `chatId` so later requests on the same chat
 * can await its settlement. Overwrites (with a warning) any existing entry.
 */
function registerPendingChatStream(chatId: string, streamId: string): void {
  if (pendingChatStreams.has(chatId)) {
    logger.warn(`registerPendingChatStream: overwriting existing entry for chatId ${chatId}`)
  }
  let settle: (() => void) | undefined
  const settled = new Promise<void>((res) => {
    settle = res
  })
  // `settle` is assigned synchronously by the Promise executor above.
  pendingChatStreams.set(chatId, { promise: settled, resolve: settle as () => void, streamId })
}
/**
 * Settle and drop the pending entry for `chatId`, but only when it still
 * belongs to `streamId` — a newer stream's entry is left untouched.
 */
function resolvePendingChatStream(chatId: string, streamId: string): void {
  const pending = pendingChatStreams.get(chatId)
  if (!pending || pending.streamId !== streamId) return
  pending.resolve()
  pendingChatStreams.delete(chatId)
}
/** Redis key guarding exclusive streaming for one chat. */
function getChatStreamLockKey(chatId: string): string {
  return 'copilot:chat-stream-lock:'.concat(chatId)
}
/** Redis key holding the distributed abort flag for one stream. */
function getStreamAbortKey(streamId: string): string {
  return 'copilot:stream-abort:'.concat(streamId)
}
/**
 * Wait for any in-flight stream on `chatId` to settle without force-aborting it.
 * Returns true when no stream is active (or it settles in time), false on timeout.
 *
 * Checks both the in-process pending map and (when Redis is configured) the
 * distributed chat-stream lock, polling every 200 ms until `timeoutMs`.
 * When `expectedStreamId` is given, only that stream counts as pending and
 * the lock is considered released once a different stream owns it.
 */
export async function waitForPendingChatStream(
  chatId: string,
  timeoutMs = 5_000,
  expectedStreamId?: string
): Promise<boolean> {
  const redis = getRedisClient()
  const deadline = Date.now() + timeoutMs
  for (;;) {
    const entry = pendingChatStreams.get(chatId)
    // Locally pending only if an entry exists and matches the expected id (when given).
    const localPending = !!entry && (!expectedStreamId || entry.streamId === expectedStreamId)
    if (redis) {
      try {
        const ownerStreamId = await redis.get(getChatStreamLockKey(chatId))
        // Lock counts as released when nobody owns it, or a different stream
        // than the one we are waiting on took it over.
        const lockReleased =
          !ownerStreamId || (expectedStreamId !== undefined && ownerStreamId !== expectedStreamId)
        if (!localPending && lockReleased) {
          return true
        }
      } catch (error) {
        // Redis check failed: log and keep polling rather than fail hard.
        logger.warn('Failed to check distributed chat stream lock while waiting', {
          chatId,
          expectedStreamId,
          error: error instanceof Error ? error.message : String(error),
        })
      }
    } else if (!localPending) {
      // No Redis configured: local state alone decides.
      return true
    }
    if (Date.now() >= deadline) return false
    await new Promise((resolve) => setTimeout(resolve, 200))
  }
}
/**
 * Release both halves of a chat stream's ownership: the distributed Redis
 * lock (best-effort — a failed release must not block cleanup) and the
 * in-process pending entry.
 */
export async function releasePendingChatStream(chatId: string, streamId: string): Promise<void> {
  const client = getRedisClient()
  if (client) {
    await releaseLock(getChatStreamLockKey(chatId), streamId).catch(() => false)
  }
  resolvePendingChatStream(chatId, streamId)
}
/**
 * Acquire exclusive streaming rights for `chatId` on behalf of `streamId`.
 *
 * With Redis: repeatedly try the distributed lock; when blocked and no local
 * stream is pending, inspect the lock owner's stream meta and reclaim locks
 * left behind by streams that already reached a terminal state. Without
 * Redis: wait for the local pending entry to settle, then register ours.
 * Returns false when `timeoutMs` elapses without acquiring.
 */
export async function acquirePendingChatStream(
  chatId: string,
  streamId: string,
  timeoutMs = 5_000
): Promise<boolean> {
  const redis = getRedisClient()
  if (redis) {
    const deadline = Date.now() + timeoutMs
    for (;;) {
      try {
        const acquired = await acquireLock(
          getChatStreamLockKey(chatId),
          streamId,
          CHAT_STREAM_LOCK_TTL_SECONDS
        )
        if (acquired) {
          registerPendingChatStream(chatId, streamId)
          return true
        }
        // Lock held by someone else and nothing pending locally: check
        // whether the owner already finished and only left a stale lock.
        if (!pendingChatStreams.has(chatId)) {
          const ownerStreamId = await redis.get(getChatStreamLockKey(chatId))
          if (ownerStreamId) {
            const ownerMeta = await getStreamMeta(ownerStreamId)
            const ownerTerminal =
              ownerMeta?.status === 'complete' ||
              ownerMeta?.status === 'error' ||
              ownerMeta?.status === 'cancelled'
            if (ownerTerminal) {
              // Stale lock from a finished stream — release and retry at once.
              await releaseLock(getChatStreamLockKey(chatId), ownerStreamId).catch(() => false)
              continue
            }
          }
        }
      } catch (error) {
        logger.warn('Distributed chat stream lock failed; retrying distributed coordination', {
          chatId,
          streamId,
          error: error instanceof Error ? error.message : String(error),
        })
      }
      if (Date.now() >= deadline) return false
      await new Promise((resolve) => setTimeout(resolve, 200))
    }
  }
  // No Redis: coordinate purely through the in-process pending map.
  for (;;) {
    const existing = pendingChatStreams.get(chatId)
    if (!existing) {
      registerPendingChatStream(chatId, streamId)
      return true
    }
    // Wait for the current holder to settle, bounded by timeoutMs per round.
    const settled = await Promise.race([
      existing.promise.then(() => true),
      new Promise<boolean>((r) => setTimeout(() => r(false), timeoutMs)),
    ])
    if (!settled) return false
  }
}
/**
 * Request that `streamId` stop. Publishes a distributed abort flag (when
 * Redis is available) so another instance hosting the stream will see it,
 * then aborts the local controllers if this process owns the stream.
 * Returns true when a local abort happened or the flag was published.
 */
export async function abortActiveStream(streamId: string): Promise<boolean> {
  let flagged = false
  const client = getRedisClient()
  if (client) {
    try {
      await client.set(getStreamAbortKey(streamId), '1', 'EX', STREAM_ABORT_TTL_SECONDS)
      flagged = true
    } catch (error) {
      logger.warn('Failed to publish distributed stream abort', {
        streamId,
        error: error instanceof Error ? error.message : String(error),
      })
    }
  }
  const local = activeStreams.get(streamId)
  if (!local) return flagged
  local.userStopController.abort()
  local.abortController.abort()
  activeStreams.delete(streamId)
  return true
}
// Event types flushed to the durable stream buffer immediately rather than
// batched, so reconnecting clients can replay them without delay.
const FLUSH_EVENT_TYPES = new Set([
  'tool_call',
  'tool_result',
  'tool_error',
  'subagent_end',
  'structured_result',
  'subagent_result',
  'done',
  'error',
])
/**
 * Ask the copilot backend to generate a chat title from the first message.
 * Returns the trimmed title, or null on any failure (missing inputs, non-OK
 * response, network error) — callers treat title generation as best-effort.
 */
export async function requestChatTitle(params: {
  message: string
  model: string
  provider?: string
  messageId?: string
}): Promise<string | null> {
  const { message, model, provider, messageId } = params
  if (!message || !model) return null
  const requestHeaders: Record<string, string> = { 'Content-Type': 'application/json' }
  if (env.COPILOT_API_KEY) {
    requestHeaders['x-api-key'] = env.COPILOT_API_KEY
  }
  try {
    const res = await fetch(`${SIM_AGENT_API_URL}/api/generate-chat-title`, {
      method: 'POST',
      headers: requestHeaders,
      body: JSON.stringify({ message, model, ...(provider ? { provider } : {}) }),
    })
    // Tolerate non-JSON bodies so error logging below still works.
    const body = await res.json().catch(() => ({}))
    if (!res.ok) {
      logger.withMetadata({ messageId }).warn('Failed to generate chat title via copilot backend', {
        status: res.status,
        error: body,
      })
      return null
    }
    const trimmed = typeof body?.title === 'string' ? body.title.trim() : ''
    return trimmed.length > 0 ? trimmed : null
  } catch (error) {
    logger.withMetadata({ messageId }).error('Error generating chat title', error)
    return null
  }
}
/** Inputs for {@link createSSEStream}. */
export interface StreamingOrchestrationParams {
  // Payload forwarded to the orchestrator; messageId/workflowId/model/provider
  // are read from it when present.
  requestPayload: Record<string, unknown>
  userId: string
  // Identifies the durable stream buffer and the live SSE connection.
  streamId: string
  executionId: string
  // Run-segment row id; updated with terminal status when the stream ends.
  runId: string
  chatId?: string
  currentChat: any
  // Title generation only runs for new chats without an existing title.
  isNewChat: boolean
  message: string
  // Model (and optional provider) used for title generation.
  titleModel: string
  titleProvider?: string
  requestId: string
  workspaceId?: string
  // Orchestrator options minus onEvent, which createSSEStream supplies itself.
  orchestrateOptions: Omit<OrchestrateStreamOptions, 'onEvent'>
  // Set when the caller already registered the chatId→streamId pending entry.
  pendingChatStreamAlreadyRegistered?: boolean
}
/**
 * Create the live SSE ReadableStream for a copilot chat run.
 *
 * Responsibilities:
 * - registers the stream in `activeStreams` (and the per-chat pending map
 *   unless the caller already did);
 * - mirrors every event into the durable stream buffer so reconnecting
 *   clients can replay;
 * - polls Redis for distributed abort flags and honours local aborts;
 * - drives `orchestrateCopilotStream` and records terminal
 *   complete/error/cancelled state in stream meta and the run row;
 * - keeps the HTTP connection alive with `: keepalive` comments.
 *
 * The stream keeps running server-side after a client disconnect so the
 * durable buffer stays complete for replay.
 *
 * Fix: the terminal `eventWriter.close()` calls are now best-effort
 * (`.catch(() => {})`), matching the abort paths. Previously a throwing
 * close in the failure branch fell into the generic catch — emitting a
 * duplicate error event and closing twice — and a throwing close inside the
 * catch branch escaped `start()` entirely.
 */
export function createSSEStream(params: StreamingOrchestrationParams): ReadableStream {
  const {
    requestPayload,
    userId,
    streamId,
    executionId,
    runId,
    chatId,
    currentChat,
    isNewChat,
    message,
    titleModel,
    titleProvider,
    requestId,
    workspaceId,
    orchestrateOptions,
    pendingChatStreamAlreadyRegistered = false,
  } = params
  const messageId =
    typeof requestPayload.messageId === 'string' ? requestPayload.messageId : streamId
  const reqLogger = logger.withMetadata({ requestId, messageId })
  let eventWriter: ReturnType<typeof createStreamEventWriter> | null = null
  let clientDisconnected = false
  // Three distinct signals: hard abort, explicit user stop, and passive
  // client disconnect — the orchestrator treats each differently.
  const abortController = new AbortController()
  const userStopController = new AbortController()
  const clientDisconnectedController = new AbortController()
  activeStreams.set(streamId, { abortController, userStopController })
  if (chatId && !pendingChatStreamAlreadyRegistered) {
    registerPendingChatStream(chatId, streamId)
  }
  return new ReadableStream({
    async start(controller) {
      const encoder = new TextEncoder()
      const markClientDisconnected = (reason: string) => {
        if (clientDisconnected) return
        clientDisconnected = true
        if (!clientDisconnectedController.signal.aborted) {
          clientDisconnectedController.abort()
        }
        reqLogger.info('Client disconnected from live SSE stream', {
          streamId,
          runId,
          reason,
        })
      }
      await resetStreamBuffer(streamId)
      await setStreamMeta(streamId, { status: 'active', userId, executionId, runId })
      if (chatId) {
        // Best-effort run-segment bookkeeping; failure must not kill the stream.
        await createRunSegment({
          id: runId,
          executionId,
          chatId,
          userId,
          workflowId: (requestPayload.workflowId as string | undefined) || null,
          workspaceId,
          streamId,
          model: (requestPayload.model as string | undefined) || null,
          provider: (requestPayload.provider as string | undefined) || null,
          requestContext: { requestId },
        }).catch((error) => {
          reqLogger.warn('Failed to create copilot run segment', {
            error: error instanceof Error ? error.message : String(error),
          })
        })
      }
      eventWriter = createStreamEventWriter(streamId)
      let localSeq = 0
      let abortPoller: ReturnType<typeof setInterval> | null = null
      const redis = getRedisClient()
      if (redis) {
        // Distributed abort: another instance can flag this stream in Redis.
        abortPoller = setInterval(() => {
          void (async () => {
            try {
              const shouldAbort = await redis.get(getStreamAbortKey(streamId))
              if (shouldAbort && !abortController.signal.aborted) {
                userStopController.abort()
                abortController.abort()
                await redis.del(getStreamAbortKey(streamId))
              }
            } catch (error) {
              reqLogger.warn('Failed to poll distributed stream abort', {
                streamId,
                error: error instanceof Error ? error.message : String(error),
              })
            }
          })()
        }, STREAM_ABORT_POLL_MS)
      }
      // Persist to the durable buffer, then mirror to the live connection.
      const pushEvent = async (event: Record<string, any>) => {
        if (!eventWriter) return
        const eventId = ++localSeq
        try {
          await eventWriter.write(event)
          if (FLUSH_EVENT_TYPES.has(event.type)) {
            await eventWriter.flush()
          }
        } catch (error) {
          reqLogger.error('Failed to persist stream event', {
            eventType: event.type,
            eventId,
            error: error instanceof Error ? error.message : String(error),
          })
          // Keep the live SSE stream going even if durable buffering hiccups.
        }
        try {
          if (!clientDisconnected) {
            controller.enqueue(
              encoder.encode(`data: ${JSON.stringify({ ...event, eventId, streamId })}\n\n`)
            )
          }
        } catch {
          markClientDisconnected('enqueue_failed')
        }
      }
      const pushEventBestEffort = async (event: Record<string, any>) => {
        try {
          await pushEvent(event)
        } catch (error) {
          reqLogger.error('Failed to push event', {
            eventType: event.type,
            error: error instanceof Error ? error.message : String(error),
          })
        }
      }
      if (chatId) {
        await pushEvent({ type: 'chat_id', chatId })
      }
      if (chatId && !currentChat?.title && isNewChat) {
        // Fire-and-forget title generation; failures only log.
        requestChatTitle({ message, model: titleModel, provider: titleProvider, messageId })
          .then(async (title) => {
            if (title) {
              await db.update(copilotChats).set({ title }).where(eq(copilotChats.id, chatId!))
              await pushEvent({ type: 'title_updated', title })
              if (workspaceId) {
                taskPubSub?.publishStatusChanged({ workspaceId, chatId: chatId!, type: 'renamed' })
              }
            }
          })
          .catch((error) => {
            reqLogger.error('Title generation failed', error)
          })
      }
      const keepaliveInterval = setInterval(() => {
        if (clientDisconnected) return
        try {
          controller.enqueue(encoder.encode(': keepalive\n\n'))
        } catch {
          markClientDisconnected('keepalive_failed')
        }
      }, 15_000)
      try {
        const result = await orchestrateCopilotStream(requestPayload, {
          ...orchestrateOptions,
          executionId,
          runId,
          abortSignal: abortController.signal,
          userStopSignal: userStopController.signal,
          clientDisconnectedSignal: clientDisconnectedController.signal,
          onEvent: async (event) => {
            await pushEvent(event)
          },
        })
        if (abortController.signal.aborted) {
          reqLogger.info('Stream aborted by explicit stop')
          await eventWriter.close().catch(() => {})
          await setStreamMeta(streamId, { status: 'cancelled', userId, executionId, runId })
          await updateRunStatus(runId, 'cancelled', { completedAt: new Date() }).catch(() => {})
          return
        }
        if (!result.success) {
          const errorMessage =
            result.error ||
            result.errors?.[0] ||
            'An unexpected error occurred while processing the response.'
          if (clientDisconnected) {
            reqLogger.info('Stream failed after client disconnect', {
              error: errorMessage,
            })
          }
          reqLogger.error('Orchestration returned failure', {
            error: errorMessage,
          })
          await pushEventBestEffort({
            type: 'error',
            error: errorMessage,
            data: {
              displayMessage: errorMessage,
            },
          })
          // Best-effort close: a writer failure here must not fall into the
          // generic catch and emit a second error event.
          await eventWriter.close().catch(() => {})
          await setStreamMeta(streamId, {
            status: 'error',
            userId,
            executionId,
            runId,
            error: errorMessage,
          })
          await updateRunStatus(runId, 'error', {
            completedAt: new Date(),
            error: errorMessage,
          }).catch(() => {})
          return
        }
        // Best-effort close: the run has already succeeded and its events are
        // persisted; a close failure should not flip it to error.
        await eventWriter.close().catch(() => {})
        await setStreamMeta(streamId, { status: 'complete', userId, executionId, runId })
        await updateRunStatus(runId, 'complete', { completedAt: new Date() }).catch(() => {})
        if (clientDisconnected) {
          reqLogger.info('Orchestration completed after client disconnect', {
            streamId,
            runId,
          })
        }
      } catch (error) {
        if (abortController.signal.aborted) {
          reqLogger.info('Stream aborted by explicit stop')
          await eventWriter.close().catch(() => {})
          await setStreamMeta(streamId, { status: 'cancelled', userId, executionId, runId })
          await updateRunStatus(runId, 'cancelled', { completedAt: new Date() }).catch(() => {})
          return
        }
        if (clientDisconnected) {
          reqLogger.info('Stream errored after client disconnect', {
            error: error instanceof Error ? error.message : 'Stream error',
          })
        }
        reqLogger.error('Orchestration error', error)
        const errorMessage = error instanceof Error ? error.message : 'Stream error'
        await pushEventBestEffort({
          type: 'error',
          error: errorMessage,
          data: {
            displayMessage: 'An unexpected error occurred while processing the response.',
          },
        })
        // Best-effort close: a throwing close here would otherwise escape
        // start() and skip the error-status bookkeeping below.
        await eventWriter.close().catch(() => {})
        await setStreamMeta(streamId, {
          status: 'error',
          userId,
          executionId,
          runId,
          error: errorMessage,
        })
        await updateRunStatus(runId, 'error', {
          completedAt: new Date(),
          error: errorMessage,
        }).catch(() => {})
      } finally {
        reqLogger.info('Closing live SSE stream', {
          streamId,
          runId,
          clientDisconnected,
          aborted: abortController.signal.aborted,
        })
        clearInterval(keepaliveInterval)
        if (abortPoller) {
          clearInterval(abortPoller)
        }
        activeStreams.delete(streamId)
        if (chatId) {
          if (redis) {
            await releaseLock(getChatStreamLockKey(chatId), streamId).catch(() => false)
          }
          resolvePendingChatStream(chatId, streamId)
        }
        if (redis) {
          await redis.del(getStreamAbortKey(streamId)).catch(() => {})
        }
        try {
          controller.close()
        } catch {
          // Controller already closed from cancel() — safe to ignore
        }
      }
    },
    cancel() {
      // Client walked away; orchestration keeps running for replay durability.
      reqLogger.info('ReadableStream cancel received from client', {
        streamId,
        runId,
      })
      if (!clientDisconnected) {
        clientDisconnected = true
        if (!clientDisconnectedController.signal.aborted) {
          clientDisconnectedController.abort()
        }
      }
      if (eventWriter) {
        eventWriter.flush().catch(() => {})
      }
    },
  })
}
// Shared SSE headers plus an explicit no-compression marker.
// NOTE(review): 'Content-Encoding: none' presumably prevents intermediary
// compression/buffering of the event stream — confirm against the proxy setup.
export const SSE_RESPONSE_HEADERS = {
  ...SSE_HEADERS,
  'Content-Encoding': 'none',
} as const

View File

@@ -0,0 +1,63 @@
/**
 * @vitest-environment node
 */
import { describe, expect, it } from 'vitest'
import { toDisplayMessage } from './display-message'
describe('display-message', () => {
  it('maps canonical tool, subagent text, and cancelled complete blocks to display blocks', () => {
    const display = toDisplayMessage({
      id: 'msg-1',
      role: 'assistant',
      content: 'done',
      timestamp: '2024-01-01T00:00:00.000Z',
      requestId: 'req-1',
      contentBlocks: [
        // Canonical tool block with a cancelled state and custom display title.
        {
          type: 'tool',
          phase: 'call',
          toolCall: {
            id: 'tool-1',
            name: 'read',
            state: 'cancelled',
            display: { title: 'Stopped by user' },
          },
        },
        // Subagent-lane text maps to a subagent_text display block.
        {
          type: 'text',
          lane: 'subagent',
          channel: 'assistant',
          content: 'subagent output',
        },
        // Cancelled completion maps to a bare 'stopped' marker.
        {
          type: 'complete',
          status: 'cancelled',
        },
      ],
    })
    expect(display.contentBlocks).toEqual([
      {
        type: 'tool_call',
        toolCall: {
          id: 'tool-1',
          name: 'read',
          status: 'cancelled',
          displayTitle: 'Stopped by user',
          phaseLabel: undefined,
          params: undefined,
          calledBy: undefined,
          result: undefined,
        },
      },
      {
        type: 'subagent_text',
        content: 'subagent output',
      },
      {
        type: 'stopped',
      },
    ])
  })
})

View File

@@ -0,0 +1,118 @@
import {
MothershipStreamV1CompletionStatus,
MothershipStreamV1EventType,
MothershipStreamV1SpanLifecycleEvent,
MothershipStreamV1ToolOutcome,
} from '@/lib/copilot/generated/mothership-stream-v1'
import {
type ChatMessage,
type ChatMessageAttachment,
type ChatMessageContext,
type ContentBlock,
ContentBlockType,
type ToolCallInfo,
ToolCallStatus,
} from '@/app/workspace/[workspaceId]/home/types'
import type { PersistedContentBlock, PersistedMessage } from './persisted-message'
// Maps a persisted tool state (terminal outcome or in-flight status) to the
// display-layer ToolCallStatus. Unknown states fall back to error in
// toToolCallInfo.
const STATE_TO_STATUS: Record<string, ToolCallStatus> = {
  [MothershipStreamV1ToolOutcome.success]: ToolCallStatus.success,
  [MothershipStreamV1ToolOutcome.error]: ToolCallStatus.error,
  [MothershipStreamV1ToolOutcome.cancelled]: ToolCallStatus.cancelled,
  // Rejected tools render as errors; skipped tools render as success.
  [MothershipStreamV1ToolOutcome.rejected]: ToolCallStatus.error,
  [MothershipStreamV1ToolOutcome.skipped]: ToolCallStatus.success,
  // In-flight states both render as executing.
  pending: ToolCallStatus.executing,
  executing: ToolCallStatus.executing,
}
/**
 * Project a persisted tool block into the display-layer ToolCallInfo.
 * Unknown states map to error; cancelled calls get a fixed display title.
 */
function toToolCallInfo(block: PersistedContentBlock): ToolCallInfo | undefined {
  const call = block.toolCall
  if (!call) return undefined
  const mapped: ToolCallStatus = STATE_TO_STATUS[call.state] ?? ToolCallStatus.error
  const title = mapped === ToolCallStatus.cancelled ? 'Stopped by user' : call.display?.title
  return {
    id: call.id,
    name: call.name,
    status: mapped,
    displayTitle: title,
    phaseLabel: call.display?.phaseLabel,
    params: call.params,
    calledBy: call.calledBy,
    result: call.result,
  }
}
/**
 * Convert one canonical persisted block into its display-layer ContentBlock.
 * Unrecognized types degrade to a plain text block.
 */
function toDisplayBlock(block: PersistedContentBlock): ContentBlock {
  if (block.type === MothershipStreamV1EventType.text) {
    // Subagent-lane text renders differently from main-lane text.
    return block.lane === 'subagent'
      ? { type: ContentBlockType.subagent_text, content: block.content }
      : { type: ContentBlockType.text, content: block.content }
  }
  if (block.type === MothershipStreamV1EventType.tool) {
    return { type: ContentBlockType.tool_call, toolCall: toToolCallInfo(block) }
  }
  if (block.type === MothershipStreamV1EventType.span) {
    if (block.lifecycle === MothershipStreamV1SpanLifecycleEvent.end) {
      return { type: ContentBlockType.subagent_end }
    }
    return { type: ContentBlockType.subagent, content: block.content }
  }
  if (
    block.type === MothershipStreamV1EventType.complete &&
    block.status === MothershipStreamV1CompletionStatus.cancelled
  ) {
    // A cancelled completion renders as a bare "stopped" marker.
    return { type: ContentBlockType.stopped }
  }
  // Non-cancelled completions and unknown types fall back to text.
  return { type: ContentBlockType.text, content: block.content }
}
/**
 * Map persisted file attachments to the display shape. Image attachments get
 * a preview URL served through the mothership file endpoint.
 */
function toDisplayAttachment(f: PersistedMessage['fileAttachments']): ChatMessageAttachment[] {
  if (!f?.length) return []
  return f.map((attachment) => {
    const isImage = attachment.media_type.startsWith('image/')
    return {
      id: attachment.id,
      filename: attachment.filename,
      media_type: attachment.media_type,
      size: attachment.size,
      previewUrl: isImage
        ? `/api/files/serve/${encodeURIComponent(attachment.key)}?context=mothership`
        : undefined,
    }
  })
}
/**
 * Map persisted message contexts to the display shape, keeping only the
 * reference ids that are actually set. Returns undefined when empty.
 */
function toDisplayContexts(
  contexts: PersistedMessage['contexts']
): ChatMessageContext[] | undefined {
  if (!contexts?.length) return undefined
  return contexts.map((ctx) => {
    const mapped: ChatMessageContext = { kind: ctx.kind, label: ctx.label }
    if (ctx.workflowId) mapped.workflowId = ctx.workflowId
    if (ctx.knowledgeId) mapped.knowledgeId = ctx.knowledgeId
    if (ctx.tableId) mapped.tableId = ctx.tableId
    if (ctx.fileId) mapped.fileId = ctx.fileId
    return mapped
  })
}
/**
 * Convert a persisted (canonical) chat message into the client display shape.
 * Optional fields are attached only when present, keeping the display object
 * free of `undefined`-valued keys.
 */
export function toDisplayMessage(msg: PersistedMessage): ChatMessage {
  const display: ChatMessage = {
    id: msg.id,
    role: msg.role,
    content: msg.content,
  }
  if (msg.requestId) {
    display.requestId = msg.requestId
  }
  if (msg.contentBlocks && msg.contentBlocks.length > 0) {
    display.contentBlocks = msg.contentBlocks.map(toDisplayBlock)
  }
  const attachments = toDisplayAttachment(msg.fileAttachments)
  if (attachments.length > 0) {
    display.attachments = attachments
  }
  // Fix: contexts was previously assigned unconditionally, leaving an own
  // `contexts: undefined` key on messages without contexts — inconsistent
  // with every other optional field above.
  const contexts = toDisplayContexts(msg.contexts)
  if (contexts) {
    display.contexts = contexts
  }
  return display
}

View File

@@ -17,10 +17,6 @@ vi.mock('@/lib/billing/core/subscription', () => ({
getUserSubscriptionState: vi.fn(),
}))
vi.mock('@/lib/copilot/chat-context', () => ({
processFileAttachments: vi.fn(),
}))
vi.mock('@/lib/core/config/feature-flags', () => ({
isHosted: false,
}))
@@ -45,6 +41,12 @@ vi.mock('@/tools/registry', () => ({
name: 'Brandfetch Search',
description: 'Search for brands by company name',
},
// Catalog marks run_workflow as client / clientExecutable; registry ToolConfig has no executor fields.
run_workflow: {
id: 'run_workflow',
name: 'Run Workflow',
description: 'Run a workflow from the client',
},
},
}))
@@ -58,7 +60,7 @@ vi.mock('@/tools/params', () => ({
}))
import { getUserSubscriptionState } from '@/lib/billing/core/subscription'
import { buildIntegrationToolSchemas } from '@/lib/copilot/chat-payload'
import { buildIntegrationToolSchemas } from './payload'
const mockedGetUserSubscriptionState = getUserSubscriptionState as unknown as {
mockResolvedValue: (value: unknown) => void
@@ -102,4 +104,15 @@ describe('buildIntegrationToolSchemas', () => {
expect(gmailTool?.description).toBe('Send emails using Gmail')
expect(brandfetchTool?.description).toBe('Search for brands by company name')
})
it('emits executeLocally for dynamic client tools only', async () => {
mockedGetUserSubscriptionState.mockResolvedValue({ isFree: false })
const toolSchemas = await buildIntegrationToolSchemas('user-client')
const gmailTool = toolSchemas.find((tool) => tool.name === 'gmail_send')
const runTool = toolSchemas.find((tool) => tool.name === 'run_workflow')
expect(gmailTool?.executeLocally).toBe(false)
expect(runTool?.executeLocally).toBe(true)
})
})

View File

@@ -1,6 +1,7 @@
import { createLogger } from '@sim/logger'
import { getUserSubscriptionState } from '@/lib/billing/core/subscription'
import { getCopilotToolDescription } from '@/lib/copilot/tool-descriptions'
import { getToolEntry } from '@/lib/copilot/tool-executor/router'
import { getCopilotToolDescription } from '@/lib/copilot/tools/descriptions'
import { isHosted } from '@/lib/core/config/feature-flags'
import { createMcpToolId } from '@/lib/mcp/utils'
import { trackChatUpload } from '@/lib/uploads/contexts/workspace/workspace-file-manager'
@@ -10,7 +11,7 @@ import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils'
const logger = createLogger('CopilotChatPayload')
export interface BuildPayloadParams {
interface BuildPayloadParams {
message: string
workflowId?: string
workflowName?: string
@@ -60,16 +61,22 @@ export async function buildIntegrationToolSchemas(
const subscriptionState = await getUserSubscriptionState(userId)
shouldAppendEmailTagline = subscriptionState.isFree
} catch (error) {
reqLogger.warn('Failed to load subscription state for copilot tool descriptions', {
userId,
error: error instanceof Error ? error.message : String(error),
})
logger.warn(
messageId
? `Failed to load subscription state for copilot tool descriptions [messageId:${messageId}]`
: 'Failed to load subscription state for copilot tool descriptions',
{
userId,
error: error instanceof Error ? error.message : String(error),
}
)
}
for (const [toolId, toolConfig] of Object.entries(latestTools)) {
try {
const userSchema = createUserToolSchema(toolConfig)
const strippedName = stripVersionSuffix(toolId)
const catalogEntry = getToolEntry(strippedName)
integrationTools.push({
name: strippedName,
description: getCopilotToolDescription(toolConfig, {
@@ -79,6 +86,8 @@ export async function buildIntegrationToolSchemas(
}),
input_schema: userSchema as unknown as Record<string, unknown>,
defer_loading: true,
executeLocally:
catalogEntry?.clientExecutable === true || catalogEntry?.executor === 'client',
...(toolConfig.oauth?.required && {
oauth: {
required: true,
@@ -87,16 +96,26 @@ export async function buildIntegrationToolSchemas(
}),
})
} catch (toolError) {
reqLogger.warn('Failed to build schema for tool, skipping', {
toolId,
error: toolError instanceof Error ? toolError.message : String(toolError),
})
logger.warn(
messageId
? `Failed to build schema for tool, skipping [messageId:${messageId}]`
: 'Failed to build schema for tool, skipping',
{
toolId,
error: toolError instanceof Error ? toolError.message : String(toolError),
}
)
}
}
} catch (error) {
reqLogger.warn('Failed to build tool schemas', {
error: error instanceof Error ? error.message : String(error),
})
logger.warn(
messageId
? `Failed to build tool schemas [messageId:${messageId}]`
: 'Failed to build tool schemas',
{
error: error instanceof Error ? error.message : String(error),
}
)
}
return integrationTools
}
@@ -192,16 +211,27 @@ export async function buildCopilotRequestPayload(
description:
mcpTool.description || `MCP tool: ${mcpTool.name} (${mcpTool.serverName})`,
input_schema: mcpTool.inputSchema as unknown as Record<string, unknown>,
executeLocally: false,
})
}
if (mcpTools.length > 0) {
payloadLogger.info('Added MCP tools to copilot payload', { count: mcpTools.length })
logger.error(
userMessageId
? `Added MCP tools to copilot payload [messageId:${userMessageId}]`
: 'Added MCP tools to copilot payload',
{ count: mcpTools.length }
)
}
}
} catch (error) {
payloadLogger.warn('Failed to discover MCP tools for copilot', {
error: error instanceof Error ? error.message : String(error),
})
logger.warn(
userMessageId
? `Failed to discover MCP tools for copilot [messageId:${userMessageId}]`
: 'Failed to discover MCP tools for copilot',
{
error: error instanceof Error ? error.message : String(error),
}
)
}
}
}

View File

@@ -0,0 +1,122 @@
/**
 * @vitest-environment node
 */
import { describe, expect, it } from 'vitest'
import type { OrchestratorResult } from '@/lib/copilot/request/types'
import {
  buildPersistedAssistantMessage,
  buildPersistedUserMessage,
  normalizeMessage,
} from './persisted-message'
describe('persisted-message', () => {
  it('round-trips canonical tool blocks through normalizeMessage', () => {
    const result: OrchestratorResult = {
      success: true,
      content: 'done',
      requestId: 'req-1',
      contentBlocks: [
        {
          type: 'tool_call',
          timestamp: Date.now(),
          calledBy: 'build',
          toolCall: {
            id: 'tool-1',
            name: 'read',
            status: 'success',
            params: { path: 'foo.txt' },
            result: { success: true, output: { ok: true } },
          },
        },
      ],
      toolCalls: [],
    }
    const persisted = buildPersistedAssistantMessage(result)
    // Normalizing a freshly persisted message must be a no-op on its shape.
    const normalized = normalizeMessage(persisted as unknown as Record<string, unknown>)
    expect(normalized.contentBlocks).toEqual([
      {
        type: 'tool',
        phase: 'call',
        toolCall: {
          id: 'tool-1',
          name: 'read',
          state: 'success',
          params: { path: 'foo.txt' },
          result: { success: true, output: { ok: true } },
          calledBy: 'build',
        },
      },
      // The assistant text content is appended as a trailing text block.
      {
        type: 'text',
        channel: 'assistant',
        content: 'done',
      },
    ])
  })
  it('normalizes legacy tool_call and top-level toolCalls shapes', () => {
    const normalized = normalizeMessage({
      id: 'msg-1',
      role: 'assistant',
      content: 'hello',
      timestamp: '2024-01-01T00:00:00.000Z',
      contentBlocks: [
        // Legacy block shape: type 'tool_call' with display.text instead of title.
        {
          type: 'tool_call',
          toolCall: {
            id: 'tool-1',
            name: 'read',
            state: 'cancelled',
            display: { text: 'Stopped by user' },
          },
        },
      ],
      // Legacy top-level toolCalls (tool-2) are dropped, not merged into blocks.
      toolCalls: [
        {
          id: 'tool-2',
          name: 'glob',
          status: 'success',
          result: { matches: [] },
        },
      ],
    })
    expect(normalized.contentBlocks).toEqual([
      {
        type: 'tool',
        phase: 'call',
        toolCall: {
          id: 'tool-1',
          name: 'read',
          state: 'cancelled',
          // display.text is normalized to display.title.
          display: { title: 'Stopped by user' },
        },
      },
      {
        type: 'text',
        channel: 'assistant',
        content: 'hello',
      },
    ])
  })
  it('builds normalized user messages with stripped optional empties', () => {
    const msg = buildPersistedUserMessage({
      id: 'user-1',
      content: 'hello',
      fileAttachments: [],
      contexts: [],
    })
    expect(msg).toMatchObject({
      id: 'user-1',
      role: 'user',
      content: 'hello',
    })
    // Empty optional arrays must be omitted entirely, not persisted as [].
    expect(msg.fileAttachments).toBeUndefined()
    expect(msg.contexts).toBeUndefined()
  })
})

View File

@@ -0,0 +1,469 @@
import {
MothershipStreamV1CompletionStatus,
MothershipStreamV1EventType,
MothershipStreamV1SpanLifecycleEvent,
MothershipStreamV1SpanPayloadKind,
type MothershipStreamV1StreamScope,
MothershipStreamV1TextChannel,
MothershipStreamV1ToolOutcome,
MothershipStreamV1ToolPhase,
} from '@/lib/copilot/generated/mothership-stream-v1'
import type {
ContentBlock,
LocalToolCallStatus,
OrchestratorResult,
} from '@/lib/copilot/request/types'
// A tool call's stored state: either an in-flight local status or a terminal
// stream outcome from the generated mothership contract.
export type PersistedToolState = LocalToolCallStatus | MothershipStreamV1ToolOutcome
// Canonical persisted representation of a single tool invocation.
export interface PersistedToolCall {
  id: string
  name: string
  state: PersistedToolState
  params?: Record<string, unknown>
  result?: { success: boolean; output?: unknown; error?: string }
  error?: string
  calledBy?: string
  durationMs?: number
  // Optional presentation hints (title / phase label) for rendering.
  display?: { title?: string; phaseLabel?: string }
}
// One block of persisted message content in the canonical schema. Which of
// the optional fields are populated depends on `type` (text/tool/span/...).
export interface PersistedContentBlock {
  type: MothershipStreamV1EventType
  lane?: MothershipStreamV1StreamScope['lane']
  channel?: MothershipStreamV1TextChannel
  phase?: MothershipStreamV1ToolPhase
  kind?: MothershipStreamV1SpanPayloadKind
  lifecycle?: MothershipStreamV1SpanLifecycleEvent
  status?: MothershipStreamV1CompletionStatus
  content?: string
  toolCall?: PersistedToolCall
}
// Metadata for a file attached to a user message.
export interface PersistedFileAttachment {
  id: string
  key: string
  filename: string
  media_type: string
  size: number
}
// A context reference attached to a user message. NOTE(review): presumably
// exactly one id field is set per `kind` — confirm against the writers.
export interface PersistedMessageContext {
  kind: string
  label: string
  workflowId?: string
  knowledgeId?: string
  tableId?: string
  fileId?: string
}
// A chat message as stored (the raw JSONB row payload, normalized).
export interface PersistedMessage {
  id: string
  role: 'user' | 'assistant'
  content: string
  timestamp: string
  requestId?: string
  contentBlocks?: PersistedContentBlock[]
  fileAttachments?: PersistedFileAttachment[]
  contexts?: PersistedMessageContext[]
}
// ---------------------------------------------------------------------------
// Write: OrchestratorResult → PersistedMessage
// ---------------------------------------------------------------------------
/**
 * Derives the persisted state for a tool-call content block.
 * A concrete result (success flag present) takes precedence over the raw
 * status; a block without tool data falls back to 'pending'.
 */
function resolveToolState(block: ContentBlock): PersistedToolState {
  const call = block.toolCall
  if (!call) return 'pending'
  const succeeded = call.result?.success
  if (succeeded === undefined) {
    return call.status as PersistedToolState
  }
  return succeeded
    ? MothershipStreamV1ToolOutcome.success
    : MothershipStreamV1ToolOutcome.error
}
/**
 * Maps a live orchestrator ContentBlock to its persisted (canonical) shape.
 *
 * - 'text'          → text block on the assistant channel
 * - 'thinking'      → text block on the thinking channel
 * - 'subagent'      → span start of kind 'subagent'
 * - 'subagent_text' → assistant text on the subagent lane
 * - 'tool_call'     → tool block (phase 'call'); see below
 * - anything else   → bare text block carrying only the content
 *
 * For a subagent tool that never reached a terminal success/error outcome,
 * params and result are stripped before persisting.
 */
function mapContentBlock(block: ContentBlock): PersistedContentBlock {
  switch (block.type) {
    case 'text':
      return {
        type: MothershipStreamV1EventType.text,
        channel: MothershipStreamV1TextChannel.assistant,
        content: block.content,
      }
    case 'thinking':
      return {
        type: MothershipStreamV1EventType.text,
        channel: MothershipStreamV1TextChannel.thinking,
        content: block.content,
      }
    case 'subagent':
      return {
        type: MothershipStreamV1EventType.span,
        kind: MothershipStreamV1SpanPayloadKind.subagent,
        lifecycle: MothershipStreamV1SpanLifecycleEvent.start,
        content: block.content,
      }
    case 'subagent_text':
      return {
        type: MothershipStreamV1EventType.text,
        lane: 'subagent',
        channel: MothershipStreamV1TextChannel.assistant,
        content: block.content,
      }
    case 'tool_call': {
      // Defensive: a tool_call block without tool data degrades to content.
      if (!block.toolCall) {
        return {
          type: MothershipStreamV1EventType.tool,
          phase: MothershipStreamV1ToolPhase.call,
          content: block.content,
        }
      }
      const state = resolveToolState(block)
      const isSubagentTool = !!block.calledBy
      // 'cancelled' is grouped with the in-flight states here: none of them
      // carry a payload worth persisting for a subagent tool.
      const isNonTerminal =
        state === MothershipStreamV1ToolOutcome.cancelled ||
        state === 'pending' ||
        state === 'executing'
      const stripPayload = isSubagentTool && isNonTerminal
      const toolCall: PersistedToolCall = {
        id: block.toolCall.id,
        name: block.toolCall.name,
        state,
        // Fix: omit `result` when it is absent instead of writing an
        // explicit `result: undefined` key (consistent with `params`).
        ...(!stripPayload && block.toolCall.result
          ? { result: block.toolCall.result }
          : {}),
        ...(!stripPayload && block.toolCall.params
          ? { params: block.toolCall.params }
          : {}),
        ...(block.calledBy ? { calledBy: block.calledBy } : {}),
      }
      return {
        type: MothershipStreamV1EventType.tool,
        phase: MothershipStreamV1ToolPhase.call,
        toolCall,
      }
    }
    default:
      return { type: MothershipStreamV1EventType.text, content: block.content }
  }
}
/**
 * Converts a finished orchestrator run into an assistant message ready for
 * persistence. The explicit `requestId` argument takes precedence over the
 * one carried on the result.
 */
export function buildPersistedAssistantMessage(
  result: OrchestratorResult,
  requestId?: string
): PersistedMessage {
  const persisted: PersistedMessage = {
    id: crypto.randomUUID(),
    role: 'assistant',
    content: result.content,
    timestamp: new Date().toISOString(),
  }
  const effectiveRequestId = requestId || result.requestId
  if (effectiveRequestId) {
    persisted.requestId = effectiveRequestId
  }
  if (result.contentBlocks.length > 0) {
    persisted.contentBlocks = result.contentBlocks.map(mapContentBlock)
  }
  return persisted
}
/** Input for {@link buildPersistedUserMessage}. */
export interface UserMessageParams {
  id: string
  content: string
  fileAttachments?: PersistedFileAttachment[]
  contexts?: PersistedMessageContext[]
}
/**
 * Builds a normalized user message for persistence. Empty attachment and
 * context arrays are stripped entirely, and only truthy context id fields
 * are copied, so the stored JSON stays minimal.
 */
export function buildPersistedUserMessage(params: UserMessageParams): PersistedMessage {
  const { id, content, fileAttachments, contexts } = params
  const persisted: PersistedMessage = {
    id,
    role: 'user',
    content,
    timestamp: new Date().toISOString(),
  }
  if (fileAttachments?.length) {
    persisted.fileAttachments = fileAttachments
  }
  if (contexts?.length) {
    persisted.contexts = contexts.map((ctx) => ({
      kind: ctx.kind,
      label: ctx.label,
      ...(ctx.workflowId ? { workflowId: ctx.workflowId } : {}),
      ...(ctx.knowledgeId ? { knowledgeId: ctx.knowledgeId } : {}),
      ...(ctx.tableId ? { tableId: ctx.tableId } : {}),
      ...(ctx.fileId ? { fileId: ctx.fileId } : {}),
    }))
  }
  return persisted
}
// ---------------------------------------------------------------------------
// Read: raw JSONB → PersistedMessage
// Handles both canonical (type: 'tool', 'text', 'span', 'complete') and
// legacy (type: 'tool_call', 'thinking', 'subagent', 'stopped') blocks.
// ---------------------------------------------------------------------------
// Block type strings that belong to the canonical schema; anything outside
// this set is treated as a legacy block shape.
const CANONICAL_BLOCK_TYPES: Set<string> = new Set(Object.values(MothershipStreamV1EventType))
// Loosely-typed view of a content block as read back from JSONB storage.
// Covers both canonical and legacy field layouts, hence everything optional.
interface RawBlock {
  type: string
  lane?: string
  content?: string
  channel?: string
  phase?: string
  kind?: string
  lifecycle?: string
  status?: string
  // Tool payload. Legacy rows used display.text where canonical rows use
  // display.title (see normalizeLegacyBlock / normalizeCanonicalBlock).
  toolCall?: {
    id?: string
    name?: string
    state?: string
    params?: Record<string, unknown>
    result?: { success: boolean; output?: unknown; error?: string }
    display?: { text?: string; title?: string; phaseLabel?: string }
    calledBy?: string
    durationMs?: number
    error?: string
  } | null
}
// Shape of an entry in the legacy top-level `toolCalls` array.
interface LegacyToolCall {
  id: string
  name: string
  status: string
  params?: Record<string, unknown>
  result?: unknown
  error?: string
  durationMs?: number
}
// Maps every accepted incoming state string to its normalized form; states
// absent from this table are collapsed to `error` by normalizeToolState.
const OUTCOME_NORMALIZATION: Record<string, PersistedToolState> = {
  [MothershipStreamV1ToolOutcome.success]: MothershipStreamV1ToolOutcome.success,
  [MothershipStreamV1ToolOutcome.error]: MothershipStreamV1ToolOutcome.error,
  [MothershipStreamV1ToolOutcome.cancelled]: MothershipStreamV1ToolOutcome.cancelled,
  [MothershipStreamV1ToolOutcome.skipped]: MothershipStreamV1ToolOutcome.skipped,
  [MothershipStreamV1ToolOutcome.rejected]: MothershipStreamV1ToolOutcome.rejected,
  pending: 'pending',
  executing: 'executing',
}
/**
 * Coerces a raw persisted state string into a known PersistedToolState.
 * Missing/empty states default to 'pending'; unrecognized strings collapse
 * to the error outcome rather than leaking arbitrary values.
 */
function normalizeToolState(state: string | undefined): PersistedToolState {
  if (!state) return 'pending'
  const known = OUTCOME_NORMALIZATION[state]
  return known ?? MothershipStreamV1ToolOutcome.error
}
/** True when the block's `type` already uses the canonical event schema. */
function isCanonicalBlock(block: RawBlock): boolean {
  return CANONICAL_BLOCK_TYPES.has(block.type)
}
function normalizeCanonicalBlock(block: RawBlock): PersistedContentBlock {
const result: PersistedContentBlock = {
type: block.type as MothershipStreamV1EventType,
}
if (block.lane === 'main' || block.lane === 'subagent') {
result.lane = block.lane
}
if (block.content !== undefined) result.content = block.content
if (block.channel) result.channel = block.channel as MothershipStreamV1TextChannel
if (block.phase) result.phase = block.phase as MothershipStreamV1ToolPhase
if (block.kind) result.kind = block.kind as MothershipStreamV1SpanPayloadKind
if (block.lifecycle) result.lifecycle = block.lifecycle as MothershipStreamV1SpanLifecycleEvent
if (block.status) result.status = block.status as MothershipStreamV1CompletionStatus
if (block.toolCall) {
result.toolCall = {
id: block.toolCall.id ?? '',
name: block.toolCall.name ?? '',
state: normalizeToolState(block.toolCall.state),
...(block.toolCall.params ? { params: block.toolCall.params } : {}),
...(block.toolCall.result ? { result: block.toolCall.result } : {}),
...(block.toolCall.calledBy ? { calledBy: block.toolCall.calledBy } : {}),
...(block.toolCall.error ? { error: block.toolCall.error } : {}),
...(block.toolCall.durationMs ? { durationMs: block.toolCall.durationMs } : {}),
...(block.toolCall.display
? {
display: {
title: block.toolCall.display.title ?? block.toolCall.display.text,
phaseLabel: block.toolCall.display.phaseLabel,
},
}
: {}),
}
}
return result
}
function normalizeLegacyBlock(block: RawBlock): PersistedContentBlock {
if (block.type === 'tool_call' && block.toolCall) {
return {
type: MothershipStreamV1EventType.tool,
phase: MothershipStreamV1ToolPhase.call,
toolCall: {
id: block.toolCall.id ?? '',
name: block.toolCall.name ?? '',
state: normalizeToolState(block.toolCall.state),
...(block.toolCall.params ? { params: block.toolCall.params } : {}),
...(block.toolCall.result ? { result: block.toolCall.result } : {}),
...(block.toolCall.calledBy ? { calledBy: block.toolCall.calledBy } : {}),
...(block.toolCall.display ? { display: { title: block.toolCall.display.text } } : {}),
},
}
}
if (block.type === 'thinking') {
return {
type: MothershipStreamV1EventType.text,
channel: MothershipStreamV1TextChannel.thinking,
content: block.content,
}
}
if (block.type === 'subagent' || block.type === 'subagent_text') {
if (block.type === 'subagent_text') {
return {
type: MothershipStreamV1EventType.text,
lane: 'subagent',
channel: MothershipStreamV1TextChannel.assistant,
content: block.content,
}
}
return {
type: MothershipStreamV1EventType.span,
kind: MothershipStreamV1SpanPayloadKind.subagent,
lifecycle: MothershipStreamV1SpanLifecycleEvent.start,
content: block.content,
}
}
if (block.type === 'subagent_end') {
return {
type: MothershipStreamV1EventType.span,
kind: MothershipStreamV1SpanPayloadKind.subagent,
lifecycle: MothershipStreamV1SpanLifecycleEvent.end,
}
}
if (block.type === 'stopped') {
return {
type: MothershipStreamV1EventType.complete,
status: MothershipStreamV1CompletionStatus.cancelled,
}
}
return {
type: MothershipStreamV1EventType.text,
channel: MothershipStreamV1TextChannel.assistant,
content: block.content,
}
}
/** Routes a raw block to the canonical or legacy normalizer by its type. */
function normalizeBlock(block: RawBlock): PersistedContentBlock {
  if (isCanonicalBlock(block)) {
    return normalizeCanonicalBlock(block)
  }
  return normalizeLegacyBlock(block)
}
/**
 * Converts an entry of the legacy top-level `toolCalls` array into a
 * canonical tool content block. Legacy entries carry no explicit success
 * flag, so success is inferred from the status string.
 *
 * Fix: a legacy call that failed without producing a result payload
 * (tc.result == null but tc.error set) previously lost its error message —
 * the error was only folded into result.error, which was skipped. It is now
 * surfaced on the persisted tool call's top-level `error` field.
 */
function normalizeLegacyToolCall(tc: LegacyToolCall): PersistedContentBlock {
  const state = normalizeToolState(tc.status)
  return {
    type: MothershipStreamV1EventType.tool,
    phase: MothershipStreamV1ToolPhase.call,
    toolCall: {
      id: tc.id,
      name: tc.name,
      state,
      ...(tc.params ? { params: tc.params } : {}),
      ...(tc.result != null
        ? {
            result: {
              success: tc.status === MothershipStreamV1ToolOutcome.success,
              output: tc.result,
              ...(tc.error ? { error: tc.error } : {}),
            },
          }
        : // No result payload: carry the error (if any) so it is not dropped.
          tc.error
          ? { error: tc.error }
          : {}),
      ...(tc.durationMs ? { durationMs: tc.durationMs } : {}),
    },
  }
}
/** True when any raw block already encodes a tool call (canonical or legacy). */
function blocksContainTools(blocks: RawBlock[]): boolean {
  for (const block of blocks) {
    if (block.type === 'tool_call' || block.type === MothershipStreamV1EventType.tool) {
      return true
    }
  }
  return false
}
/**
 * Normalizes every raw block, then appends the message's own content as an
 * assistant text block when no non-thinking text block with content exists
 * yet (keeps older rows whose text lived only in `content` renderable).
 */
function normalizeBlocks(rawBlocks: RawBlock[], messageContent: string): PersistedContentBlock[] {
  const normalized = rawBlocks.map(normalizeBlock)
  const isAssistantText = (b: PersistedContentBlock): boolean =>
    b.type === MothershipStreamV1EventType.text &&
    b.channel !== MothershipStreamV1TextChannel.thinking &&
    Boolean(b.content?.trim())
  if (!normalized.some(isAssistantText) && messageContent.trim()) {
    normalized.push({
      type: MothershipStreamV1EventType.text,
      channel: MothershipStreamV1TextChannel.assistant,
      content: messageContent,
    })
  }
  return normalized
}
/**
 * Reads a raw JSONB message row and produces a fully normalized
 * PersistedMessage. Accepts canonical blocks, legacy blocks, and the legacy
 * top-level `toolCalls` array; missing identity fields get safe defaults.
 * Top-level toolCalls are appended only when no block already carries tool
 * data, so tools are never duplicated.
 */
export function normalizeMessage(raw: Record<string, unknown>): PersistedMessage {
  const message: PersistedMessage = {
    id: (raw.id as string) ?? crypto.randomUUID(),
    role: (raw.role as 'user' | 'assistant') ?? 'assistant',
    content: (raw.content as string) ?? '',
    timestamp: (raw.timestamp as string) ?? new Date().toISOString(),
  }
  if (raw.requestId && typeof raw.requestId === 'string') {
    message.requestId = raw.requestId
  }

  // Coerce both legacy containers to arrays so the branching below is flat.
  const rawBlocks = Array.isArray(raw.contentBlocks) ? (raw.contentBlocks as RawBlock[]) : []
  const rawToolCalls = Array.isArray(raw.toolCalls) ? (raw.toolCalls as LegacyToolCall[]) : []

  if (rawBlocks.length > 0) {
    message.contentBlocks = normalizeBlocks(rawBlocks, message.content)
    if (rawToolCalls.length > 0 && !blocksContainTools(rawBlocks)) {
      message.contentBlocks.push(...rawToolCalls.map(normalizeLegacyToolCall))
    }
  } else if (rawToolCalls.length > 0) {
    message.contentBlocks = rawToolCalls.map(normalizeLegacyToolCall)
    if (message.content.trim()) {
      message.contentBlocks.push({
        type: MothershipStreamV1EventType.text,
        channel: MothershipStreamV1TextChannel.assistant,
        content: message.content,
      })
    }
  }

  const attachments = raw.fileAttachments as PersistedFileAttachment[] | undefined
  if (Array.isArray(attachments) && attachments.length > 0) {
    message.fileAttachments = attachments
  }
  const contexts = raw.contexts as PersistedMessageContext[] | undefined
  if (Array.isArray(contexts) && contexts.length > 0) {
    message.contexts = contexts.map((ctx) => ({
      kind: ctx.kind,
      label: ctx.label,
      ...(ctx.workflowId ? { workflowId: ctx.workflowId } : {}),
      ...(ctx.knowledgeId ? { knowledgeId: ctx.knowledgeId } : {}),
      ...(ctx.tableId ? { tableId: ctx.tableId } : {}),
      ...(ctx.fileId ? { fileId: ctx.fileId } : {}),
    }))
  }
  return message
}

Some files were not shown because too many files have changed in this diff Show More