Compare commits

..

43 Commits

Author SHA1 Message Date
Siddharth Ganesan
2a7ebfb396 Add opus 4.6 2026-02-06 17:31:52 -08:00
Siddharth Ganesan
ca76e38e8c Fix copilot mcp tool names to be sim prefixed 2026-02-06 16:37:19 -08:00
Siddharth Ganesan
dad6fd68fa Clean up mcp 2026-02-06 16:11:02 -08:00
Siddharth Ganesan
d1a2d661c9 Updates 2026-02-06 14:25:51 -08:00
Siddharth Ganesan
f63ed61bc8 Fix 2026-02-06 13:54:08 -08:00
Siddharth Ganesan
0f5eb9d351 Fix mcp 2026-02-06 13:45:52 -08:00
Siddharth Ganesan
665cc6a3d1 Fix lint 2026-02-06 12:38:31 -08:00
Siddharth Ganesan
a7341cdcd3 Add copilot mcp tracking 2026-02-06 12:38:16 -08:00
Siddharth Ganesan
92efd817d2 Add tools 2026-02-06 12:37:48 -08:00
Siddharth Ganesan
3d5321d9a1 Add tools 2026-02-06 12:37:48 -08:00
Siddharth Ganesan
13c8621513 Fix definitions 2026-02-06 12:37:48 -08:00
Siddharth Ganesan
529d382d49 Add respond to subagents 2026-02-06 12:37:48 -08:00
Siddharth Ganesan
fe70beb751 Updated mcp 2026-02-06 12:37:48 -08:00
Siddharth Ganesan
529233bfb6 Clean up mcp 2026-02-06 12:37:48 -08:00
Siddharth Ganesan
43a32a627f Fix mcp 2026-02-06 12:37:47 -08:00
Siddharth Ganesan
ea7a07a0d0 Fix - cursor makes me sad 2026-02-06 12:37:47 -08:00
Siddharth Ganesan
3f3d5b276d Refactor complete - no testing yet 2026-02-06 12:37:47 -08:00
Siddharth Ganesan
ef4cae48f2 Refactor 2026-02-06 12:37:47 -08:00
Siddharth Ganesan
460935c032 Continued cleanup 2026-02-06 12:37:47 -08:00
Siddharth Ganesan
5fc5f46733 First cleanup pass complete - untested 2026-02-06 12:37:46 -08:00
Siddharth Ganesan
8d70132a4b Tool refactor 2026-02-06 12:37:46 -08:00
Siddharth Ganesan
c045580230 Initial test shows diff store still working 2026-02-06 12:37:46 -08:00
Siddharth Ganesan
bff3f03ba6 Initial temp state, in the middle of a refactor 2026-02-06 12:37:46 -08:00
Siddharth Ganesan
c20a5633bf Checkpoint 2026-02-06 12:37:46 -08:00
Siddharth Ganesan
1c23805782 Fix lint 2026-02-06 12:37:46 -08:00
Siddharth Ganesan
bb6b182d24 Fix stream buffer 2026-02-06 12:37:46 -08:00
Siddharth Ganesan
b7aaa53300 LUAAAA 2026-02-06 12:37:46 -08:00
Siddharth Ganesan
8d477c0bed Fix edge issue 2026-02-06 12:37:46 -08:00
Siddharth Ganesan
01371c8809 Streaming seems to work but copilot is dumb 2026-02-06 12:37:46 -08:00
Siddharth Ganesan
490b6bde08 Things are broken 2026-02-06 12:37:45 -08:00
Siddharth Ganesan
9073c1a0bf Fix abort 2026-02-06 12:37:45 -08:00
Siddharth Ganesan
d0329e14e5 Streaming 2026-02-06 12:37:45 -08:00
Siddharth Ganesan
d1b2e6c757 Checkpoint 2026-02-06 12:37:45 -08:00
Siddharth Ganesan
decc19e73b BROKEN 2026-02-06 12:37:45 -08:00
Siddharth Ganesan
d79fcab659 Fix 2026-02-06 12:37:45 -08:00
Siddharth Ganesan
c72e244655 Improvement 2026-02-06 12:37:45 -08:00
Siddharth Ganesan
7bb3dd6103 mcp v1 2026-02-06 12:37:45 -08:00
Siddharth Ganesan
75b62423bc Add mcp 2026-02-06 12:37:45 -08:00
Siddharth Ganesan
565167d3b3 Stuff 2026-02-06 12:37:45 -08:00
Siddharth Ganesan
9ff5237a2e Ss tests 2026-02-06 12:37:45 -08:00
Siddharth Ganesan
e9b80c566c Basic ss tes 2026-02-06 12:37:45 -08:00
Siddharth Ganesan
664ce3168c v1 2026-02-06 12:37:45 -08:00
Siddharth Ganesan
5d82f7ae73 v0 2026-02-06 12:37:45 -08:00
268 changed files with 29152 additions and 18505 deletions

View File

@@ -5462,24 +5462,3 @@ export function EnrichSoIcon(props: SVGProps<SVGSVGElement>) {
</svg> </svg>
) )
} }
export function AgentSkillsIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg
{...props}
xmlns='http://www.w3.org/2000/svg'
width='16'
height='16'
viewBox='0 0 16 16'
fill='none'
>
<path
d='M8 1L14.0622 4.5V11.5L8 15L1.93782 11.5V4.5L8 1Z'
stroke='currentColor'
strokeWidth='1.5'
fill='none'
/>
<path d='M8 4.5L11 6.25V9.75L8 11.5L5 9.75V6.25L8 4.5Z' fill='currentColor' />
</svg>
)
}

View File

@@ -56,7 +56,7 @@ Switch between modes using the mode selector at the bottom of the input area.
Select your preferred AI model using the model selector at the bottom right of the input area. Select your preferred AI model using the model selector at the bottom right of the input area.
**Available Models:** **Available Models:**
- Claude 4.5 Opus, Sonnet (default), Haiku - Claude 4.6 Opus (default), 4.5 Opus, Sonnet, Haiku
- GPT 5.2 Codex, Pro - GPT 5.2 Codex, Pro
- Gemini 3 Pro - Gemini 3 Pro

View File

@@ -18,9 +18,7 @@ This means you can attach many skills to an agent without bloating its context w
## Creating Skills ## Creating Skills
Go to **Settings** and select **Skills** under the Tools section. Go to **Settings** (gear icon) and select **Skills** under the Tools section.
![Manage Skills](/static/skills/manage-skills.png)
Click **Add** to create a new skill with three fields: Click **Add** to create a new skill with three fields:
@@ -54,22 +52,11 @@ Use when the user asks you to write, optimize, or debug SQL queries.
... ...
``` ```
**Recommended structure:**
- **When to use** — Specific triggers and scenarios
- **Instructions** — Step-by-step guidance with numbered lists
- **Examples** — Input/output samples showing expected behavior
- **Common Patterns** — Reusable approaches for frequent tasks
- **Edge Cases** — Gotchas and special considerations
Keep skills focused and under 500 lines. If a skill grows too large, split it into multiple specialized skills.
## Adding Skills to an Agent ## Adding Skills to an Agent
Open any **Agent** block and find the **Skills** dropdown below the tools section. Select the skills you want the agent to have access to. Open any **Agent** block and find the **Skills** dropdown below the tools section. Select the skills you want the agent to have access to.
![Add Skill](/static/skills/add-skill.png) Selected skills appear as chips that you can click to edit or remove.
Selected skills appear as cards that you can click to edit or remove.
### What Happens at Runtime ### What Happens at Runtime
@@ -82,50 +69,12 @@ When the workflow runs:
This works across all supported LLM providers — the `load_skill` tool uses standard tool-calling, so no provider-specific configuration is needed. This works across all supported LLM providers — the `load_skill` tool uses standard tool-calling, so no provider-specific configuration is needed.
## Common Use Cases ## Tips
Skills are most valuable when agents need specialized knowledge or multi-step workflows: - **Keep descriptions actionable** — Instead of "Helps with SQL", write "Write optimized SQL queries for PostgreSQL, MySQL, and SQLite, including index recommendations and query plan analysis"
**Domain Expertise**
- `api-integration-expert` — Best practices for calling specific APIs (authentication, rate limiting, error handling)
- `data-transformation` — ETL patterns, data cleaning, and validation rules
- `code-reviewer` — Code review guidelines specific to your team's standards
**Workflow Templates**
- `bug-investigation` — Step-by-step debugging methodology (reproduce → isolate → test → fix)
- `feature-implementation` — Development workflow from requirements to deployment
- `document-generator` — Templates and formatting rules for technical documentation
**Company-Specific Knowledge**
- `our-architecture` — System architecture diagrams, service dependencies, and deployment processes
- `style-guide` — Brand guidelines, writing tone, UI/UX patterns
- `customer-onboarding` — Standard procedures and common customer questions
**When to use skills vs. agent instructions:**
- Use **skills** for knowledge that applies across multiple workflows or changes frequently
- Use **agent instructions** for task-specific context that's unique to a single agent
## Best Practices
**Writing Effective Descriptions**
- **Be specific and keyword-rich** — Instead of "Helps with SQL", write "Write optimized SQL queries for PostgreSQL, MySQL, and SQLite, including index recommendations and query plan analysis"
- **Include activation triggers** — Mention specific words or phrases that should prompt the skill (e.g., "Use when the user mentions PDFs, forms, or document extraction")
- **Keep it under 200 words** — Agents scan descriptions quickly; make every word count
**Skill Scope and Organization**
- **One skill per domain** — A focused `sql-expert` skill works better than a broad `database-everything` skill - **One skill per domain** — A focused `sql-expert` skill works better than a broad `database-everything` skill
- **Limit to 5-10 skills per agent** — More skills = more decision overhead; start small and add as needed - **Use markdown structure** — Headers, lists, and code blocks help the agent parse and follow instructions
- **Split large skills** — If a skill exceeds 500 lines, break it into focused sub-skills - **Test iteratively** — Run your workflow and check if the agent activates the skill when expected
**Content Structure**
- **Use markdown formatting** — Headers, lists, and code blocks help agents parse and follow instructions
- **Provide examples** — Show input/output pairs so agents understand expected behavior
- **Be explicit about edge cases** — Don't assume agents will infer special handling
**Testing and Iteration**
- **Test activation** — Run your workflow and verify the agent loads the skill when expected
- **Check for false positives** — Make sure skills aren't activating when they shouldn't
- **Refine descriptions** — If a skill isn't loading when needed, add more keywords to the description
## Learn More ## Learn More

View File

@@ -10,21 +10,6 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
color="#6366F1" color="#6366F1"
/> />
{/* MANUAL-CONTENT-START:intro */}
[Airweave](https://airweave.ai/) is an AI-powered semantic search platform that helps you discover and retrieve knowledge across all your synced data sources. Built for modern teams, Airweave enables fast, relevant search results using neural, hybrid, or keyword-based strategies tailored to your needs.
With Airweave, you can:
- **Search smarter**: Use natural language queries to uncover information stored across your connected tools and databases
- **Unify your data**: Seamlessly access content from sources like code, docs, chat, emails, cloud files, and more
- **Customize retrieval**: Select between hybrid (semantic + keyword), neural, or keyword search strategies for optimal results
- **Boost recall**: Expand search queries with AI to find more comprehensive answers
- **Rerank results using AI**: Prioritize the most relevant answers with powerful language models
- **Get instant answers**: Generate clear, AI-powered responses synthesized from your data
In Sim, the Airweave integration empowers your agents to search, summarize, and extract insights from all your organizations data via a single tool. Use Airweave to drive rich, contextual knowledge retrieval within your workflows—whether answering questions, generating summaries, or supporting dynamic decision-making.
{/* MANUAL-CONTENT-END */}
## Usage Instructions ## Usage Instructions
Search across your synced data sources using Airweave. Supports semantic search with hybrid, neural, or keyword retrieval strategies. Optionally generate AI-powered answers from search results. Search across your synced data sources using Airweave. Supports semantic search with hybrid, neural, or keyword retrieval strategies. Optionally generate AI-powered answers from search results.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 28 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 56 KiB

View File

@@ -0,0 +1,6 @@
import { type NextRequest, NextResponse } from 'next/server'
import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery'
export async function GET(request: NextRequest): Promise<NextResponse> {
return createMcpAuthorizationServerMetadataResponse(request)
}

View File

@@ -0,0 +1,6 @@
import { type NextRequest, NextResponse } from 'next/server'
import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery'
export async function GET(request: NextRequest): Promise<NextResponse> {
return createMcpAuthorizationServerMetadataResponse(request)
}

View File

@@ -0,0 +1,6 @@
import { type NextRequest, NextResponse } from 'next/server'
import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery'
export async function GET(request: NextRequest): Promise<NextResponse> {
return createMcpAuthorizationServerMetadataResponse(request)
}

View File

@@ -0,0 +1,6 @@
import { type NextRequest, NextResponse } from 'next/server'
import { createMcpProtectedResourceMetadataResponse } from '@/lib/mcp/oauth-discovery'
export async function GET(request: NextRequest): Promise<NextResponse> {
return createMcpProtectedResourceMetadataResponse(request)
}

View File

@@ -0,0 +1,6 @@
import { type NextRequest, NextResponse } from 'next/server'
import { createMcpProtectedResourceMetadataResponse } from '@/lib/mcp/oauth-discovery'
export async function GET(request: NextRequest): Promise<NextResponse> {
return createMcpProtectedResourceMetadataResponse(request)
}

View File

@@ -5,7 +5,7 @@ import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { generateAgentCard, generateSkillsFromWorkflow } from '@/lib/a2a/agent-card' import { generateAgentCard, generateSkillsFromWorkflow } from '@/lib/a2a/agent-card'
import type { AgentCapabilities, AgentSkill } from '@/lib/a2a/types' import type { AgentCapabilities, AgentSkill } from '@/lib/a2a/types'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { getRedisClient } from '@/lib/core/config/redis' import { getRedisClient } from '@/lib/core/config/redis'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils' import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils'
import { checkWorkspaceAccess } from '@/lib/workspaces/permissions/utils' import { checkWorkspaceAccess } from '@/lib/workspaces/permissions/utils'
@@ -40,7 +40,7 @@ export async function GET(request: NextRequest, { params }: { params: Promise<Ro
} }
if (!agent.agent.isPublished) { if (!agent.agent.isPublished) {
const auth = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const auth = await checkHybridAuth(request, { requireWorkflowId: false })
if (!auth.success) { if (!auth.success) {
return NextResponse.json({ error: 'Agent not published' }, { status: 404 }) return NextResponse.json({ error: 'Agent not published' }, { status: 404 })
} }
@@ -81,7 +81,7 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<Ro
const { agentId } = await params const { agentId } = await params
try { try {
const auth = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const auth = await checkHybridAuth(request, { requireWorkflowId: false })
if (!auth.success || !auth.userId) { if (!auth.success || !auth.userId) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
} }
@@ -151,7 +151,7 @@ export async function DELETE(request: NextRequest, { params }: { params: Promise
const { agentId } = await params const { agentId } = await params
try { try {
const auth = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const auth = await checkHybridAuth(request, { requireWorkflowId: false })
if (!auth.success || !auth.userId) { if (!auth.success || !auth.userId) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
} }
@@ -189,7 +189,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<R
const { agentId } = await params const { agentId } = await params
try { try {
const auth = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const auth = await checkHybridAuth(request, { requireWorkflowId: false })
if (!auth.success || !auth.userId) { if (!auth.success || !auth.userId) {
logger.warn('A2A agent publish auth failed:', { error: auth.error, hasUserId: !!auth.userId }) logger.warn('A2A agent publish auth failed:', { error: auth.error, hasUserId: !!auth.userId })
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })

View File

@@ -13,7 +13,7 @@ import { v4 as uuidv4 } from 'uuid'
import { generateSkillsFromWorkflow } from '@/lib/a2a/agent-card' import { generateSkillsFromWorkflow } from '@/lib/a2a/agent-card'
import { A2A_DEFAULT_CAPABILITIES } from '@/lib/a2a/constants' import { A2A_DEFAULT_CAPABILITIES } from '@/lib/a2a/constants'
import { sanitizeAgentName } from '@/lib/a2a/utils' import { sanitizeAgentName } from '@/lib/a2a/utils'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils' import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils'
import { hasValidStartBlockInState } from '@/lib/workflows/triggers/trigger-utils' import { hasValidStartBlockInState } from '@/lib/workflows/triggers/trigger-utils'
import { getWorkspaceById } from '@/lib/workspaces/permissions/utils' import { getWorkspaceById } from '@/lib/workspaces/permissions/utils'
@@ -27,7 +27,7 @@ export const dynamic = 'force-dynamic'
*/ */
export async function GET(request: NextRequest) { export async function GET(request: NextRequest) {
try { try {
const auth = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const auth = await checkHybridAuth(request, { requireWorkflowId: false })
if (!auth.success || !auth.userId) { if (!auth.success || !auth.userId) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
} }
@@ -87,7 +87,7 @@ export async function GET(request: NextRequest) {
*/ */
export async function POST(request: NextRequest) { export async function POST(request: NextRequest) {
try { try {
const auth = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const auth = await checkHybridAuth(request, { requireWorkflowId: false })
if (!auth.success || !auth.userId) { if (!auth.success || !auth.userId) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
} }

View File

@@ -5,7 +5,7 @@ import { and, eq } from 'drizzle-orm'
import { jwtDecode } from 'jwt-decode' import { jwtDecode } from 'jwt-decode'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request' import { generateRequestId } from '@/lib/core/utils/request'
import { evaluateScopeCoverage, type OAuthProvider, parseProvider } from '@/lib/oauth' import { evaluateScopeCoverage, type OAuthProvider, parseProvider } from '@/lib/oauth'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils' import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
@@ -81,7 +81,7 @@ export async function GET(request: NextRequest) {
const { provider: providerParam, workflowId, credentialId } = parseResult.data const { provider: providerParam, workflowId, credentialId } = parseResult.data
// Authenticate requester (supports session, API key, internal JWT) // Authenticate requester (supports session, API key, internal JWT)
const authResult = await checkSessionOrInternalAuth(request) const authResult = await checkHybridAuth(request)
if (!authResult.success || !authResult.userId) { if (!authResult.success || !authResult.userId) {
logger.warn(`[${requestId}] Unauthenticated credentials request rejected`) logger.warn(`[${requestId}] Unauthenticated credentials request rejected`)
return NextResponse.json({ error: 'User not authenticated' }, { status: 401 }) return NextResponse.json({ error: 'User not authenticated' }, { status: 401 })

View File

@@ -12,7 +12,7 @@ describe('OAuth Token API Routes', () => {
const mockRefreshTokenIfNeeded = vi.fn() const mockRefreshTokenIfNeeded = vi.fn()
const mockGetOAuthToken = vi.fn() const mockGetOAuthToken = vi.fn()
const mockAuthorizeCredentialUse = vi.fn() const mockAuthorizeCredentialUse = vi.fn()
const mockCheckSessionOrInternalAuth = vi.fn() const mockCheckHybridAuth = vi.fn()
const mockLogger = createMockLogger() const mockLogger = createMockLogger()
@@ -42,7 +42,7 @@ describe('OAuth Token API Routes', () => {
})) }))
vi.doMock('@/lib/auth/hybrid', () => ({ vi.doMock('@/lib/auth/hybrid', () => ({
checkSessionOrInternalAuth: mockCheckSessionOrInternalAuth, checkHybridAuth: mockCheckHybridAuth,
})) }))
}) })
@@ -235,7 +235,7 @@ describe('OAuth Token API Routes', () => {
describe('credentialAccountUserId + providerId path', () => { describe('credentialAccountUserId + providerId path', () => {
it('should reject unauthenticated requests', async () => { it('should reject unauthenticated requests', async () => {
mockCheckSessionOrInternalAuth.mockResolvedValueOnce({ mockCheckHybridAuth.mockResolvedValueOnce({
success: false, success: false,
error: 'Authentication required', error: 'Authentication required',
}) })
@@ -255,8 +255,30 @@ describe('OAuth Token API Routes', () => {
expect(mockGetOAuthToken).not.toHaveBeenCalled() expect(mockGetOAuthToken).not.toHaveBeenCalled()
}) })
it('should reject API key authentication', async () => {
mockCheckHybridAuth.mockResolvedValueOnce({
success: true,
authType: 'api_key',
userId: 'test-user-id',
})
const req = createMockRequest('POST', {
credentialAccountUserId: 'test-user-id',
providerId: 'google',
})
const { POST } = await import('@/app/api/auth/oauth/token/route')
const response = await POST(req)
const data = await response.json()
expect(response.status).toBe(401)
expect(data).toHaveProperty('error', 'User not authenticated')
expect(mockGetOAuthToken).not.toHaveBeenCalled()
})
it('should reject internal JWT authentication', async () => { it('should reject internal JWT authentication', async () => {
mockCheckSessionOrInternalAuth.mockResolvedValueOnce({ mockCheckHybridAuth.mockResolvedValueOnce({
success: true, success: true,
authType: 'internal_jwt', authType: 'internal_jwt',
userId: 'test-user-id', userId: 'test-user-id',
@@ -278,7 +300,7 @@ describe('OAuth Token API Routes', () => {
}) })
it('should reject requests for other users credentials', async () => { it('should reject requests for other users credentials', async () => {
mockCheckSessionOrInternalAuth.mockResolvedValueOnce({ mockCheckHybridAuth.mockResolvedValueOnce({
success: true, success: true,
authType: 'session', authType: 'session',
userId: 'attacker-user-id', userId: 'attacker-user-id',
@@ -300,7 +322,7 @@ describe('OAuth Token API Routes', () => {
}) })
it('should allow session-authenticated users to access their own credentials', async () => { it('should allow session-authenticated users to access their own credentials', async () => {
mockCheckSessionOrInternalAuth.mockResolvedValueOnce({ mockCheckHybridAuth.mockResolvedValueOnce({
success: true, success: true,
authType: 'session', authType: 'session',
userId: 'test-user-id', userId: 'test-user-id',
@@ -323,7 +345,7 @@ describe('OAuth Token API Routes', () => {
}) })
it('should return 404 when credential not found for user', async () => { it('should return 404 when credential not found for user', async () => {
mockCheckSessionOrInternalAuth.mockResolvedValueOnce({ mockCheckHybridAuth.mockResolvedValueOnce({
success: true, success: true,
authType: 'session', authType: 'session',
userId: 'test-user-id', userId: 'test-user-id',
@@ -351,7 +373,7 @@ describe('OAuth Token API Routes', () => {
*/ */
describe('GET handler', () => { describe('GET handler', () => {
it('should return access token successfully', async () => { it('should return access token successfully', async () => {
mockCheckSessionOrInternalAuth.mockResolvedValueOnce({ mockCheckHybridAuth.mockResolvedValueOnce({
success: true, success: true,
authType: 'session', authType: 'session',
userId: 'test-user-id', userId: 'test-user-id',
@@ -380,7 +402,7 @@ describe('OAuth Token API Routes', () => {
expect(response.status).toBe(200) expect(response.status).toBe(200)
expect(data).toHaveProperty('accessToken', 'fresh-token') expect(data).toHaveProperty('accessToken', 'fresh-token')
expect(mockCheckSessionOrInternalAuth).toHaveBeenCalled() expect(mockCheckHybridAuth).toHaveBeenCalled()
expect(mockGetCredential).toHaveBeenCalledWith(mockRequestId, 'credential-id', 'test-user-id') expect(mockGetCredential).toHaveBeenCalledWith(mockRequestId, 'credential-id', 'test-user-id')
expect(mockRefreshTokenIfNeeded).toHaveBeenCalled() expect(mockRefreshTokenIfNeeded).toHaveBeenCalled()
}) })
@@ -399,7 +421,7 @@ describe('OAuth Token API Routes', () => {
}) })
it('should handle authentication failure', async () => { it('should handle authentication failure', async () => {
mockCheckSessionOrInternalAuth.mockResolvedValueOnce({ mockCheckHybridAuth.mockResolvedValueOnce({
success: false, success: false,
error: 'Authentication required', error: 'Authentication required',
}) })
@@ -418,7 +440,7 @@ describe('OAuth Token API Routes', () => {
}) })
it('should handle credential not found', async () => { it('should handle credential not found', async () => {
mockCheckSessionOrInternalAuth.mockResolvedValueOnce({ mockCheckHybridAuth.mockResolvedValueOnce({
success: true, success: true,
authType: 'session', authType: 'session',
userId: 'test-user-id', userId: 'test-user-id',
@@ -439,7 +461,7 @@ describe('OAuth Token API Routes', () => {
}) })
it('should handle missing access token', async () => { it('should handle missing access token', async () => {
mockCheckSessionOrInternalAuth.mockResolvedValueOnce({ mockCheckHybridAuth.mockResolvedValueOnce({
success: true, success: true,
authType: 'session', authType: 'session',
userId: 'test-user-id', userId: 'test-user-id',
@@ -465,7 +487,7 @@ describe('OAuth Token API Routes', () => {
}) })
it('should handle token refresh failure', async () => { it('should handle token refresh failure', async () => {
mockCheckSessionOrInternalAuth.mockResolvedValueOnce({ mockCheckHybridAuth.mockResolvedValueOnce({
success: true, success: true,
authType: 'session', authType: 'session',
userId: 'test-user-id', userId: 'test-user-id',

View File

@@ -2,7 +2,7 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { authorizeCredentialUse } from '@/lib/auth/credential-access' import { authorizeCredentialUse } from '@/lib/auth/credential-access'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request' import { generateRequestId } from '@/lib/core/utils/request'
import { getCredential, getOAuthToken, refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils' import { getCredential, getOAuthToken, refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils'
@@ -71,7 +71,7 @@ export async function POST(request: NextRequest) {
providerId, providerId,
}) })
const auth = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const auth = await checkHybridAuth(request, { requireWorkflowId: false })
if (!auth.success || auth.authType !== 'session' || !auth.userId) { if (!auth.success || auth.authType !== 'session' || !auth.userId) {
logger.warn(`[${requestId}] Unauthorized request for credentialAccountUserId path`, { logger.warn(`[${requestId}] Unauthorized request for credentialAccountUserId path`, {
success: auth.success, success: auth.success,
@@ -187,7 +187,7 @@ export async function GET(request: NextRequest) {
const { credentialId } = parseResult.data const { credentialId } = parseResult.data
// For GET requests, we only support session-based authentication // For GET requests, we only support session-based authentication
const auth = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const auth = await checkHybridAuth(request, { requireWorkflowId: false })
if (!auth.success || auth.authType !== 'session' || !auth.userId) { if (!auth.success || auth.authType !== 'session' || !auth.userId) {
return NextResponse.json({ error: 'User not authenticated' }, { status: 401 }) return NextResponse.json({ error: 'User not authenticated' }, { status: 401 })
} }

View File

@@ -18,6 +18,7 @@ const UpdateCostSchema = z.object({
model: z.string().min(1, 'Model is required'), model: z.string().min(1, 'Model is required'),
inputTokens: z.number().min(0).default(0), inputTokens: z.number().min(0).default(0),
outputTokens: z.number().min(0).default(0), outputTokens: z.number().min(0).default(0),
source: z.enum(['copilot', 'mcp_copilot']).default('copilot'),
}) })
/** /**
@@ -75,12 +76,14 @@ export async function POST(req: NextRequest) {
) )
} }
const { userId, cost, model, inputTokens, outputTokens } = validation.data const { userId, cost, model, inputTokens, outputTokens, source } = validation.data
const isMcp = source === 'mcp_copilot'
logger.info(`[${requestId}] Processing cost update`, { logger.info(`[${requestId}] Processing cost update`, {
userId, userId,
cost, cost,
model, model,
source,
}) })
// Check if user stats record exists (same as ExecutionLogger) // Check if user stats record exists (same as ExecutionLogger)
@@ -96,7 +99,7 @@ export async function POST(req: NextRequest) {
return NextResponse.json({ error: 'User stats record not found' }, { status: 500 }) return NextResponse.json({ error: 'User stats record not found' }, { status: 500 })
} }
const updateFields = { const updateFields: Record<string, unknown> = {
totalCost: sql`total_cost + ${cost}`, totalCost: sql`total_cost + ${cost}`,
currentPeriodCost: sql`current_period_cost + ${cost}`, currentPeriodCost: sql`current_period_cost + ${cost}`,
totalCopilotCost: sql`total_copilot_cost + ${cost}`, totalCopilotCost: sql`total_copilot_cost + ${cost}`,
@@ -105,17 +108,24 @@ export async function POST(req: NextRequest) {
lastActive: new Date(), lastActive: new Date(),
} }
// Also increment MCP-specific counters when source is mcp_copilot
if (isMcp) {
updateFields.totalMcpCopilotCost = sql`total_mcp_copilot_cost + ${cost}`
updateFields.currentPeriodMcpCopilotCost = sql`current_period_mcp_copilot_cost + ${cost}`
}
await db.update(userStats).set(updateFields).where(eq(userStats.userId, userId)) await db.update(userStats).set(updateFields).where(eq(userStats.userId, userId))
logger.info(`[${requestId}] Updated user stats record`, { logger.info(`[${requestId}] Updated user stats record`, {
userId, userId,
addedCost: cost, addedCost: cost,
source,
}) })
// Log usage for complete audit trail // Log usage for complete audit trail
await logModelUsage({ await logModelUsage({
userId, userId,
source: 'copilot', source: isMcp ? 'mcp_copilot' : 'copilot',
model, model,
inputTokens, inputTokens,
outputTokens, outputTokens,

View File

@@ -1,7 +1,7 @@
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { getSession } from '@/lib/auth' import { getSession } from '@/lib/auth'
import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { env } from '@/lib/core/config/env' import { env } from '@/lib/core/config/env'
const GenerateApiKeySchema = z.object({ const GenerateApiKeySchema = z.object({
@@ -17,9 +17,6 @@ export async function POST(req: NextRequest) {
const userId = session.user.id const userId = session.user.id
// Move environment variable access inside the function
const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT
const body = await req.json().catch(() => ({})) const body = await req.json().catch(() => ({}))
const validationResult = GenerateApiKeySchema.safeParse(body) const validationResult = GenerateApiKeySchema.safeParse(body)

View File

@@ -1,6 +1,6 @@
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth' import { getSession } from '@/lib/auth'
import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { env } from '@/lib/core/config/env' import { env } from '@/lib/core/config/env'
export async function GET(request: NextRequest) { export async function GET(request: NextRequest) {
@@ -12,8 +12,6 @@ export async function GET(request: NextRequest) {
const userId = session.user.id const userId = session.user.id
const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT
const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key/get-api-keys`, { const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key/get-api-keys`, {
method: 'POST', method: 'POST',
headers: { headers: {
@@ -68,8 +66,6 @@ export async function DELETE(request: NextRequest) {
return NextResponse.json({ error: 'id is required' }, { status: 400 }) return NextResponse.json({ error: 'id is required' }, { status: 400 })
} }
const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT
const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key/delete`, { const res = await fetch(`${SIM_AGENT_API_URL}/api/validate-key/delete`, {
method: 'POST', method: 'POST',
headers: { headers: {

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,130 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import {
getStreamMeta,
readStreamEvents,
type StreamMeta,
} from '@/lib/copilot/orchestrator/stream-buffer'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
const logger = createLogger('CopilotChatStreamAPI')
// Delay between stream-buffer polls while tailing a live stream over SSE.
const POLL_INTERVAL_MS = 250
// Hard ceiling on how long a single SSE replay connection may stay open (10 min).
const MAX_STREAM_MS = 10 * 60 * 1000
/** Serialize an event object into a UTF-8 encoded SSE `data:` frame. */
function encodeEvent(event: Record<string, any>): Uint8Array {
  const frame = `data: ${JSON.stringify(event)}\n\n`
  const encoder = new TextEncoder()
  return encoder.encode(frame)
}
/**
 * GET /api/copilot/chat-stream
 *
 * Replays buffered copilot stream events for a stream the caller owns.
 * Two modes:
 *  - `batch=true`: returns the buffered events (optionally bounded by `to`) as JSON.
 *  - default: tails the stream over SSE, polling the buffer until the stream
 *    completes, errors, the client disconnects, or MAX_STREAM_MS elapses.
 */
export async function GET(request: NextRequest) {
  const { userId: authenticatedUserId, isAuthenticated } =
    await authenticateCopilotRequestSessionOnly()
  if (!isAuthenticated || !authenticatedUserId) {
    return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
  }

  const url = new URL(request.url)
  const streamId = url.searchParams.get('streamId') || ''

  // Normalize cursor params up front so a malformed query string can never
  // leak NaN into readStreamEvents (previously only the SSE path guarded
  // `from`, and `to` was never guarded at all).
  const parsedFrom = Number(url.searchParams.get('from') || '0')
  const fromEventId = Number.isFinite(parsedFrom) ? parsedFrom : 0
  const toParam = url.searchParams.get('to')
  const parsedTo = toParam === null ? Number.NaN : Number(toParam)
  const toEventId = Number.isFinite(parsedTo) ? parsedTo : undefined

  // If batch=true, return buffered events as JSON instead of SSE
  const batchMode = url.searchParams.get('batch') === 'true'

  if (!streamId) {
    return NextResponse.json({ error: 'streamId is required' }, { status: 400 })
  }

  const meta = (await getStreamMeta(streamId)) as StreamMeta | null
  logger.info('[Resume] Stream lookup', {
    streamId,
    fromEventId,
    toEventId,
    batchMode,
    hasMeta: !!meta,
    metaStatus: meta?.status,
  })
  if (!meta) {
    return NextResponse.json({ error: 'Stream not found' }, { status: 404 })
  }
  // Streams are private to their creator (when the buffer recorded an owner).
  if (meta.userId && meta.userId !== authenticatedUserId) {
    return NextResponse.json({ error: 'Unauthorized' }, { status: 403 })
  }

  // Batch mode: return all buffered events as JSON
  if (batchMode) {
    const events = await readStreamEvents(streamId, fromEventId)
    // `!== undefined` (not truthiness) so an explicit `to=0` bound is honored.
    const filteredEvents =
      toEventId !== undefined ? events.filter((e) => e.eventId <= toEventId) : events
    logger.info('[Resume] Batch response', {
      streamId,
      fromEventId,
      toEventId,
      eventCount: filteredEvents.length,
    })
    return NextResponse.json({
      success: true,
      events: filteredEvents,
      status: meta.status,
    })
  }

  const startTime = Date.now()
  const stream = new ReadableStream({
    async start(controller) {
      let lastEventId = fromEventId

      // Drain every event newer than the cursor into the SSE response,
      // advancing the cursor as we go.
      const flushEvents = async () => {
        const events = await readStreamEvents(streamId, lastEventId)
        if (events.length > 0) {
          logger.info('[Resume] Flushing events', {
            streamId,
            fromEventId: lastEventId,
            eventCount: events.length,
          })
        }
        for (const entry of events) {
          lastEventId = entry.eventId
          const payload = {
            ...entry.event,
            eventId: entry.eventId,
            streamId: entry.streamId,
          }
          controller.enqueue(encodeEvent(payload))
        }
      }

      try {
        await flushEvents()
        while (Date.now() - startTime < MAX_STREAM_MS) {
          const currentMeta = await getStreamMeta(streamId)
          if (!currentMeta) break
          // Flush AFTER re-reading meta so events written just before the
          // status flipped to complete/error are still delivered.
          await flushEvents()
          if (currentMeta.status === 'complete' || currentMeta.status === 'error') {
            break
          }
          if (request.signal.aborted) {
            break
          }
          await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS))
        }
      } catch (error) {
        logger.warn('Stream replay failed', {
          streamId,
          error: error instanceof Error ? error.message : String(error),
        })
      } finally {
        controller.close()
      }
    },
  })

  return new Response(stream, { headers: SSE_HEADERS })
}

View File

@@ -1,6 +1,7 @@
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { REDIS_TOOL_CALL_PREFIX, REDIS_TOOL_CALL_TTL_SECONDS } from '@/lib/copilot/constants'
import { import {
authenticateCopilotRequestSessionOnly, authenticateCopilotRequestSessionOnly,
createBadRequestResponse, createBadRequestResponse,
@@ -23,7 +24,8 @@ const ConfirmationSchema = z.object({
}) })
/** /**
* Update tool call status in Redis * Write the user's tool decision to Redis. The server-side orchestrator's
* waitForToolDecision() polls Redis for this value.
*/ */
async function updateToolCallStatus( async function updateToolCallStatus(
toolCallId: string, toolCallId: string,
@@ -32,57 +34,24 @@ async function updateToolCallStatus(
): Promise<boolean> { ): Promise<boolean> {
const redis = getRedisClient() const redis = getRedisClient()
if (!redis) { if (!redis) {
logger.warn('updateToolCallStatus: Redis client not available') logger.warn('Redis client not available for tool confirmation')
return false return false
} }
try { try {
const key = `tool_call:${toolCallId}` const key = `${REDIS_TOOL_CALL_PREFIX}${toolCallId}`
const timeout = 600000 // 10 minutes timeout for user confirmation const payload = {
const pollInterval = 100 // Poll every 100ms
const startTime = Date.now()
logger.info('Polling for tool call in Redis', { toolCallId, key, timeout })
// Poll until the key exists or timeout
while (Date.now() - startTime < timeout) {
const exists = await redis.exists(key)
if (exists) {
break
}
// Wait before next poll
await new Promise((resolve) => setTimeout(resolve, pollInterval))
}
// Final check if key exists after polling
const exists = await redis.exists(key)
if (!exists) {
logger.warn('Tool call not found in Redis after polling timeout', {
toolCallId,
key,
timeout,
pollDuration: Date.now() - startTime,
})
return false
}
// Store both status and message as JSON
const toolCallData = {
status, status,
message: message || null, message: message || null,
timestamp: new Date().toISOString(), timestamp: new Date().toISOString(),
} }
await redis.set(key, JSON.stringify(payload), 'EX', REDIS_TOOL_CALL_TTL_SECONDS)
await redis.set(key, JSON.stringify(toolCallData), 'EX', 86400) // Keep 24 hour expiry
return true return true
} catch (error) { } catch (error) {
logger.error('Failed to update tool call status in Redis', { logger.error('Failed to update tool call status', {
toolCallId, toolCallId,
status, status,
message, error: error instanceof Error ? error.message : String(error),
error: error instanceof Error ? error.message : 'Unknown error',
}) })
return false return false
} }

View File

@@ -0,0 +1,28 @@
import { type NextRequest, NextResponse } from 'next/server'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import { routeExecution } from '@/lib/copilot/tools/server/router'
/**
 * GET /api/copilot/credentials
 * Returns connected OAuth credentials for the authenticated user.
 * Used by the copilot store for credential masking.
 */
export async function GET(_req: NextRequest) {
  const auth = await authenticateCopilotRequestSessionOnly()
  if (!auth.isAuthenticated || !auth.userId) {
    return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
  }

  try {
    // Delegate to the server-side tool router; no extra payload is needed.
    const result = await routeExecution('get_credentials', {}, { userId: auth.userId })
    return NextResponse.json({ success: true, result })
  } catch (error) {
    const message = error instanceof Error ? error.message : 'Failed to load credentials'
    return NextResponse.json({ success: false, error: message }, { status: 500 })
  }
}

View File

@@ -1,54 +0,0 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import { routeExecution } from '@/lib/copilot/tools/server/router'
const logger = createLogger('ExecuteCopilotServerToolAPI')
// Request body contract: which server tool to run plus its optional payload.
// The payload shape is tool-specific, so it is left as `unknown` here.
const ExecuteSchema = z.object({
  toolName: z.string(),
  payload: z.unknown().optional(),
})
/**
 * POST handler: authenticates the session, validates the body against
 * ExecuteSchema, and dispatches the named server tool via routeExecution.
 */
export async function POST(req: NextRequest) {
  const tracker = createRequestTracker()
  try {
    const auth = await authenticateCopilotRequestSessionOnly()
    if (!auth.isAuthenticated || !auth.userId) {
      return createUnauthorizedResponse()
    }

    const body = await req.json()
    // Previews are best-effort only; serialization failures are ignored.
    try {
      logger.debug(`[${tracker.requestId}] Incoming request body preview`, {
        preview: JSON.stringify(body).slice(0, 300),
      })
    } catch {}

    const parsed = ExecuteSchema.parse(body)
    logger.info(`[${tracker.requestId}] Executing server tool`, { toolName: parsed.toolName })

    const result = await routeExecution(parsed.toolName, parsed.payload, { userId: auth.userId })
    try {
      logger.debug(`[${tracker.requestId}] Server tool result preview`, {
        toolName: parsed.toolName,
        resultPreview: JSON.stringify(result).slice(0, 300),
      })
    } catch {}

    return NextResponse.json({ success: true, result })
  } catch (error) {
    if (error instanceof z.ZodError) {
      logger.debug(`[${tracker.requestId}] Zod validation error`, { issues: error.issues })
      return createBadRequestResponse('Invalid request body for execute-copilot-server-tool')
    }
    logger.error(`[${tracker.requestId}] Failed to execute server tool:`, error)
    const errorMessage = error instanceof Error ? error.message : 'Failed to execute server tool'
    return createInternalServerErrorResponse(errorMessage)
  }
}

View File

@@ -1,247 +0,0 @@
import { db } from '@sim/db'
import { account, workflow } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import {
createBadRequestResponse,
createInternalServerErrorResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import { generateRequestId } from '@/lib/core/utils/request'
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils'
import { resolveEnvVarReferences } from '@/executor/utils/reference-validation'
import { executeTool } from '@/tools'
import { getTool, resolveToolId } from '@/tools/utils'
const logger = createLogger('CopilotExecuteToolAPI')
// Request contract: the copilot tool call to execute, its LLM-provided
// arguments, and the workflow the call is scoped to (used below to resolve
// the workspace whose environment variables apply).
const ExecuteToolSchema = z.object({
  toolCallId: z.string(),
  toolName: z.string(),
  arguments: z.record(z.any()).optional().default({}),
  workflowId: z.string().optional(),
})
/**
 * POST /api/copilot/execute-tool
 *
 * Executes a registry tool on behalf of the copilot. Pipeline:
 *  1. session auth → 2. validate body → 3. resolve tool from registry →
 *  4. look up workspace via workflow → 5. decrypt env vars and resolve
 *  {{ENV_VAR}} references in the arguments → 6. attach OAuth token / API key
 *  when required → 7. run the tool and return its output.
 *
 * Returns 404 for unknown tools, 400 for missing credentials, 500 for
 * execution/internal failures; all error bodies echo the toolCallId.
 */
export async function POST(req: NextRequest) {
  const tracker = createRequestTracker()
  try {
    const session = await getSession()
    if (!session?.user?.id) {
      return createUnauthorizedResponse()
    }
    const userId = session.user.id

    const body = await req.json()
    // Best-effort request preview; serialization failures are ignored.
    try {
      const preview = JSON.stringify(body).slice(0, 300)
      logger.debug(`[${tracker.requestId}] Incoming execute-tool request`, { preview })
    } catch {}

    const { toolCallId, toolName, arguments: toolArgs, workflowId } = ExecuteToolSchema.parse(body)
    // Map aliased/legacy tool ids onto the canonical registry id.
    const resolvedToolName = resolveToolId(toolName)
    logger.info(`[${tracker.requestId}] Executing tool`, {
      toolCallId,
      toolName,
      resolvedToolName,
      workflowId,
      hasArgs: Object.keys(toolArgs).length > 0,
    })

    const toolConfig = getTool(resolvedToolName)
    if (!toolConfig) {
      // Find similar tool names to help debug
      const { tools: allTools } = await import('@/tools/registry')
      const allToolNames = Object.keys(allTools)
      const prefix = toolName.split('_').slice(0, 2).join('_')
      // Suggest up to 10 tools sharing the first name segment (e.g. "gmail_").
      const similarTools = allToolNames
        .filter((name) => name.startsWith(`${prefix.split('_')[0]}_`))
        .slice(0, 10)
      logger.warn(`[${tracker.requestId}] Tool not found in registry`, {
        toolName,
        prefix,
        similarTools,
        totalToolsInRegistry: allToolNames.length,
      })
      return NextResponse.json(
        {
          success: false,
          error: `Tool not found: ${toolName}. Similar tools: ${similarTools.join(', ')}`,
          toolCallId,
        },
        { status: 404 }
      )
    }

    // Get the workspaceId from the workflow (env vars are stored at workspace level)
    let workspaceId: string | undefined
    if (workflowId) {
      const workflowResult = await db
        .select({ workspaceId: workflow.workspaceId })
        .from(workflow)
        .where(eq(workflow.id, workflowId))
        .limit(1)
      workspaceId = workflowResult[0]?.workspaceId ?? undefined
    }

    // Get decrypted environment variables early so we can resolve all {{VAR}} references
    const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId)
    logger.info(`[${tracker.requestId}] Fetched environment variables`, {
      workflowId,
      workspaceId,
      envVarCount: Object.keys(decryptedEnvVars).length,
      envVarKeys: Object.keys(decryptedEnvVars),
    })

    // Build execution params starting with LLM-provided arguments
    // Resolve all {{ENV_VAR}} references in the arguments (deep for nested objects)
    const executionParams: Record<string, any> = resolveEnvVarReferences(
      toolArgs,
      decryptedEnvVars,
      { deep: true }
    ) as Record<string, any>
    logger.info(`[${tracker.requestId}] Resolved env var references in arguments`, {
      toolName,
      originalArgKeys: Object.keys(toolArgs),
      resolvedArgKeys: Object.keys(executionParams),
    })

    // Resolve OAuth access token if required
    if (toolConfig.oauth?.required && toolConfig.oauth.provider) {
      const provider = toolConfig.oauth.provider
      logger.info(`[${tracker.requestId}] Resolving OAuth token`, { provider })
      try {
        // Find the account for this provider and user
        const accounts = await db
          .select()
          .from(account)
          .where(and(eq(account.providerId, provider), eq(account.userId, userId)))
          .limit(1)
        if (accounts.length > 0) {
          const acc = accounts[0]
          const requestId = generateRequestId()
          // Refreshes the token if expired; returns the current access token.
          const { accessToken } = await refreshTokenIfNeeded(requestId, acc as any, acc.id)
          if (accessToken) {
            executionParams.accessToken = accessToken
            logger.info(`[${tracker.requestId}] OAuth token resolved`, { provider })
          } else {
            logger.warn(`[${tracker.requestId}] No access token available`, { provider })
            return NextResponse.json(
              {
                success: false,
                error: `OAuth token not available for ${provider}. Please reconnect your account.`,
                toolCallId,
              },
              { status: 400 }
            )
          }
        } else {
          logger.warn(`[${tracker.requestId}] No account found for provider`, { provider })
          return NextResponse.json(
            {
              success: false,
              error: `No ${provider} account connected. Please connect your account first.`,
              toolCallId,
            },
            { status: 400 }
          )
        }
      } catch (error) {
        logger.error(`[${tracker.requestId}] Failed to resolve OAuth token`, {
          provider,
          error: error instanceof Error ? error.message : String(error),
        })
        return NextResponse.json(
          {
            success: false,
            error: `Failed to get OAuth token for ${provider}`,
            toolCallId,
          },
          { status: 500 }
        )
      }
    }

    // Check if tool requires an API key that wasn't resolved via {{ENV_VAR}} reference
    const needsApiKey = toolConfig.params?.apiKey?.required
    if (needsApiKey && !executionParams.apiKey) {
      logger.warn(`[${tracker.requestId}] No API key found for tool`, { toolName })
      return NextResponse.json(
        {
          success: false,
          error: `API key not provided for ${toolName}. Use {{YOUR_API_KEY_ENV_VAR}} to reference your environment variable.`,
          toolCallId,
        },
        { status: 400 }
      )
    }

    // Add execution context
    executionParams._context = {
      workflowId,
      userId,
    }

    // Special handling for function_execute - inject environment variables
    if (toolName === 'function_execute') {
      executionParams.envVars = decryptedEnvVars
      executionParams.workflowVariables = {} // No workflow variables in copilot context
      executionParams.blockData = {} // No block data in copilot context
      executionParams.blockNameMapping = {} // No block mapping in copilot context
      executionParams.language = executionParams.language || 'javascript'
      executionParams.timeout = executionParams.timeout || 30000
      logger.info(`[${tracker.requestId}] Injected env vars for function_execute`, {
        envVarCount: Object.keys(decryptedEnvVars).length,
      })
    }

    // Execute the tool
    logger.info(`[${tracker.requestId}] Executing tool with resolved credentials`, {
      toolName,
      hasAccessToken: !!executionParams.accessToken,
      hasApiKey: !!executionParams.apiKey,
    })
    const result = await executeTool(resolvedToolName, executionParams)
    logger.info(`[${tracker.requestId}] Tool execution complete`, {
      toolName,
      success: result.success,
      hasOutput: !!result.output,
    })

    return NextResponse.json({
      success: true,
      toolCallId,
      result: {
        success: result.success,
        output: result.output,
        error: result.error,
      },
    })
  } catch (error) {
    if (error instanceof z.ZodError) {
      logger.debug(`[${tracker.requestId}] Zod validation error`, { issues: error.issues })
      return createBadRequestResponse('Invalid request body for execute-tool')
    }
    logger.error(`[${tracker.requestId}] Failed to execute tool:`, error)
    const errorMessage = error instanceof Error ? error.message : 'Failed to execute tool'
    return createInternalServerErrorResponse(errorMessage)
  }
}

View File

@@ -1,6 +1,6 @@
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants' import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { import {
authenticateCopilotRequestSessionOnly, authenticateCopilotRequestSessionOnly,
createBadRequestResponse, createBadRequestResponse,
@@ -10,8 +10,6 @@ import {
} from '@/lib/copilot/request-helpers' } from '@/lib/copilot/request-helpers'
import { env } from '@/lib/core/config/env' import { env } from '@/lib/core/config/env'
const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT
const BodySchema = z.object({ const BodySchema = z.object({
messageId: z.string(), messageId: z.string(),
diffCreated: z.boolean(), diffCreated: z.boolean(),

View File

@@ -1,123 +0,0 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { SIM_AGENT_API_URL_DEFAULT } from '@/lib/copilot/constants'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
createInternalServerErrorResponse,
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotMarkToolCompleteAPI')
// Fall back to the default agent URL when the env var is unset.
const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT
// Tool-call completion payload, forwarded verbatim to the Sim Agent.
// `message`/`data` shapes are tool-specific, hence z.any().
const MarkCompleteSchema = z.object({
  id: z.string(),
  name: z.string(),
  status: z.number().int(),
  message: z.any().optional(),
  data: z.any().optional(),
})
/**
 * POST /api/copilot/tools/mark-complete
 * Proxy to Sim Agent: POST /api/tools/mark-complete
 *
 * Validates the completion payload, forwards it to the agent, and maps the
 * agent's response (success, client error, or server error) back to the caller.
 */
export async function POST(req: NextRequest) {
  const tracker = createRequestTracker()
  try {
    const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly()
    if (!isAuthenticated || !userId) {
      return createUnauthorizedResponse()
    }

    const body = await req.json()
    // Log raw body shape for diagnostics (avoid dumping huge payloads)
    try {
      const bodyPreview = JSON.stringify(body).slice(0, 300)
      logger.debug(`[${tracker.requestId}] Incoming mark-complete raw body preview`, {
        preview: `${bodyPreview}${bodyPreview.length === 300 ? '...' : ''}`,
      })
    } catch {}

    const parsed = MarkCompleteSchema.parse(body)

    // Truncated, best-effort preview of the tool message for logs.
    const messagePreview = (() => {
      try {
        const s =
          typeof parsed.message === 'string' ? parsed.message : JSON.stringify(parsed.message)
        return s ? `${s.slice(0, 200)}${s.length > 200 ? '...' : ''}` : undefined
      } catch {
        return undefined
      }
    })()

    logger.info(`[${tracker.requestId}] Forwarding tool mark-complete`, {
      userId,
      toolCallId: parsed.id,
      toolName: parsed.name,
      status: parsed.status,
      hasMessage: parsed.message !== undefined,
      hasData: parsed.data !== undefined,
      messagePreview,
      agentUrl: `${SIM_AGENT_API_URL}/api/tools/mark-complete`,
    })

    const agentRes = await fetch(`${SIM_AGENT_API_URL}/api/tools/mark-complete`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        ...(env.COPILOT_API_KEY ? { 'x-api-key': env.COPILOT_API_KEY } : {}),
      },
      body: JSON.stringify(parsed),
    })

    // Read the response body exactly once. The previous implementation awaited
    // agentRes.json() and fell back to agentRes.text() on failure, but a fetch
    // body can only be consumed once — the fallback always threw, losing both
    // the text preview and the real error message.
    let agentJson: any = null
    let agentText: string | null = null
    try {
      const rawBody = await agentRes.text()
      try {
        agentJson = JSON.parse(rawBody)
      } catch {
        agentText = rawBody
      }
    } catch {}

    logger.info(`[${tracker.requestId}] Agent responded to mark-complete`, {
      status: agentRes.status,
      ok: agentRes.ok,
      responseJsonPreview: agentJson ? JSON.stringify(agentJson).slice(0, 300) : undefined,
      responseTextPreview: agentText ? agentText.slice(0, 300) : undefined,
    })

    if (agentRes.ok) {
      return NextResponse.json({ success: true })
    }

    const errorMessage =
      agentJson?.error || agentText || `Agent responded with status ${agentRes.status}`
    // Agent 5xx surfaces as 500; anything else is treated as a client error.
    const status = agentRes.status >= 500 ? 500 : 400
    logger.warn(`[${tracker.requestId}] Mark-complete failed`, {
      status,
      error: errorMessage,
    })
    return NextResponse.json({ success: false, error: errorMessage }, { status })
  } catch (error) {
    if (error instanceof z.ZodError) {
      logger.warn(`[${tracker.requestId}] Invalid mark-complete request body`, {
        issues: error.issues,
      })
      return createBadRequestResponse('Invalid request body for mark-complete')
    }
    logger.error(`[${tracker.requestId}] Failed to proxy mark-complete:`, error)
    return createInternalServerErrorResponse('Failed to mark tool as complete')
  }
}

View File

@@ -28,6 +28,7 @@ const DEFAULT_ENABLED_MODELS: Record<CopilotModelId, boolean> = {
'claude-4-sonnet': false, 'claude-4-sonnet': false,
'claude-4.5-haiku': true, 'claude-4.5-haiku': true,
'claude-4.5-sonnet': true, 'claude-4.5-sonnet': true,
'claude-4.6-opus': true,
'claude-4.5-opus': true, 'claude-4.5-opus': true,
'claude-4.1-opus': false, 'claude-4.1-opus': false,
'gemini-3-pro': true, 'gemini-3-pro': true,

View File

@@ -29,7 +29,7 @@ function setupFileApiMocks(
} }
vi.doMock('@/lib/auth/hybrid', () => ({ vi.doMock('@/lib/auth/hybrid', () => ({
checkSessionOrInternalAuth: vi.fn().mockResolvedValue({ checkHybridAuth: vi.fn().mockResolvedValue({
success: authenticated, success: authenticated,
userId: authenticated ? 'test-user-id' : undefined, userId: authenticated ? 'test-user-id' : undefined,
error: authenticated ? undefined : 'Unauthorized', error: authenticated ? undefined : 'Unauthorized',

View File

@@ -1,7 +1,7 @@
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import type { NextRequest } from 'next/server' import type { NextRequest } from 'next/server'
import { NextResponse } from 'next/server' import { NextResponse } from 'next/server'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import type { StorageContext } from '@/lib/uploads/config' import type { StorageContext } from '@/lib/uploads/config'
import { deleteFile, hasCloudStorage } from '@/lib/uploads/core/storage-service' import { deleteFile, hasCloudStorage } from '@/lib/uploads/core/storage-service'
import { extractStorageKey, inferContextFromKey } from '@/lib/uploads/utils/file-utils' import { extractStorageKey, inferContextFromKey } from '@/lib/uploads/utils/file-utils'
@@ -24,7 +24,7 @@ const logger = createLogger('FilesDeleteAPI')
*/ */
export async function POST(request: NextRequest) { export async function POST(request: NextRequest) {
try { try {
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success || !authResult.userId) { if (!authResult.success || !authResult.userId) {
logger.warn('Unauthorized file delete request', { logger.warn('Unauthorized file delete request', {

View File

@@ -1,6 +1,6 @@
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import type { StorageContext } from '@/lib/uploads/config' import type { StorageContext } from '@/lib/uploads/config'
import { hasCloudStorage } from '@/lib/uploads/core/storage-service' import { hasCloudStorage } from '@/lib/uploads/core/storage-service'
import { verifyFileAccess } from '@/app/api/files/authorization' import { verifyFileAccess } from '@/app/api/files/authorization'
@@ -12,7 +12,7 @@ export const dynamic = 'force-dynamic'
export async function POST(request: NextRequest) { export async function POST(request: NextRequest) {
try { try {
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success || !authResult.userId) { if (!authResult.success || !authResult.userId) {
logger.warn('Unauthorized download URL request', { logger.warn('Unauthorized download URL request', {

View File

@@ -35,7 +35,7 @@ function setupFileApiMocks(
} }
vi.doMock('@/lib/auth/hybrid', () => ({ vi.doMock('@/lib/auth/hybrid', () => ({
checkInternalAuth: vi.fn().mockResolvedValue({ checkHybridAuth: vi.fn().mockResolvedValue({
success: authenticated, success: authenticated,
userId: authenticated ? 'test-user-id' : undefined, userId: authenticated ? 'test-user-id' : undefined,
error: authenticated ? undefined : 'Unauthorized', error: authenticated ? undefined : 'Unauthorized',

View File

@@ -5,7 +5,7 @@ import path from 'path'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import binaryExtensionsList from 'binary-extensions' import binaryExtensionsList from 'binary-extensions'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { checkInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { import {
secureFetchWithPinnedIP, secureFetchWithPinnedIP,
validateUrlWithDNS, validateUrlWithDNS,
@@ -66,7 +66,7 @@ export async function POST(request: NextRequest) {
const startTime = Date.now() const startTime = Date.now()
try { try {
const authResult = await checkInternalAuth(request, { requireWorkflowId: true }) const authResult = await checkHybridAuth(request, { requireWorkflowId: true })
if (!authResult.success) { if (!authResult.success) {
logger.warn('Unauthorized file parse request', { logger.warn('Unauthorized file parse request', {

View File

@@ -55,7 +55,7 @@ describe('File Serve API Route', () => {
}) })
vi.doMock('@/lib/auth/hybrid', () => ({ vi.doMock('@/lib/auth/hybrid', () => ({
checkSessionOrInternalAuth: vi.fn().mockResolvedValue({ checkHybridAuth: vi.fn().mockResolvedValue({
success: true, success: true,
userId: 'test-user-id', userId: 'test-user-id',
}), }),
@@ -165,7 +165,7 @@ describe('File Serve API Route', () => {
})) }))
vi.doMock('@/lib/auth/hybrid', () => ({ vi.doMock('@/lib/auth/hybrid', () => ({
checkSessionOrInternalAuth: vi.fn().mockResolvedValue({ checkHybridAuth: vi.fn().mockResolvedValue({
success: true, success: true,
userId: 'test-user-id', userId: 'test-user-id',
}), }),
@@ -226,7 +226,7 @@ describe('File Serve API Route', () => {
})) }))
vi.doMock('@/lib/auth/hybrid', () => ({ vi.doMock('@/lib/auth/hybrid', () => ({
checkSessionOrInternalAuth: vi.fn().mockResolvedValue({ checkHybridAuth: vi.fn().mockResolvedValue({
success: true, success: true,
userId: 'test-user-id', userId: 'test-user-id',
}), }),
@@ -291,7 +291,7 @@ describe('File Serve API Route', () => {
})) }))
vi.doMock('@/lib/auth/hybrid', () => ({ vi.doMock('@/lib/auth/hybrid', () => ({
checkSessionOrInternalAuth: vi.fn().mockResolvedValue({ checkHybridAuth: vi.fn().mockResolvedValue({
success: true, success: true,
userId: 'test-user-id', userId: 'test-user-id',
}), }),
@@ -350,7 +350,7 @@ describe('File Serve API Route', () => {
for (const test of contentTypeTests) { for (const test of contentTypeTests) {
it(`should serve ${test.ext} file with correct content type`, async () => { it(`should serve ${test.ext} file with correct content type`, async () => {
vi.doMock('@/lib/auth/hybrid', () => ({ vi.doMock('@/lib/auth/hybrid', () => ({
checkSessionOrInternalAuth: vi.fn().mockResolvedValue({ checkHybridAuth: vi.fn().mockResolvedValue({
success: true, success: true,
userId: 'test-user-id', userId: 'test-user-id',
}), }),

View File

@@ -2,7 +2,7 @@ import { readFile } from 'fs/promises'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import type { NextRequest } from 'next/server' import type { NextRequest } from 'next/server'
import { NextResponse } from 'next/server' import { NextResponse } from 'next/server'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { CopilotFiles, isUsingCloudStorage } from '@/lib/uploads' import { CopilotFiles, isUsingCloudStorage } from '@/lib/uploads'
import type { StorageContext } from '@/lib/uploads/config' import type { StorageContext } from '@/lib/uploads/config'
import { downloadFile } from '@/lib/uploads/core/storage-service' import { downloadFile } from '@/lib/uploads/core/storage-service'
@@ -49,7 +49,7 @@ export async function GET(
return await handleLocalFilePublic(fullPath) return await handleLocalFilePublic(fullPath)
} }
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success || !authResult.userId) { if (!authResult.success || !authResult.userId) {
logger.warn('Unauthorized file access attempt', { logger.warn('Unauthorized file access attempt', {

View File

@@ -845,8 +845,6 @@ export async function POST(req: NextRequest) {
contextVariables, contextVariables,
timeoutMs: timeout, timeoutMs: timeout,
requestId, requestId,
ownerKey: `user:${auth.userId}`,
ownerWeight: 1,
}) })
const executionTime = Date.now() - startTime const executionTime = Date.now() - startTime

View File

@@ -23,16 +23,7 @@ export async function POST(request: NextRequest) {
topK, topK,
model, model,
apiKey, apiKey,
azureEndpoint,
azureApiVersion,
vertexProject,
vertexLocation,
vertexCredential,
bedrockAccessKeyId,
bedrockSecretKey,
bedrockRegion,
workflowId, workflowId,
workspaceId,
piiEntityTypes, piiEntityTypes,
piiMode, piiMode,
piiLanguage, piiLanguage,
@@ -119,18 +110,7 @@ export async function POST(request: NextRequest) {
topK, topK,
model, model,
apiKey, apiKey,
{
azureEndpoint,
azureApiVersion,
vertexProject,
vertexLocation,
vertexCredential,
bedrockAccessKeyId,
bedrockSecretKey,
bedrockRegion,
},
workflowId, workflowId,
workspaceId,
piiEntityTypes, piiEntityTypes,
piiMode, piiMode,
piiLanguage, piiLanguage,
@@ -198,18 +178,7 @@ async function executeValidation(
topK: string | undefined, topK: string | undefined,
model: string, model: string,
apiKey: string | undefined, apiKey: string | undefined,
providerCredentials: {
azureEndpoint?: string
azureApiVersion?: string
vertexProject?: string
vertexLocation?: string
vertexCredential?: string
bedrockAccessKeyId?: string
bedrockSecretKey?: string
bedrockRegion?: string
},
workflowId: string | undefined, workflowId: string | undefined,
workspaceId: string | undefined,
piiEntityTypes: string[] | undefined, piiEntityTypes: string[] | undefined,
piiMode: string | undefined, piiMode: string | undefined,
piiLanguage: string | undefined, piiLanguage: string | undefined,
@@ -250,9 +219,7 @@ async function executeValidation(
topK: topK ? Number.parseInt(topK) : 10, // Default topK is 10 topK: topK ? Number.parseInt(topK) : 10, // Default topK is 10
model: model, model: model,
apiKey, apiKey,
providerCredentials,
workflowId, workflowId,
workspaceId,
requestId, requestId,
}) })
} }

View File

@@ -2,7 +2,7 @@ import { randomUUID } from 'crypto'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { SUPPORTED_FIELD_TYPES } from '@/lib/knowledge/constants' import { SUPPORTED_FIELD_TYPES } from '@/lib/knowledge/constants'
import { createTagDefinition, getTagDefinitions } from '@/lib/knowledge/tags/service' import { createTagDefinition, getTagDefinitions } from '@/lib/knowledge/tags/service'
import { checkKnowledgeBaseAccess } from '@/app/api/knowledge/utils' import { checkKnowledgeBaseAccess } from '@/app/api/knowledge/utils'
@@ -19,11 +19,19 @@ export async function GET(req: NextRequest, { params }: { params: Promise<{ id:
try { try {
logger.info(`[${requestId}] Getting tag definitions for knowledge base ${knowledgeBaseId}`) logger.info(`[${requestId}] Getting tag definitions for knowledge base ${knowledgeBaseId}`)
const auth = await checkSessionOrInternalAuth(req, { requireWorkflowId: false }) const auth = await checkHybridAuth(req, { requireWorkflowId: false })
if (!auth.success) { if (!auth.success) {
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
} }
// Only allow session and internal JWT auth (not API key)
if (auth.authType === 'api_key') {
return NextResponse.json(
{ error: 'API key auth not supported for this endpoint' },
{ status: 401 }
)
}
// For session auth, verify KB access. Internal JWT is trusted. // For session auth, verify KB access. Internal JWT is trusted.
if (auth.authType === 'session' && auth.userId) { if (auth.authType === 'session' && auth.userId) {
const accessCheck = await checkKnowledgeBaseAccess(knowledgeBaseId, auth.userId) const accessCheck = await checkKnowledgeBaseAccess(knowledgeBaseId, auth.userId)
@@ -56,11 +64,19 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
try { try {
logger.info(`[${requestId}] Creating tag definition for knowledge base ${knowledgeBaseId}`) logger.info(`[${requestId}] Creating tag definition for knowledge base ${knowledgeBaseId}`)
const auth = await checkSessionOrInternalAuth(req, { requireWorkflowId: false }) const auth = await checkHybridAuth(req, { requireWorkflowId: false })
if (!auth.success) { if (!auth.success) {
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 }) return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
} }
// Only allow session and internal JWT auth (not API key)
if (auth.authType === 'api_key') {
return NextResponse.json(
{ error: 'API key auth not supported for this endpoint' },
{ status: 401 }
)
}
// For session auth, verify KB access. Internal JWT is trusted. // For session auth, verify KB access. Internal JWT is trusted.
if (auth.authType === 'session' && auth.userId) { if (auth.authType === 'session' && auth.userId) {
const accessCheck = await checkKnowledgeBaseAccess(knowledgeBaseId, auth.userId) const accessCheck = await checkKnowledgeBaseAccess(knowledgeBaseId, auth.userId)

View File

@@ -8,7 +8,7 @@ import {
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { and, eq, inArray } from 'drizzle-orm' import { and, eq, inArray } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request' import { generateRequestId } from '@/lib/core/utils/request'
import type { TraceSpan, WorkflowExecutionLog } from '@/lib/logs/types' import type { TraceSpan, WorkflowExecutionLog } from '@/lib/logs/types'
@@ -23,7 +23,7 @@ export async function GET(
try { try {
const { executionId } = await params const { executionId } = await params
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success || !authResult.userId) { if (!authResult.success || !authResult.userId) {
logger.warn(`[${requestId}] Unauthorized execution data access attempt for: ${executionId}`) logger.warn(`[${requestId}] Unauthorized execution data access attempt for: ${executionId}`)
return NextResponse.json( return NextResponse.json(

View File

@@ -0,0 +1,6 @@
import { type NextRequest, NextResponse } from 'next/server'
import { createMcpAuthorizationServerMetadataResponse } from '@/lib/mcp/oauth-discovery'
/**
 * GET /.well-known (MCP OAuth discovery): serves authorization-server
 * metadata for MCP clients via the shared discovery helper.
 */
export async function GET(request: NextRequest): Promise<NextResponse> {
  return createMcpAuthorizationServerMetadataResponse(request)
}

View File

@@ -0,0 +1,6 @@
import { type NextRequest, NextResponse } from 'next/server'
import { createMcpProtectedResourceMetadataResponse } from '@/lib/mcp/oauth-discovery'
/**
 * GET /.well-known (MCP OAuth discovery): serves protected-resource
 * metadata for MCP clients via the shared discovery helper.
 */
export async function GET(request: NextRequest): Promise<NextResponse> {
  return createMcpProtectedResourceMetadataResponse(request)
}

View File

@@ -0,0 +1,776 @@
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js'
import {
CallToolRequestSchema,
type CallToolResult,
ErrorCode,
type JSONRPCError,
type ListToolsResult,
ListToolsRequestSchema,
McpError,
type RequestId,
} from '@modelcontextprotocol/sdk/types.js'
import { db } from '@sim/db'
import { userStats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { randomUUID } from 'node:crypto'
import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { getCopilotModel } from '@/lib/copilot/config'
import { SIM_AGENT_API_URL, SIM_AGENT_VERSION } from '@/lib/copilot/constants'
import { RateLimiter } from '@/lib/core/rate-limiter'
import { env } from '@/lib/core/config/env'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import { orchestrateSubagentStream } from '@/lib/copilot/orchestrator/subagent'
import {
executeToolServerSide,
prepareExecutionContext,
} from '@/lib/copilot/orchestrator/tool-executor'
import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/definitions'
import { resolveWorkflowIdForUser } from '@/lib/workflows/utils'
// Module-scoped logger and rate limiter, shared across all MCP requests.
const logger = createLogger('CopilotMcpAPI')
const mcpRateLimiter = new RateLimiter()
// Opt out of static optimization and pin the Node.js runtime: this route
// streams responses and uses Node-only APIs (node:crypto, DB access).
export const dynamic = 'force-dynamic'
export const runtime = 'nodejs'
/** Result of validating a copilot API key against the Go copilot service. */
interface CopilotKeyAuthResult {
  success: boolean
  // Set only when success is true.
  userId?: string
  // Human-readable failure reason, returned verbatim to the MCP client.
  error?: string
}
/**
 * Validates a copilot API key by forwarding it to the Go copilot service's
 * `/api/validate-key` endpoint. Returns the associated userId on success.
 * Network/parse failures are reported as a retryable error, never thrown.
 */
async function authenticateCopilotApiKey(apiKey: string): Promise<CopilotKeyAuthResult> {
  try {
    const internalSecret = env.INTERNAL_API_SECRET
    if (!internalSecret) {
      logger.error('INTERNAL_API_SECRET not configured')
      return { success: false, error: 'Server configuration error' }
    }
    const response = await fetch(`${SIM_AGENT_API_URL}/api/validate-key`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'x-api-key': internalSecret,
      },
      body: JSON.stringify({ targetApiKey: apiKey }),
      signal: AbortSignal.timeout(10_000),
    })
    if (!response.ok) {
      const errorBody = await response.json().catch(() => null)
      const upstreamMessage = (errorBody as Record<string, unknown>)?.message
      switch (response.status) {
        case 401:
        case 403:
          return {
            success: false,
            error:
              'Invalid Copilot API key. Generate a new key in Settings → Copilot and set it in the x-api-key header.',
          }
        case 402:
          return {
            success: false,
            error:
              'Usage limit exceeded for this Copilot API key. Upgrade your plan or wait for your quota to reset.',
          }
        default:
          return {
            success: false,
            error: String(upstreamMessage ?? 'Copilot API key validation failed'),
          }
      }
    }
    const payload = (await response.json()) as { ok?: boolean; userId?: string }
    if (payload.ok && payload.userId) {
      return { success: true, userId: payload.userId }
    }
    return {
      success: false,
      error: 'Invalid Copilot API key. Generate a new key in Settings → Copilot.',
    }
  } catch (error) {
    logger.error('Copilot API key validation failed', { error })
    return {
      success: false,
      error:
        'Could not validate Copilot API key — the authentication service is temporarily unreachable. This is NOT a problem with the API key itself; please retry shortly.',
    }
  }
}
/**
 * MCP Server instructions that guide LLMs on how to use the Sim copilot tools.
 * This is included in the initialize response to help external LLMs understand
 * the workflow lifecycle and best practices.
 */
const MCP_SERVER_INSTRUCTIONS = `
## Sim Workflow Copilot
Sim is a workflow automation platform. Workflows are visual pipelines of connected blocks (Agent, Function, Condition, API, integrations, etc.). The Agent block is the core — an LLM with tools, memory, structured output, and knowledge bases.
### Workflow Lifecycle (Happy Path)
1. \`list_workspaces\` → know where to work
2. \`create_workflow(name, workspaceId)\` → get a workflowId
3. \`sim_build(request, workflowId)\` → plan and build in one pass
4. \`sim_test(request, workflowId)\` → verify it works
5. \`sim_deploy("deploy as api", workflowId)\` → make it accessible externally (optional)
For fine-grained control, use \`sim_plan\` → \`sim_edit\` instead of \`sim_build\`. Pass the plan object from sim_plan EXACTLY as-is to sim_edit's context.plan field.
### Working with Existing Workflows
When the user refers to a workflow by name or description ("the email one", "my Slack bot"):
1. Use \`sim_discovery\` to find it by functionality
2. Or use \`list_workflows\` and match by name
3. Then pass the workflowId to other tools
### Organization
- \`rename_workflow\` — rename a workflow
- \`move_workflow\` — move a workflow into a folder (or root with null)
- \`move_folder\` — nest a folder inside another (or root with null)
- \`create_folder(name, parentId)\` — create nested folder hierarchies
### Key Rules
- You can test workflows immediately after building — deployment is only needed for external access (API, chat, MCP).
- All copilot tools (build, plan, edit, deploy, test, debug) require workflowId.
- If the user reports errors → use \`sim_debug\` first, don't guess.
- Variable syntax: \`<blockname.field>\` for block outputs, \`{{ENV_VAR}}\` for env vars.
`
type HeaderMap = Record<string, string | string[] | undefined>
/** Builds a JSON-RPC 2.0 error envelope for the given request id. */
function createError(id: RequestId, code: ErrorCode | number, message: string): JSONRPCError {
  const error = { code, message }
  return { jsonrpc: '2.0', id, error }
}
/** Copies the request's headers into a plain object keyed by lowercased name. */
function normalizeRequestHeaders(request: NextRequest): HeaderMap {
  const normalized: HeaderMap = {}
  for (const [name, value] of request.headers.entries()) {
    normalized[name.toLowerCase()] = value
  }
  return normalized
}
/**
 * Reads one header from a normalized header map; the lookup is
 * case-insensitive and multi-valued headers yield their first entry.
 */
function readHeader(headers: HeaderMap | undefined, name: string): string | undefined {
  const value = headers?.[name.toLowerCase()]
  return Array.isArray(value) ? value[0] : value
}
/**
 * Minimal Node-style response shim. The MCP SDK's transport writes status,
 * headers, and body chunks into it (writeHead/write/end), and the captured
 * output is exposed as a web ReadableStream so it can be returned to Next.js
 * as a streaming NextResponse.
 */
class NextResponseCapture {
  // Hoisted shared encoder: write() may be called per-chunk, so avoid
  // allocating a new TextEncoder on every call.
  private static readonly textEncoder = new TextEncoder()
  private _status = 200
  private _headers = new Headers()
  // Stream controller; null until the ReadableStream's start() has run.
  private _controller: ReadableStreamDefaultController<Uint8Array> | null = null
  // Chunks written before the controller is available are buffered here.
  private _pendingChunks: Uint8Array[] = []
  private _closeHandlers: Array<() => void> = []
  private _errorHandlers: Array<(error: Error) => void> = []
  private _headersWritten = false
  private _ended = false
  private _headersPromise: Promise<void>
  private _resolveHeaders: (() => void) | null = null
  private _endedPromise: Promise<void>
  private _resolveEnded: (() => void) | null = null
  readonly readable: ReadableStream<Uint8Array>
  constructor() {
    this._headersPromise = new Promise<void>((resolve) => {
      this._resolveHeaders = resolve
    })
    this._endedPromise = new Promise<void>((resolve) => {
      this._resolveEnded = resolve
    })
    this.readable = new ReadableStream<Uint8Array>({
      start: (controller) => {
        this._controller = controller
        // Flush anything written before the stream was started.
        if (this._pendingChunks.length > 0) {
          for (const chunk of this._pendingChunks) {
            controller.enqueue(chunk)
          }
          this._pendingChunks = []
        }
      },
      cancel: () => {
        // Consumer went away: treat as end-of-response and notify listeners.
        this._ended = true
        this._resolveEnded?.()
        this.triggerCloseHandlers()
      },
    })
  }
  // Resolves waitForHeaders() exactly once.
  private markHeadersWritten(): void {
    if (this._headersWritten) return
    this._headersWritten = true
    this._resolveHeaders?.()
  }
  private triggerCloseHandlers(): void {
    for (const handler of this._closeHandlers) {
      try {
        handler()
      } catch (error) {
        this.triggerErrorHandlers(error instanceof Error ? error : new Error(String(error)))
      }
    }
  }
  private triggerErrorHandlers(error: Error): void {
    for (const errorHandler of this._errorHandlers) {
      errorHandler(error)
    }
  }
  // Coerces whatever the SDK writes into bytes; null means "nothing to write".
  private normalizeChunk(chunk: unknown): Uint8Array | null {
    if (typeof chunk === 'string') {
      return NextResponseCapture.textEncoder.encode(chunk)
    }
    if (chunk instanceof Uint8Array) {
      return chunk
    }
    if (chunk === undefined || chunk === null) {
      return null
    }
    return NextResponseCapture.textEncoder.encode(String(chunk))
  }
  /** Node ServerResponse-style: records status and merges headers. */
  writeHead(status: number, headers?: Record<string, string | number | string[]>): this {
    this._status = status
    if (headers) {
      Object.entries(headers).forEach(([key, value]) => {
        if (Array.isArray(value)) {
          this._headers.set(key, value.join(', '))
        } else {
          this._headers.set(key, String(value))
        }
      })
    }
    this.markHeadersWritten()
    return this
  }
  flushHeaders(): this {
    this.markHeadersWritten()
    return this
  }
  /** Enqueues a body chunk (buffering it if the stream hasn't started yet). */
  write(chunk: unknown): boolean {
    const normalized = this.normalizeChunk(chunk)
    if (!normalized) return true
    this.markHeadersWritten()
    if (this._controller) {
      try {
        this._controller.enqueue(normalized)
      } catch (error) {
        this.triggerErrorHandlers(error instanceof Error ? error : new Error(String(error)))
      }
    } else {
      this._pendingChunks.push(normalized)
    }
    return true
  }
  /** Finishes the response: optional final chunk, close stream, fire 'close'. */
  end(chunk?: unknown): this {
    if (chunk !== undefined) this.write(chunk)
    this.markHeadersWritten()
    if (this._ended) return this
    this._ended = true
    this._resolveEnded?.()
    if (this._controller) {
      try {
        this._controller.close()
      } catch (error) {
        this.triggerErrorHandlers(error instanceof Error ? error : new Error(String(error)))
      }
    }
    this.triggerCloseHandlers()
    return this
  }
  /** Resolves once headers are written, or after timeoutMs (never rejects). */
  async waitForHeaders(timeoutMs = 30000): Promise<void> {
    if (this._headersWritten) return
    await Promise.race([
      this._headersPromise,
      new Promise<void>((resolve) => {
        setTimeout(resolve, timeoutMs)
      }),
    ])
  }
  /** Resolves once the response has ended, or after timeoutMs (never rejects). */
  async waitForEnd(timeoutMs = 30000): Promise<void> {
    if (this._ended) return
    await Promise.race([
      this._endedPromise,
      new Promise<void>((resolve) => {
        setTimeout(resolve, timeoutMs)
      }),
    ])
  }
  /** Node EventEmitter-style subset: only 'close' and 'error' are supported. */
  on(event: 'close' | 'error', handler: (() => void) | ((error: Error) => void)): this {
    if (event === 'close') {
      this._closeHandlers.push(handler as () => void)
    }
    if (event === 'error') {
      this._errorHandlers.push(handler as (error: Error) => void)
    }
    return this
  }
  /** Wraps the captured status/headers/stream as a NextResponse. */
  toNextResponse(): NextResponse {
    return new NextResponse(this.readable, {
      status: this._status,
      headers: this._headers,
    })
  }
}
/**
 * Creates the MCP server instance: advertises every direct + subagent tool
 * and handles tools/call with per-call API-key auth, rate limiting, and
 * usage tracking. Auth happens per call (not at initialize) because the
 * transport is stateless — each HTTP POST carries its own x-api-key header.
 */
function buildMcpServer(): Server {
  const server = new Server(
    {
      name: 'sim-copilot',
      version: '1.0.0',
    },
    {
      capabilities: { tools: {} },
      instructions: MCP_SERVER_INSTRUCTIONS,
    }
  )
  // tools/list: expose direct tools first, then subagent tools.
  server.setRequestHandler(ListToolsRequestSchema, async () => {
    const directTools = DIRECT_TOOL_DEFS.map((tool) => ({
      name: tool.name,
      description: tool.description,
      inputSchema: tool.inputSchema,
    }))
    const subagentTools = SUBAGENT_TOOL_DEFS.map((tool) => ({
      name: tool.name,
      description: tool.description,
      inputSchema: tool.inputSchema,
    }))
    const result: ListToolsResult = {
      tools: [...directTools, ...subagentTools],
    }
    return result
  })
  // tools/call: authenticate → rate-limit → dispatch → track, in that order.
  // Auth/rate-limit failures are returned as isError tool results (not thrown)
  // so the calling LLM sees actionable text instead of a protocol error.
  server.setRequestHandler(CallToolRequestSchema, async (request, extra) => {
    const headers = (extra.requestInfo?.headers || {}) as HeaderMap
    const apiKeyHeader = readHeader(headers, 'x-api-key')
    if (!apiKeyHeader) {
      return {
        content: [
          {
            type: 'text' as const,
            text: 'AUTHENTICATION ERROR: No Copilot API key provided. The user must set their Copilot API key in the x-api-key header. They can generate one in the Sim app under Settings → Copilot. Do NOT retry — this will fail until the key is configured.',
          },
        ],
        isError: true,
      }
    }
    const authResult = await authenticateCopilotApiKey(apiKeyHeader)
    if (!authResult.success || !authResult.userId) {
      logger.warn('MCP copilot key auth failed', { method: request.method })
      return {
        content: [
          {
            type: 'text' as const,
            text: `AUTHENTICATION ERROR: ${authResult.error} Do NOT retry — this will fail until the user fixes their Copilot API key.`,
          },
        ],
        isError: true,
      }
    }
    const rateLimitResult = await mcpRateLimiter.checkRateLimitWithSubscription(
      authResult.userId,
      await getHighestPrioritySubscription(authResult.userId),
      'api-endpoint',
      false
    )
    if (!rateLimitResult.allowed) {
      return {
        content: [
          {
            type: 'text' as const,
            text: `RATE LIMIT: Too many requests. Please wait and retry after ${rateLimitResult.resetAt.toISOString()}.`,
          },
        ],
        isError: true,
      }
    }
    const params = request.params as { name?: string; arguments?: Record<string, unknown> } | undefined
    if (!params?.name) {
      throw new McpError(ErrorCode.InvalidParams, 'Tool name required')
    }
    const result = await handleToolsCall(
      {
        name: params.name,
        arguments: params.arguments,
      },
      authResult.userId
    )
    // Fire-and-forget usage counter; runs even when the tool reported isError.
    trackMcpCopilotCall(authResult.userId)
    return result
  })
  return server
}
async function handleMcpRequestWithSdk(
request: NextRequest,
parsedBody: unknown
): Promise<NextResponse> {
const server = buildMcpServer()
const transport = new StreamableHTTPServerTransport({
sessionIdGenerator: undefined,
enableJsonResponse: true,
})
const responseCapture = new NextResponseCapture()
const requestAdapter = {
method: request.method,
headers: normalizeRequestHeaders(request),
}
await server.connect(transport)
try {
await transport.handleRequest(requestAdapter as any, responseCapture as any, parsedBody)
await responseCapture.waitForHeaders()
await responseCapture.waitForEnd()
return responseCapture.toNextResponse()
} finally {
await server.close().catch(() => {})
await transport.close().catch(() => {})
}
}
/** GET is rejected: this endpoint does not offer a server-initiated stream. */
export async function GET() {
  // Return 405 to signal that server-initiated SSE notifications are not
  // supported. Without this, clients like mcp-remote will repeatedly
  // reconnect trying to open an SSE stream, flooding the logs with GETs.
  return new NextResponse(null, { status: 405 })
}
/**
 * Main MCP entry point. Parses the JSON-RPC body and hands it to the SDK
 * transport; malformed JSON yields a 400 parse error, unexpected failures a
 * 500 internal error — both as JSON-RPC error envelopes.
 */
export async function POST(request: NextRequest) {
  try {
    let body: unknown
    try {
      body = await request.json()
    } catch {
      return NextResponse.json(createError(0, ErrorCode.ParseError, 'Invalid JSON body'), {
        status: 400,
      })
    }
    return await handleMcpRequestWithSdk(request, body)
  } catch (error) {
    logger.error('Error handling MCP request', { error })
    return NextResponse.json(createError(0, ErrorCode.InternalError, 'Internal error'), {
      status: 500,
    })
  }
}
/** DELETE is not supported; always responds 405 with a JSON-RPC error body. */
export async function DELETE(request: NextRequest) {
  // Parameter is required by the route signature but intentionally unused.
  void request
  return NextResponse.json(createError(0, -32000, 'Method not allowed.'), { status: 405 })
}
/**
 * Increment MCP copilot call counter in userStats (fire-and-forget).
 *
 * Deliberately not awaited by callers: failures are logged and never surface
 * to the MCP client. The raw SQL expression makes the increment happen inside
 * the UPDATE itself rather than via read-modify-write.
 */
function trackMcpCopilotCall(userId: string): void {
  db.update(userStats)
    .set({
      totalMcpCopilotCalls: sql`total_mcp_copilot_calls + 1`,
      lastActive: new Date(),
    })
    .where(eq(userStats.userId, userId))
    // NOTE(review): the no-op .then() presumably triggers execution of the
    // lazy query builder and yields a real Promise so .catch is available —
    // confirm against drizzle's thenable-builder semantics before removing.
    .then(() => {})
    .catch((error) => {
      logger.error('Failed to track MCP copilot call', { error, userId })
    })
}
async function handleToolsCall(
params: { name: string; arguments?: Record<string, unknown> },
userId: string
): Promise<CallToolResult> {
const args = params.arguments || {}
const directTool = DIRECT_TOOL_DEFS.find((tool) => tool.name === params.name)
if (directTool) {
return handleDirectToolCall(directTool, args, userId)
}
const subagentTool = SUBAGENT_TOOL_DEFS.find((tool) => tool.name === params.name)
if (subagentTool) {
return handleSubagentToolCall(subagentTool, args, userId)
}
throw new McpError(ErrorCode.MethodNotFound, `Tool not found: ${params.name}`)
}
/**
 * Executes a direct (non-subagent) tool server-side and wraps its output as
 * MCP text content. Failures are reported in-band via isError, never thrown.
 */
async function handleDirectToolCall(
  toolDef: (typeof DIRECT_TOOL_DEFS)[number],
  args: Record<string, unknown>,
  userId: string
): Promise<CallToolResult> {
  try {
    const workflowId = (args.workflowId as string) || ''
    const execContext = await prepareExecutionContext(userId, workflowId)
    const result = await executeToolServerSide(
      {
        id: randomUUID(),
        name: toolDef.toolId,
        status: 'pending' as const,
        params: args as Record<string, any>,
        startTime: Date.now(),
      },
      execContext
    )
    // Prefer the tool's output payload; fall back to the whole result object.
    const text = JSON.stringify(result.output ?? result, null, 2)
    return {
      content: [{ type: 'text', text }],
      isError: !result.success,
    }
  } catch (error) {
    logger.error('Direct tool execution failed', { tool: toolDef.name, error })
    const message = error instanceof Error ? error.message : String(error)
    return {
      content: [{ type: 'text', text: `Tool execution failed: ${message}` }],
      isError: true,
    }
  }
}
/**
 * Build mode uses the main chat orchestrator with the 'fast' command instead of
 * the subagent endpoint. In Go, 'build' is not a registered subagent — it's a mode
 * (ModeFast) on the main chat processor that bypasses subagent orchestration and
 * executes all tools directly.
 */
async function handleBuildToolCall(
  args: Record<string, unknown>,
  userId: string
): Promise<CallToolResult> {
  try {
    const requestText = (args.request as string) || JSON.stringify(args)
    const { model } = getCopilotModel('chat')
    const explicitWorkflowId = args.workflowId as string | undefined
    // No workflowId supplied → try to resolve one for this user.
    const resolved = explicitWorkflowId
      ? { workflowId: explicitWorkflowId }
      : await resolveWorkflowIdForUser(userId)
    if (!resolved?.workflowId) {
      const failure = {
        success: false,
        error: 'workflowId is required for build. Call create_workflow first.',
      }
      return {
        content: [{ type: 'text', text: JSON.stringify(failure, null, 2) }],
        isError: true,
      }
    }
    const chatId = randomUUID()
    const result = await orchestrateCopilotStream(
      {
        message: requestText,
        workflowId: resolved.workflowId,
        userId,
        model,
        mode: 'agent',
        commands: ['fast'],
        messageId: randomUUID(),
        version: SIM_AGENT_VERSION,
        headless: true,
        chatId,
        source: 'mcp',
      },
      {
        userId,
        workflowId: resolved.workflowId,
        chatId,
        autoExecuteTools: true,
        timeout: 300000,
        interactive: false,
      }
    )
    const responseData = {
      success: result.success,
      content: result.content,
      toolCalls: result.toolCalls,
      error: result.error,
    }
    return {
      content: [{ type: 'text', text: JSON.stringify(responseData, null, 2) }],
      isError: !result.success,
    }
  } catch (error) {
    logger.error('Build tool call failed', { error })
    const message = error instanceof Error ? error.message : String(error)
    return {
      content: [{ type: 'text', text: `Build failed: ${message}` }],
      isError: true,
    }
  }
}
/**
 * Routes a subagent tool call through the subagent orchestrator ('build' is
 * special-cased onto the main chat orchestrator, see handleBuildToolCall).
 * The subagent's structured result, error, or raw content is serialized back
 * as MCP text content; failures are reported in-band via isError.
 */
async function handleSubagentToolCall(
  toolDef: (typeof SUBAGENT_TOOL_DEFS)[number],
  args: Record<string, unknown>,
  userId: string
): Promise<CallToolResult> {
  if (toolDef.agentId === 'build') {
    return handleBuildToolCall(args, userId)
  }
  try {
    // Subagents accept their prompt under several argument names.
    const requestText =
      (args.request as string) ||
      (args.message as string) ||
      (args.error as string) ||
      JSON.stringify(args)
    // Copy rather than mutate the caller-supplied context object when
    // injecting a top-level plan argument.
    const context: Record<string, unknown> = {
      ...((args.context as Record<string, unknown>) || {}),
    }
    if (args.plan && !context.plan) {
      context.plan = args.plan
    }
    const { model } = getCopilotModel('chat')
    const result = await orchestrateSubagentStream(
      toolDef.agentId,
      {
        message: requestText,
        workflowId: args.workflowId,
        workspaceId: args.workspaceId,
        context,
        model,
        headless: true,
        source: 'mcp',
      },
      {
        userId,
        workflowId: args.workflowId as string | undefined,
        workspaceId: args.workspaceId as string | undefined,
      }
    )
    // Prefer the structured result; fall back to error, then raw content.
    let responseData: unknown
    if (result.structuredResult) {
      responseData = {
        success: result.structuredResult.success ?? result.success,
        type: result.structuredResult.type,
        summary: result.structuredResult.summary,
        data: result.structuredResult.data,
      }
    } else if (result.error) {
      responseData = {
        success: false,
        error: result.error,
        errors: result.errors,
      }
    } else {
      responseData = {
        success: result.success,
        content: result.content,
      }
    }
    return {
      content: [
        {
          type: 'text',
          text: JSON.stringify(responseData, null, 2),
        },
      ],
      isError: !result.success,
    }
  } catch (error) {
    logger.error('Subagent tool call failed', {
      tool: toolDef.name,
      agentId: toolDef.agentId,
      error,
    })
    return {
      content: [
        {
          type: 'text',
          text: `Subagent call failed: ${error instanceof Error ? error.message : String(error)}`,
        },
      ],
      isError: true,
    }
  }
}

View File

@@ -4,7 +4,7 @@ import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm' import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { checkInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request' import { generateRequestId } from '@/lib/core/utils/request'
import { checkWorkspaceAccess } from '@/lib/workspaces/permissions/utils' import { checkWorkspaceAccess } from '@/lib/workspaces/permissions/utils'
@@ -36,7 +36,7 @@ async function validateMemoryAccess(
requestId: string, requestId: string,
action: 'read' | 'write' action: 'read' | 'write'
): Promise<{ userId: string } | { error: NextResponse }> { ): Promise<{ userId: string } | { error: NextResponse }> {
const authResult = await checkInternalAuth(request, { requireWorkflowId: false }) const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success || !authResult.userId) { if (!authResult.success || !authResult.userId) {
logger.warn(`[${requestId}] Unauthorized memory ${action} attempt`) logger.warn(`[${requestId}] Unauthorized memory ${action} attempt`)
return { return {

View File

@@ -3,7 +3,7 @@ import { memory } from '@sim/db/schema'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { and, eq, isNull, like } from 'drizzle-orm' import { and, eq, isNull, like } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { checkInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request' import { generateRequestId } from '@/lib/core/utils/request'
import { checkWorkspaceAccess } from '@/lib/workspaces/permissions/utils' import { checkWorkspaceAccess } from '@/lib/workspaces/permissions/utils'
@@ -16,7 +16,7 @@ export async function GET(request: NextRequest) {
const requestId = generateRequestId() const requestId = generateRequestId()
try { try {
const authResult = await checkInternalAuth(request) const authResult = await checkHybridAuth(request)
if (!authResult.success || !authResult.userId) { if (!authResult.success || !authResult.userId) {
logger.warn(`[${requestId}] Unauthorized memory access attempt`) logger.warn(`[${requestId}] Unauthorized memory access attempt`)
return NextResponse.json( return NextResponse.json(
@@ -89,7 +89,7 @@ export async function POST(request: NextRequest) {
const requestId = generateRequestId() const requestId = generateRequestId()
try { try {
const authResult = await checkInternalAuth(request) const authResult = await checkHybridAuth(request)
if (!authResult.success || !authResult.userId) { if (!authResult.success || !authResult.userId) {
logger.warn(`[${requestId}] Unauthorized memory creation attempt`) logger.warn(`[${requestId}] Unauthorized memory creation attempt`)
return NextResponse.json( return NextResponse.json(
@@ -228,7 +228,7 @@ export async function DELETE(request: NextRequest) {
const requestId = generateRequestId() const requestId = generateRequestId()
try { try {
const authResult = await checkInternalAuth(request) const authResult = await checkHybridAuth(request)
if (!authResult.success || !authResult.userId) { if (!authResult.success || !authResult.userId) {
logger.warn(`[${requestId}] Unauthorized memory deletion attempt`) logger.warn(`[${requestId}] Unauthorized memory deletion attempt`)
return NextResponse.json( return NextResponse.json(

View File

@@ -3,7 +3,7 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { createA2AClient } from '@/lib/a2a/utils' import { createA2AClient } from '@/lib/a2a/utils'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request' import { generateRequestId } from '@/lib/core/utils/request'
const logger = createLogger('A2ACancelTaskAPI') const logger = createLogger('A2ACancelTaskAPI')
@@ -20,7 +20,7 @@ export async function POST(request: NextRequest) {
const requestId = generateRequestId() const requestId = generateRequestId()
try { try {
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success) { if (!authResult.success) {
logger.warn(`[${requestId}] Unauthorized A2A cancel task attempt`) logger.warn(`[${requestId}] Unauthorized A2A cancel task attempt`)

View File

@@ -2,7 +2,7 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { createA2AClient } from '@/lib/a2a/utils' import { createA2AClient } from '@/lib/a2a/utils'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request' import { generateRequestId } from '@/lib/core/utils/request'
export const dynamic = 'force-dynamic' export const dynamic = 'force-dynamic'
@@ -20,7 +20,7 @@ export async function POST(request: NextRequest) {
const requestId = generateRequestId() const requestId = generateRequestId()
try { try {
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success) { if (!authResult.success) {
logger.warn( logger.warn(

View File

@@ -2,7 +2,7 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { createA2AClient } from '@/lib/a2a/utils' import { createA2AClient } from '@/lib/a2a/utils'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request' import { generateRequestId } from '@/lib/core/utils/request'
export const dynamic = 'force-dynamic' export const dynamic = 'force-dynamic'
@@ -18,7 +18,7 @@ export async function POST(request: NextRequest) {
const requestId = generateRequestId() const requestId = generateRequestId()
try { try {
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success) { if (!authResult.success) {
logger.warn(`[${requestId}] Unauthorized A2A get agent card attempt: ${authResult.error}`) logger.warn(`[${requestId}] Unauthorized A2A get agent card attempt: ${authResult.error}`)

View File

@@ -2,7 +2,7 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { createA2AClient } from '@/lib/a2a/utils' import { createA2AClient } from '@/lib/a2a/utils'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request' import { generateRequestId } from '@/lib/core/utils/request'
export const dynamic = 'force-dynamic' export const dynamic = 'force-dynamic'
@@ -19,7 +19,7 @@ export async function POST(request: NextRequest) {
const requestId = generateRequestId() const requestId = generateRequestId()
try { try {
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success) { if (!authResult.success) {
logger.warn( logger.warn(

View File

@@ -3,7 +3,7 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { createA2AClient } from '@/lib/a2a/utils' import { createA2AClient } from '@/lib/a2a/utils'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request' import { generateRequestId } from '@/lib/core/utils/request'
export const dynamic = 'force-dynamic' export const dynamic = 'force-dynamic'
@@ -21,7 +21,7 @@ export async function POST(request: NextRequest) {
const requestId = generateRequestId() const requestId = generateRequestId()
try { try {
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success) { if (!authResult.success) {
logger.warn(`[${requestId}] Unauthorized A2A get task attempt: ${authResult.error}`) logger.warn(`[${requestId}] Unauthorized A2A get task attempt: ${authResult.error}`)

View File

@@ -10,7 +10,7 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { createA2AClient, extractTextContent, isTerminalState } from '@/lib/a2a/utils' import { createA2AClient, extractTextContent, isTerminalState } from '@/lib/a2a/utils'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request' import { generateRequestId } from '@/lib/core/utils/request'
const logger = createLogger('A2AResubscribeAPI') const logger = createLogger('A2AResubscribeAPI')
@@ -27,7 +27,7 @@ export async function POST(request: NextRequest) {
const requestId = generateRequestId() const requestId = generateRequestId()
try { try {
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success) { if (!authResult.success) {
logger.warn(`[${requestId}] Unauthorized A2A resubscribe attempt`) logger.warn(`[${requestId}] Unauthorized A2A resubscribe attempt`)

View File

@@ -3,7 +3,7 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { createA2AClient, extractTextContent, isTerminalState } from '@/lib/a2a/utils' import { createA2AClient, extractTextContent, isTerminalState } from '@/lib/a2a/utils'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { validateUrlWithDNS } from '@/lib/core/security/input-validation.server' import { validateUrlWithDNS } from '@/lib/core/security/input-validation.server'
import { generateRequestId } from '@/lib/core/utils/request' import { generateRequestId } from '@/lib/core/utils/request'
@@ -32,7 +32,7 @@ export async function POST(request: NextRequest) {
const requestId = generateRequestId() const requestId = generateRequestId()
try { try {
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success) { if (!authResult.success) {
logger.warn(`[${requestId}] Unauthorized A2A send message attempt: ${authResult.error}`) logger.warn(`[${requestId}] Unauthorized A2A send message attempt: ${authResult.error}`)

View File

@@ -2,7 +2,7 @@ import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { createA2AClient } from '@/lib/a2a/utils' import { createA2AClient } from '@/lib/a2a/utils'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { validateUrlWithDNS } from '@/lib/core/security/input-validation.server' import { validateUrlWithDNS } from '@/lib/core/security/input-validation.server'
import { generateRequestId } from '@/lib/core/utils/request' import { generateRequestId } from '@/lib/core/utils/request'
@@ -22,7 +22,7 @@ export async function POST(request: NextRequest) {
const requestId = generateRequestId() const requestId = generateRequestId()
try { try {
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success) { if (!authResult.success) {
logger.warn(`[${requestId}] Unauthorized A2A set push notification attempt`, { logger.warn(`[${requestId}] Unauthorized A2A set push notification attempt`, {

View File

@@ -1,7 +1,7 @@
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server' import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod' import { z } from 'zod'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { getUserUsageLogs, type UsageLogSource } from '@/lib/billing/core/usage-log' import { getUserUsageLogs, type UsageLogSource } from '@/lib/billing/core/usage-log'
const logger = createLogger('UsageLogsAPI') const logger = createLogger('UsageLogsAPI')
@@ -20,7 +20,7 @@ const QuerySchema = z.object({
*/ */
export async function GET(req: NextRequest) { export async function GET(req: NextRequest) {
try { try {
const auth = await checkSessionOrInternalAuth(req, { requireWorkflowId: false }) const auth = await checkHybridAuth(req, { requireWorkflowId: false })
if (!auth.success || !auth.userId) { if (!auth.success || !auth.userId) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })

View File

@@ -0,0 +1,114 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getCopilotModel } from '@/lib/copilot/config'
import { SIM_AGENT_VERSION } from '@/lib/copilot/constants'
import { COPILOT_REQUEST_MODES } from '@/lib/copilot/models'
import { orchestrateCopilotStream } from '@/lib/copilot/orchestrator'
import { resolveWorkflowIdForUser } from '@/lib/workflows/utils'
import { authenticateV1Request } from '@/app/api/v1/auth'
const logger = createLogger('CopilotHeadlessAPI')
const RequestSchema = z.object({
  // The user prompt forwarded to the copilot. Required and non-empty.
  message: z.string().min(1, 'message is required'),
  // Optional explicit workflow target; resolution fallbacks are documented on POST below.
  workflowId: z.string().optional(),
  // Optional workflow lookup by name, used when workflowId is not provided.
  workflowName: z.string().optional(),
  // Optional existing chat to continue; a new id is generated when omitted.
  chatId: z.string().optional(),
  // Request mode constrained to the supported set; defaults to 'agent'.
  mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'),
  // Optional model override; falls back to getCopilotModel('chat') defaults.
  model: z.string().optional(),
  // Whether server-side orchestration may run tools without user confirmation.
  autoExecuteTools: z.boolean().optional().default(true),
  // Orchestration timeout in milliseconds (default 5 minutes).
  timeout: z.number().optional().default(300000),
})
/**
 * POST /api/v1/copilot/chat
 * Headless copilot endpoint for server-side orchestration.
 *
 * workflowId is optional - if not provided:
 * - If workflowName is provided, finds that workflow
 * - Otherwise uses the user's first workflow as context
 * - The copilot can still operate on any workflow using list_user_workflows
 *
 * Responses:
 * - 401 when v1 authentication fails
 * - 400 when the body is not valid JSON, fails schema validation, or no workflow resolves
 * - 500 on unexpected orchestration errors
 */
export async function POST(req: NextRequest) {
  // Authenticate before touching the body so unauthenticated callers get 401
  // regardless of payload validity.
  const auth = await authenticateV1Request(req)
  if (!auth.authenticated || !auth.userId) {
    return NextResponse.json(
      { success: false, error: auth.error || 'Unauthorized' },
      { status: 401 }
    )
  }

  // Parse the body in its own guarded step: a malformed JSON payload is a
  // client error (400), not an internal one. Previously the SyntaxError from
  // req.json() fell through to the generic catch and surfaced as a 500.
  let body: unknown
  try {
    body = await req.json()
  } catch {
    return NextResponse.json(
      { success: false, error: 'Invalid request', details: 'Request body must be valid JSON' },
      { status: 400 }
    )
  }

  try {
    const parsed = RequestSchema.parse(body)

    const defaults = getCopilotModel('chat')
    const selectedModel = parsed.model || defaults.model

    // Resolve workflow ID (explicit id > name lookup > user's first workflow).
    const resolved = await resolveWorkflowIdForUser(
      auth.userId,
      parsed.workflowId,
      parsed.workflowName
    )

    if (!resolved) {
      return NextResponse.json(
        {
          success: false,
          error: 'No workflows found. Create a workflow first or provide a valid workflowId.',
        },
        { status: 400 }
      )
    }

    // Transform mode to transport mode (same as client API)
    // build and agent both map to 'agent' on the backend
    const effectiveMode = parsed.mode === 'agent' ? 'build' : parsed.mode
    const transportMode = effectiveMode === 'build' ? 'agent' : effectiveMode

    // Always generate a chatId - required for artifacts system to work with subagents
    const chatId = parsed.chatId || crypto.randomUUID()

    const requestPayload = {
      message: parsed.message,
      workflowId: resolved.workflowId,
      userId: auth.userId,
      model: selectedModel,
      mode: transportMode,
      messageId: crypto.randomUUID(),
      version: SIM_AGENT_VERSION,
      headless: true,
      chatId,
    }

    const result = await orchestrateCopilotStream(requestPayload, {
      userId: auth.userId,
      workflowId: resolved.workflowId,
      chatId,
      autoExecuteTools: parsed.autoExecuteTools,
      timeout: parsed.timeout,
      interactive: false,
    })

    return NextResponse.json({
      success: result.success,
      content: result.content,
      toolCalls: result.toolCalls,
      chatId: result.chatId || chatId, // Return the chatId for conversation continuity
      conversationId: result.conversationId,
      error: result.error,
    })
  } catch (error) {
    if (error instanceof z.ZodError) {
      return NextResponse.json(
        { success: false, error: 'Invalid request', details: error.errors },
        { status: 400 }
      )
    }
    logger.error('Headless copilot request failed', {
      error: error instanceof Error ? error.message : String(error),
    })
    return NextResponse.json({ success: false, error: 'Internal server error' }, { status: 500 })
  }
}

View File

@@ -325,11 +325,6 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
requestId requestId
) )
// Client-side sessions and personal API keys bill/permission-check the
// authenticated user, not the workspace billed account.
const useAuthenticatedUserAsActor =
isClientSession || (auth.authType === 'api_key' && auth.apiKeyType === 'personal')
const preprocessResult = await preprocessExecution({ const preprocessResult = await preprocessExecution({
workflowId, workflowId,
userId, userId,
@@ -339,7 +334,6 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
checkDeployment: !shouldUseDraftState, checkDeployment: !shouldUseDraftState,
loggingSession, loggingSession,
useDraftState: shouldUseDraftState, useDraftState: shouldUseDraftState,
useAuthenticatedUserAsActor,
}) })
if (!preprocessResult.success) { if (!preprocessResult.success) {

View File

@@ -74,7 +74,8 @@ function FileCard({ file, isExecutionFile = false, workspaceId }: FileCardProps)
} }
if (isExecutionFile) { if (isExecutionFile) {
const serveUrl = `/api/files/serve/${encodeURIComponent(file.key)}?context=execution` const serveUrl =
file.url || `/api/files/serve/${encodeURIComponent(file.key)}?context=execution`
window.open(serveUrl, '_blank') window.open(serveUrl, '_blank')
logger.info(`Opened execution file serve URL: ${serveUrl}`) logger.info(`Opened execution file serve URL: ${serveUrl}`)
} else { } else {
@@ -87,12 +88,16 @@ function FileCard({ file, isExecutionFile = false, workspaceId }: FileCardProps)
logger.warn( logger.warn(
`Could not construct viewer URL for file: ${file.name}, falling back to serve URL` `Could not construct viewer URL for file: ${file.name}, falling back to serve URL`
) )
const serveUrl = `/api/files/serve/${encodeURIComponent(file.key)}?context=workspace` const serveUrl =
file.url || `/api/files/serve/${encodeURIComponent(file.key)}?context=workspace`
window.open(serveUrl, '_blank') window.open(serveUrl, '_blank')
} }
} }
} catch (error) { } catch (error) {
logger.error(`Failed to download file ${file.name}:`, error) logger.error(`Failed to download file ${file.name}:`, error)
if (file.url) {
window.open(file.url, '_blank')
}
} finally { } finally {
setIsDownloading(false) setIsDownloading(false)
} }
@@ -193,7 +198,8 @@ export function FileDownload({
} }
if (isExecutionFile) { if (isExecutionFile) {
const serveUrl = `/api/files/serve/${encodeURIComponent(file.key)}?context=execution` const serveUrl =
file.url || `/api/files/serve/${encodeURIComponent(file.key)}?context=execution`
window.open(serveUrl, '_blank') window.open(serveUrl, '_blank')
logger.info(`Opened execution file serve URL: ${serveUrl}`) logger.info(`Opened execution file serve URL: ${serveUrl}`)
} else { } else {
@@ -206,12 +212,16 @@ export function FileDownload({
logger.warn( logger.warn(
`Could not construct viewer URL for file: ${file.name}, falling back to serve URL` `Could not construct viewer URL for file: ${file.name}, falling back to serve URL`
) )
const serveUrl = `/api/files/serve/${encodeURIComponent(file.key)}?context=workspace` const serveUrl =
file.url || `/api/files/serve/${encodeURIComponent(file.key)}?context=workspace`
window.open(serveUrl, '_blank') window.open(serveUrl, '_blank')
} }
} }
} catch (error) { } catch (error) {
logger.error(`Failed to download file ${file.name}:`, error) logger.error(`Failed to download file ${file.name}:`, error)
if (file.url) {
window.open(file.url, '_blank')
}
} finally { } finally {
setIsDownloading(false) setIsDownloading(false)
} }

View File

@@ -211,7 +211,7 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
if (block.type === 'text') { if (block.type === 'text') {
const isLastTextBlock = const isLastTextBlock =
index === message.contentBlocks!.length - 1 && block.type === 'text' index === message.contentBlocks!.length - 1 && block.type === 'text'
const parsed = parseSpecialTags(block.content) const parsed = parseSpecialTags(block.content ?? '')
// Mask credential IDs in the displayed content // Mask credential IDs in the displayed content
const cleanBlockContent = maskCredentialValue( const cleanBlockContent = maskCredentialValue(
parsed.cleanContent.replace(/\n{3,}/g, '\n\n') parsed.cleanContent.replace(/\n{3,}/g, '\n\n')
@@ -243,7 +243,7 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
return ( return (
<div key={blockKey} className='w-full'> <div key={blockKey} className='w-full'>
<ThinkingBlock <ThinkingBlock
content={maskCredentialValue(block.content)} content={maskCredentialValue(block.content ?? '')}
isStreaming={isActivelyStreaming} isStreaming={isActivelyStreaming}
hasFollowingContent={hasFollowingContent} hasFollowingContent={hasFollowingContent}
hasSpecialTags={hasSpecialTags} hasSpecialTags={hasSpecialTags}
@@ -251,7 +251,7 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
</div> </div>
) )
} }
if (block.type === 'tool_call') { if (block.type === 'tool_call' && block.toolCall) {
const blockKey = `tool-${block.toolCall.id}` const blockKey = `tool-${block.toolCall.id}`
return ( return (

View File

@@ -1,20 +1,15 @@
'use client' 'use client'
import { memo, useEffect, useMemo, useRef, useState } from 'react' import { memo, useEffect, useMemo, useRef, useState } from 'react'
import { createLogger } from '@sim/logger'
import clsx from 'clsx' import clsx from 'clsx'
import { ChevronUp, LayoutList } from 'lucide-react' import { ChevronUp, LayoutList } from 'lucide-react'
import Editor from 'react-simple-code-editor' import Editor from 'react-simple-code-editor'
import { Button, Code, getCodeEditorProps, highlight, languages } from '@/components/emcn' import { Button, Code, getCodeEditorProps, highlight, languages } from '@/components/emcn'
import { ClientToolCallState } from '@/lib/copilot/tools/client/base-tool'
import { getClientTool } from '@/lib/copilot/tools/client/manager'
import { getRegisteredTools } from '@/lib/copilot/tools/client/registry'
import '@/lib/copilot/tools/client/init-tool-configs'
import { import {
getSubagentLabels as getSubagentLabelsFromConfig, ClientToolCallState,
getToolUIConfig, TOOL_DISPLAY_REGISTRY,
hasInterrupt as hasInterruptFromConfig, } from '@/lib/copilot/tools/client/tool-display-registry'
isSpecialTool as isSpecialToolFromConfig,
} from '@/lib/copilot/tools/client/ui-config'
import { formatDuration } from '@/lib/core/utils/formatting' import { formatDuration } from '@/lib/core/utils/formatting'
import { CopilotMarkdownRenderer } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/markdown-renderer' import { CopilotMarkdownRenderer } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/markdown-renderer'
import { SmoothStreamingText } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/smooth-streaming' import { SmoothStreamingText } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/smooth-streaming'
@@ -25,7 +20,6 @@ import { getDisplayValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/co
import { getBlock } from '@/blocks/registry' import { getBlock } from '@/blocks/registry'
import type { CopilotToolCall } from '@/stores/panel' import type { CopilotToolCall } from '@/stores/panel'
import { useCopilotStore } from '@/stores/panel' import { useCopilotStore } from '@/stores/panel'
import { CLASS_TOOL_METADATA } from '@/stores/panel/copilot/store'
import type { SubAgentContentBlock } from '@/stores/panel/copilot/types' import type { SubAgentContentBlock } from '@/stores/panel/copilot/types'
import { useWorkflowStore } from '@/stores/workflows/workflow/store' import { useWorkflowStore } from '@/stores/workflows/workflow/store'
@@ -710,8 +704,8 @@ const ShimmerOverlayText = memo(function ShimmerOverlayText({
* @returns The completion label from UI config, defaults to 'Thought' * @returns The completion label from UI config, defaults to 'Thought'
*/ */
function getSubagentCompletionLabel(toolName: string): string { function getSubagentCompletionLabel(toolName: string): string {
const labels = getSubagentLabelsFromConfig(toolName, false) const labels = TOOL_DISPLAY_REGISTRY[toolName]?.uiConfig?.subagentLabels
return labels?.completed ?? 'Thought' return labels?.completed || 'Thought'
} }
/** /**
@@ -943,7 +937,7 @@ const SubagentContentRenderer = memo(function SubagentContentRenderer({
* Determines if a tool call should display with special gradient styling. * Determines if a tool call should display with special gradient styling.
*/ */
function isSpecialToolCall(toolCall: CopilotToolCall): boolean { function isSpecialToolCall(toolCall: CopilotToolCall): boolean {
return isSpecialToolFromConfig(toolCall.name) return TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.isSpecial === true
} }
/** /**
@@ -1223,28 +1217,11 @@ const WorkflowEditSummary = memo(function WorkflowEditSummary({
/** Checks if a tool is server-side executed (not a client tool) */ /** Checks if a tool is server-side executed (not a client tool) */
function isIntegrationTool(toolName: string): boolean { function isIntegrationTool(toolName: string): boolean {
return !CLASS_TOOL_METADATA[toolName] return !TOOL_DISPLAY_REGISTRY[toolName]
} }
function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean { function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean {
if (hasInterruptFromConfig(toolCall.name) && toolCall.state === 'pending') { const hasInterrupt = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.interrupt === true
return true
}
const instance = getClientTool(toolCall.id)
let hasInterrupt = !!instance?.getInterruptDisplays?.()
if (!hasInterrupt) {
try {
const def = getRegisteredTools()[toolCall.name]
if (def) {
hasInterrupt =
typeof def.hasInterrupt === 'function'
? !!def.hasInterrupt(toolCall.params || {})
: !!def.hasInterrupt
}
} catch {}
}
if (hasInterrupt && toolCall.state === 'pending') { if (hasInterrupt && toolCall.state === 'pending') {
return true return true
} }
@@ -1257,109 +1234,50 @@ function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean {
return false return false
} }
const toolCallLogger = createLogger('CopilotToolCall')
async function sendToolDecision(
toolCallId: string,
status: 'accepted' | 'rejected' | 'background'
) {
try {
await fetch('/api/copilot/confirm', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ toolCallId, status }),
})
} catch (error) {
toolCallLogger.warn('Failed to send tool decision', {
toolCallId,
status,
error: error instanceof Error ? error.message : String(error),
})
}
}
async function handleRun( async function handleRun(
toolCall: CopilotToolCall, toolCall: CopilotToolCall,
setToolCallState: any, setToolCallState: any,
onStateChange?: any, onStateChange?: any,
editedParams?: any editedParams?: any
) { ) {
const instance = getClientTool(toolCall.id) setToolCallState(toolCall, 'executing', editedParams ? { params: editedParams } : undefined)
onStateChange?.('executing')
if (!instance && isIntegrationTool(toolCall.name)) { await sendToolDecision(toolCall.id, 'accepted')
onStateChange?.('executing')
try {
await useCopilotStore.getState().executeIntegrationTool(toolCall.id)
} catch (e) {
setToolCallState(toolCall, 'error', { error: e instanceof Error ? e.message : String(e) })
onStateChange?.('error')
try {
await fetch('/api/copilot/tools/mark-complete', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
id: toolCall.id,
name: toolCall.name,
status: 500,
message: e instanceof Error ? e.message : 'Tool execution failed',
data: { error: e instanceof Error ? e.message : String(e) },
}),
})
} catch {
console.error('[handleRun] Failed to notify backend of tool error:', toolCall.id)
}
}
return
}
if (!instance) return
try {
const mergedParams =
editedParams ||
(toolCall as any).params ||
(toolCall as any).parameters ||
(toolCall as any).input ||
{}
await instance.handleAccept?.(mergedParams)
onStateChange?.('executing')
} catch (e) {
setToolCallState(toolCall, 'error', { error: e instanceof Error ? e.message : String(e) })
}
} }
async function handleSkip(toolCall: CopilotToolCall, setToolCallState: any, onStateChange?: any) { async function handleSkip(toolCall: CopilotToolCall, setToolCallState: any, onStateChange?: any) {
const instance = getClientTool(toolCall.id)
if (!instance && isIntegrationTool(toolCall.name)) {
setToolCallState(toolCall, 'rejected')
onStateChange?.('rejected')
let notified = false
for (let attempt = 0; attempt < 3 && !notified; attempt++) {
try {
const res = await fetch('/api/copilot/tools/mark-complete', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
id: toolCall.id,
name: toolCall.name,
status: 400,
message: 'Tool execution skipped by user',
data: { skipped: true, reason: 'user_skipped' },
}),
})
if (res.ok) {
notified = true
}
} catch (e) {
if (attempt < 2) {
await new Promise((resolve) => setTimeout(resolve, 500))
}
}
}
if (!notified) {
console.error('[handleSkip] Failed to notify backend after 3 attempts:', toolCall.id)
}
return
}
if (instance) {
try {
await instance.handleReject?.()
} catch {}
}
setToolCallState(toolCall, 'rejected') setToolCallState(toolCall, 'rejected')
onStateChange?.('rejected') onStateChange?.('rejected')
await sendToolDecision(toolCall.id, 'rejected')
} }
function getDisplayName(toolCall: CopilotToolCall): string { function getDisplayName(toolCall: CopilotToolCall): string {
const fromStore = (toolCall as any).display?.text const fromStore = (toolCall as any).display?.text
if (fromStore) return fromStore if (fromStore) return fromStore
try { const registryEntry = TOOL_DISPLAY_REGISTRY[toolCall.name]
const def = getRegisteredTools()[toolCall.name] as any const byState = registryEntry?.displayNames?.[toolCall.state as ClientToolCallState]
const byState = def?.metadata?.displayNames?.[toolCall.state] if (byState?.text) return byState.text
if (byState?.text) return byState.text
} catch {}
const stateVerb = getStateVerb(toolCall.state) const stateVerb = getStateVerb(toolCall.state)
const formattedName = formatToolName(toolCall.name) const formattedName = formatToolName(toolCall.name)
@@ -1509,7 +1427,7 @@ export function ToolCall({
// Check if this integration tool is auto-allowed // Check if this integration tool is auto-allowed
// Subscribe to autoAllowedTools so we re-render when it changes // Subscribe to autoAllowedTools so we re-render when it changes
const autoAllowedTools = useCopilotStore((s) => s.autoAllowedTools) const autoAllowedTools = useCopilotStore((s) => s.autoAllowedTools)
const { removeAutoAllowedTool } = useCopilotStore() const { removeAutoAllowedTool, setToolCallState } = useCopilotStore()
const isAutoAllowed = isIntegrationTool(toolCall.name) && autoAllowedTools.includes(toolCall.name) const isAutoAllowed = isIntegrationTool(toolCall.name) && autoAllowedTools.includes(toolCall.name)
// Update edited params when toolCall params change (deep comparison to avoid resetting user edits on ref change) // Update edited params when toolCall params change (deep comparison to avoid resetting user edits on ref change)
@@ -1537,23 +1455,7 @@ export function ToolCall({
return null return null
// Special rendering for subagent tools - show as thinking text with tool calls at top level // Special rendering for subagent tools - show as thinking text with tool calls at top level
const SUBAGENT_TOOLS = [ const isSubagentTool = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.subagent === true
'plan',
'edit',
'debug',
'test',
'deploy',
'evaluate',
'auth',
'research',
'knowledge',
'custom_tool',
'tour',
'info',
'workflow',
'superagent',
]
const isSubagentTool = SUBAGENT_TOOLS.includes(toolCall.name)
// For ALL subagent tools, don't show anything until we have blocks with content // For ALL subagent tools, don't show anything until we have blocks with content
if (isSubagentTool) { if (isSubagentTool) {
@@ -1593,17 +1495,18 @@ export function ToolCall({
stateStr === 'aborted' stateStr === 'aborted'
// Allow rendering if: // Allow rendering if:
// 1. Tool is in CLASS_TOOL_METADATA (client tools), OR // 1. Tool is in TOOL_DISPLAY_REGISTRY (client tools), OR
// 2. We're in build mode (integration tools are executed server-side), OR // 2. We're in build mode (integration tools are executed server-side), OR
// 3. Tool call is already completed (historical - should always render) // 3. Tool call is already completed (historical - should always render)
const isClientTool = !!CLASS_TOOL_METADATA[toolCall.name] const isClientTool = !!TOOL_DISPLAY_REGISTRY[toolCall.name]
const isIntegrationToolInBuildMode = mode === 'build' && !isClientTool const isIntegrationToolInBuildMode = mode === 'build' && !isClientTool
if (!isClientTool && !isIntegrationToolInBuildMode && !isCompletedToolCall) { if (!isClientTool && !isIntegrationToolInBuildMode && !isCompletedToolCall) {
return null return null
} }
const toolUIConfig = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig
// Check if tool has params table config (meaning it's expandable) // Check if tool has params table config (meaning it's expandable)
const hasParamsTable = !!getToolUIConfig(toolCall.name)?.paramsTable const hasParamsTable = !!toolUIConfig?.paramsTable
const isRunWorkflow = toolCall.name === 'run_workflow' const isRunWorkflow = toolCall.name === 'run_workflow'
const isExpandableTool = const isExpandableTool =
hasParamsTable || hasParamsTable ||
@@ -1613,7 +1516,6 @@ export function ToolCall({
const showButtons = isCurrentMessage && shouldShowRunSkipButtons(toolCall) const showButtons = isCurrentMessage && shouldShowRunSkipButtons(toolCall)
// Check UI config for secondary action - only show for current message tool calls // Check UI config for secondary action - only show for current message tool calls
const toolUIConfig = getToolUIConfig(toolCall.name)
const secondaryAction = toolUIConfig?.secondaryAction const secondaryAction = toolUIConfig?.secondaryAction
const showSecondaryAction = secondaryAction?.showInStates.includes( const showSecondaryAction = secondaryAction?.showInStates.includes(
toolCall.state as ClientToolCallState toolCall.state as ClientToolCallState
@@ -2211,16 +2113,9 @@ export function ToolCall({
<div className='mt-[10px]'> <div className='mt-[10px]'>
<Button <Button
onClick={async () => { onClick={async () => {
try { setToolCallState(toolCall, ClientToolCallState.background)
const instance = getClientTool(toolCall.id) onStateChange?.('background')
instance?.setState?.((ClientToolCallState as any).background) await sendToolDecision(toolCall.id, 'background')
await instance?.markToolComplete?.(
200,
'The user has chosen to move the workflow execution to the background. Check back with them later to know when the workflow execution is complete'
)
forceUpdate({})
onStateChange?.('background')
} catch {}
}} }}
variant='tertiary' variant='tertiary'
title='Move to Background' title='Move to Background'
@@ -2232,21 +2127,9 @@ export function ToolCall({
<div className='mt-[10px]'> <div className='mt-[10px]'>
<Button <Button
onClick={async () => { onClick={async () => {
try { setToolCallState(toolCall, ClientToolCallState.background)
const instance = getClientTool(toolCall.id) onStateChange?.('background')
const elapsedSeconds = instance?.getElapsedSeconds?.() || 0 await sendToolDecision(toolCall.id, 'background')
instance?.setState?.((ClientToolCallState as any).background, {
result: { _elapsedSeconds: elapsedSeconds },
})
const { updateToolCallParams } = useCopilotStore.getState()
updateToolCallParams?.(toolCall.id, { _elapsedSeconds: Math.round(elapsedSeconds) })
await instance?.markToolComplete?.(
200,
`User woke you up after ${Math.round(elapsedSeconds)} seconds`
)
forceUpdate({})
onStateChange?.('background')
} catch {}
}} }}
variant='tertiary' variant='tertiary'
title='Wake' title='Wake'

View File

@@ -246,6 +246,7 @@ export function getCommandDisplayLabel(commandId: string): string {
* Model configuration options * Model configuration options
*/ */
export const MODEL_OPTIONS = [ export const MODEL_OPTIONS = [
{ value: 'claude-4.6-opus', label: 'Claude 4.6 Opus' },
{ value: 'claude-4.5-opus', label: 'Claude 4.5 Opus' }, { value: 'claude-4.5-opus', label: 'Claude 4.5 Opus' },
{ value: 'claude-4.5-sonnet', label: 'Claude 4.5 Sonnet' }, { value: 'claude-4.5-sonnet', label: 'Claude 4.5 Sonnet' },
{ value: 'claude-4.5-haiku', label: 'Claude 4.5 Haiku' }, { value: 'claude-4.5-haiku', label: 'Claude 4.5 Haiku' },

View File

@@ -107,13 +107,13 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
currentChat, currentChat,
selectChat, selectChat,
deleteChat, deleteChat,
areChatsFresh,
workflowId: copilotWorkflowId, workflowId: copilotWorkflowId,
setPlanTodos, setPlanTodos,
closePlanTodos, closePlanTodos,
clearPlanArtifact, clearPlanArtifact,
savePlanArtifact, savePlanArtifact,
loadAutoAllowedTools, loadAutoAllowedTools,
resumeActiveStream,
} = useCopilotStore() } = useCopilotStore()
// Initialize copilot // Initialize copilot
@@ -126,6 +126,7 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
loadAutoAllowedTools, loadAutoAllowedTools,
currentChat, currentChat,
isSendingMessage, isSendingMessage,
resumeActiveStream,
}) })
// Handle scroll management (80px stickiness for copilot) // Handle scroll management (80px stickiness for copilot)
@@ -140,7 +141,6 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
activeWorkflowId, activeWorkflowId,
copilotWorkflowId, copilotWorkflowId,
loadChats, loadChats,
areChatsFresh,
isSendingMessage, isSendingMessage,
} }
) )
@@ -421,8 +421,8 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
</div> </div>
</div> </div>
{/* Show loading state until fully initialized */} {/* Show loading state until fully initialized, but skip if actively streaming (resume case) */}
{!isInitialized ? ( {!isInitialized && !isSendingMessage ? (
<div className='flex h-full w-full items-center justify-center'> <div className='flex h-full w-full items-center justify-center'>
<div className='flex flex-col items-center gap-3'> <div className='flex flex-col items-center gap-3'>
<p className='text-muted-foreground text-sm'>Loading copilot</p> <p className='text-muted-foreground text-sm'>Loading copilot</p>

View File

@@ -10,7 +10,6 @@ interface UseChatHistoryProps {
activeWorkflowId: string | null activeWorkflowId: string | null
copilotWorkflowId: string | null copilotWorkflowId: string | null
loadChats: (forceRefresh: boolean) => Promise<void> loadChats: (forceRefresh: boolean) => Promise<void>
areChatsFresh: (workflowId: string) => boolean
isSendingMessage: boolean isSendingMessage: boolean
} }
@@ -21,8 +20,7 @@ interface UseChatHistoryProps {
* @returns Chat history utilities * @returns Chat history utilities
*/ */
export function useChatHistory(props: UseChatHistoryProps) { export function useChatHistory(props: UseChatHistoryProps) {
const { chats, activeWorkflowId, copilotWorkflowId, loadChats, areChatsFresh, isSendingMessage } = const { chats, activeWorkflowId, copilotWorkflowId, loadChats, isSendingMessage } = props
props
/** Groups chats by time period (Today, Yesterday, This Week, etc.) */ /** Groups chats by time period (Today, Yesterday, This Week, etc.) */
const groupedChats = useMemo(() => { const groupedChats = useMemo(() => {
@@ -80,7 +78,7 @@ export function useChatHistory(props: UseChatHistoryProps) {
/** Handles history dropdown opening and loads chats if needed (non-blocking) */ /** Handles history dropdown opening and loads chats if needed (non-blocking) */
const handleHistoryDropdownOpen = useCallback( const handleHistoryDropdownOpen = useCallback(
(open: boolean) => { (open: boolean) => {
if (open && activeWorkflowId && !isSendingMessage && !areChatsFresh(activeWorkflowId)) { if (open && activeWorkflowId && !isSendingMessage) {
loadChats(false).catch((error) => { loadChats(false).catch((error) => {
logger.error('Failed to load chat history:', error) logger.error('Failed to load chat history:', error)
}) })
@@ -90,7 +88,7 @@ export function useChatHistory(props: UseChatHistoryProps) {
logger.info('Chat history opened during stream - showing cached data only') logger.info('Chat history opened during stream - showing cached data only')
} }
}, },
[activeWorkflowId, areChatsFresh, isSendingMessage, loadChats] [activeWorkflowId, isSendingMessage, loadChats]
) )
return { return {

View File

@@ -14,6 +14,7 @@ interface UseCopilotInitializationProps {
loadAutoAllowedTools: () => Promise<void> loadAutoAllowedTools: () => Promise<void>
currentChat: any currentChat: any
isSendingMessage: boolean isSendingMessage: boolean
resumeActiveStream: () => Promise<boolean>
} }
/** /**
@@ -32,11 +33,13 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) {
loadAutoAllowedTools, loadAutoAllowedTools,
currentChat, currentChat,
isSendingMessage, isSendingMessage,
resumeActiveStream,
} = props } = props
const [isInitialized, setIsInitialized] = useState(false) const [isInitialized, setIsInitialized] = useState(false)
const lastWorkflowIdRef = useRef<string | null>(null) const lastWorkflowIdRef = useRef<string | null>(null)
const hasMountedRef = useRef(false) const hasMountedRef = useRef(false)
const hasResumedRef = useRef(false)
/** Initialize on mount - loads chats if needed. Never loads during streaming */ /** Initialize on mount - loads chats if needed. Never loads during streaming */
useEffect(() => { useEffect(() => {
@@ -105,6 +108,16 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) {
isSendingMessage, isSendingMessage,
]) ])
/** Try to resume active stream on mount - runs early, before waiting for chats */
useEffect(() => {
if (hasResumedRef.current || isSendingMessage) return
hasResumedRef.current = true
// Resume immediately on mount - don't wait for isInitialized
resumeActiveStream().catch((err) => {
logger.warn('[Copilot] Failed to resume active stream', err)
})
}, [isSendingMessage, resumeActiveStream])
/** Load auto-allowed tools once on mount - runs immediately, independent of workflow */ /** Load auto-allowed tools once on mount - runs immediately, independent of workflow */
const hasLoadedAutoAllowedToolsRef = useRef(false) const hasLoadedAutoAllowedToolsRef = useRef(false)
useEffect(() => { useEffect(() => {

View File

@@ -130,52 +130,39 @@ export function SkillInput({
onOpenChange={setOpen} onOpenChange={setOpen}
/> />
{selectedSkills.length > 0 && {selectedSkills.length > 0 && (
selectedSkills.map((stored) => { <div className='flex flex-wrap gap-[4px]'>
const fullSkill = workspaceSkills.find((s) => s.id === stored.skillId) {selectedSkills.map((stored) => {
return ( const fullSkill = workspaceSkills.find((s) => s.id === stored.skillId)
<div return (
key={stored.skillId}
className='group relative flex flex-col overflow-hidden rounded-[4px] border border-[var(--border-1)] transition-all duration-200 ease-in-out'
>
<div <div
className='flex cursor-pointer items-center justify-between gap-[8px] rounded-t-[4px] bg-[var(--surface-4)] px-[8px] py-[6.5px]' key={stored.skillId}
className='flex cursor-pointer items-center gap-[4px] rounded-[4px] border border-[var(--border-1)] bg-[var(--surface-5)] px-[6px] py-[2px] font-medium text-[12px] text-[var(--text-secondary)] hover:bg-[var(--surface-6)]'
onClick={() => { onClick={() => {
if (fullSkill && !disabled && !isPreview) { if (fullSkill && !disabled && !isPreview) {
setEditingSkill(fullSkill) setEditingSkill(fullSkill)
} }
}} }}
> >
<div className='flex min-w-0 flex-1 items-center gap-[8px]'> <AgentSkillsIcon className='h-[10px] w-[10px] text-[var(--text-tertiary)]' />
<div <span className='max-w-[140px] truncate'>{resolveSkillName(stored)}</span>
className='flex h-[16px] w-[16px] flex-shrink-0 items-center justify-center rounded-[4px]' {!disabled && !isPreview && (
style={{ backgroundColor: '#e0e0e0' }} <button
type='button'
onClick={(e) => {
e.stopPropagation()
handleRemove(stored.skillId)
}}
className='ml-[2px] rounded-[2px] p-[1px] text-[var(--text-tertiary)] hover:bg-[var(--surface-7)] hover:text-[var(--text-secondary)]'
> >
<AgentSkillsIcon className='h-[10px] w-[10px] text-[#333]' /> <XIcon className='h-[10px] w-[10px]' />
</div> </button>
<span className='truncate font-medium text-[13px] text-[var(--text-primary)]'> )}
{resolveSkillName(stored)}
</span>
</div>
<div className='flex flex-shrink-0 items-center gap-[8px]'>
{!disabled && !isPreview && (
<button
type='button'
onClick={(e) => {
e.stopPropagation()
handleRemove(stored.skillId)
}}
className='flex items-center justify-center text-[var(--text-tertiary)] transition-colors hover:text-[var(--text-primary)]'
aria-label='Remove skill'
>
<XIcon className='h-[13px] w-[13px]' />
</button>
)}
</div>
</div> </div>
</div> )
) })}
})} </div>
)}
</div> </div>
<SkillModal <SkillModal

View File

@@ -6,7 +6,6 @@ import {
isSubBlockVisibleForMode, isSubBlockVisibleForMode,
} from '@/lib/workflows/subblocks/visibility' } from '@/lib/workflows/subblocks/visibility'
import type { BlockConfig, SubBlockConfig, SubBlockType } from '@/blocks/types' import type { BlockConfig, SubBlockConfig, SubBlockType } from '@/blocks/types'
import { usePermissionConfig } from '@/hooks/use-permission-config'
import { useWorkflowDiffStore } from '@/stores/workflow-diff' import { useWorkflowDiffStore } from '@/stores/workflow-diff'
import { mergeSubblockState } from '@/stores/workflows/utils' import { mergeSubblockState } from '@/stores/workflows/utils'
import { useWorkflowStore } from '@/stores/workflows/workflow/store' import { useWorkflowStore } from '@/stores/workflows/workflow/store'
@@ -36,7 +35,6 @@ export function useEditorSubblockLayout(
const blockDataFromStore = useWorkflowStore( const blockDataFromStore = useWorkflowStore(
useCallback((state) => state.blocks?.[blockId]?.data, [blockId]) useCallback((state) => state.blocks?.[blockId]?.data, [blockId])
) )
const { config: permissionConfig } = usePermissionConfig()
return useMemo(() => { return useMemo(() => {
// Guard against missing config or block selection // Guard against missing config or block selection
@@ -102,9 +100,6 @@ export function useEditorSubblockLayout(
const visibleSubBlocks = (config.subBlocks || []).filter((block) => { const visibleSubBlocks = (config.subBlocks || []).filter((block) => {
if (block.hidden) return false if (block.hidden) return false
// Hide skill-input subblock when skills are disabled via permissions
if (block.type === 'skill-input' && permissionConfig.disableSkills) return false
// Check required feature if specified - declarative feature gating // Check required feature if specified - declarative feature gating
if (!isSubBlockFeatureEnabled(block)) return false if (!isSubBlockFeatureEnabled(block)) return false
@@ -154,6 +149,5 @@ export function useEditorSubblockLayout(
activeWorkflowId, activeWorkflowId,
isSnapshotView, isSnapshotView,
blockDataFromStore, blockDataFromStore,
permissionConfig.disableSkills,
]) ])
} }

View File

@@ -40,7 +40,6 @@ import { useCustomTools } from '@/hooks/queries/custom-tools'
import { useMcpServers, useMcpToolsQuery } from '@/hooks/queries/mcp' import { useMcpServers, useMcpToolsQuery } from '@/hooks/queries/mcp'
import { useCredentialName } from '@/hooks/queries/oauth-credentials' import { useCredentialName } from '@/hooks/queries/oauth-credentials'
import { useReactivateSchedule, useScheduleInfo } from '@/hooks/queries/schedules' import { useReactivateSchedule, useScheduleInfo } from '@/hooks/queries/schedules'
import { useSkills } from '@/hooks/queries/skills'
import { useDeployChildWorkflow } from '@/hooks/queries/workflows' import { useDeployChildWorkflow } from '@/hooks/queries/workflows'
import { useSelectorDisplayName } from '@/hooks/use-selector-display-name' import { useSelectorDisplayName } from '@/hooks/use-selector-display-name'
import { useVariablesStore } from '@/stores/panel' import { useVariablesStore } from '@/stores/panel'
@@ -619,48 +618,6 @@ const SubBlockRow = memo(function SubBlockRow({
return `${toolNames[0]}, ${toolNames[1]} +${toolNames.length - 2}` return `${toolNames[0]}, ${toolNames[1]} +${toolNames.length - 2}`
}, [subBlock?.type, rawValue, customTools, workspaceId]) }, [subBlock?.type, rawValue, customTools, workspaceId])
/**
* Hydrates skill references to display names.
* Resolves skill IDs to their current names from the skills query.
*/
const { data: workspaceSkills = [] } = useSkills(workspaceId || '')
const skillsDisplayValue = useMemo(() => {
if (subBlock?.type !== 'skill-input' || !Array.isArray(rawValue) || rawValue.length === 0) {
return null
}
interface StoredSkill {
skillId: string
name?: string
}
const skillNames = rawValue
.map((skill: StoredSkill) => {
if (!skill || typeof skill !== 'object') return null
// Priority 1: Resolve skill name from the skills query (fresh data)
if (skill.skillId) {
const foundSkill = workspaceSkills.find((s) => s.id === skill.skillId)
if (foundSkill?.name) return foundSkill.name
}
// Priority 2: Fall back to stored name (for deleted skills)
if (skill.name && typeof skill.name === 'string') return skill.name
// Priority 3: Use skillId as last resort
if (skill.skillId) return skill.skillId
return null
})
.filter((name): name is string => !!name)
if (skillNames.length === 0) return null
if (skillNames.length === 1) return skillNames[0]
if (skillNames.length === 2) return `${skillNames[0]}, ${skillNames[1]}`
return `${skillNames[0]}, ${skillNames[1]} +${skillNames.length - 2}`
}, [subBlock?.type, rawValue, workspaceSkills])
const isPasswordField = subBlock?.password === true const isPasswordField = subBlock?.password === true
const maskedValue = isPasswordField && value && value !== '-' ? '•••' : null const maskedValue = isPasswordField && value && value !== '-' ? '•••' : null
@@ -670,7 +627,6 @@ const SubBlockRow = memo(function SubBlockRow({
dropdownLabel || dropdownLabel ||
variablesDisplayValue || variablesDisplayValue ||
toolsDisplayValue || toolsDisplayValue ||
skillsDisplayValue ||
knowledgeBaseDisplayName || knowledgeBaseDisplayName ||
workflowSelectionName || workflowSelectionName ||
mcpServerDisplayName || mcpServerDisplayName ||

View File

@@ -18,7 +18,7 @@ import 'reactflow/dist/style.css'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { useShallow } from 'zustand/react/shallow' import { useShallow } from 'zustand/react/shallow'
import { useSession } from '@/lib/auth/auth-client' import { useSession } from '@/lib/auth/auth-client'
import type { OAuthConnectEventDetail } from '@/lib/copilot/tools/client/other/oauth-request-access' import type { OAuthConnectEventDetail } from '@/lib/copilot/tools/client/base-tool'
import type { OAuthProvider } from '@/lib/oauth' import type { OAuthProvider } from '@/lib/oauth'
import { BLOCK_DIMENSIONS, CONTAINER_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions' import { BLOCK_DIMENSIONS, CONTAINER_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions'
import { TriggerUtils } from '@/lib/workflows/triggers/triggers' import { TriggerUtils } from '@/lib/workflows/triggers/triggers'

View File

@@ -27,13 +27,6 @@ interface SkillModalProps {
const KEBAB_CASE_REGEX = /^[a-z0-9]+(-[a-z0-9]+)*$/ const KEBAB_CASE_REGEX = /^[a-z0-9]+(-[a-z0-9]+)*$/
interface FieldErrors {
name?: string
description?: string
content?: string
general?: string
}
export function SkillModal({ export function SkillModal({
open, open,
onOpenChange, onOpenChange,
@@ -50,7 +43,7 @@ export function SkillModal({
const [name, setName] = useState('') const [name, setName] = useState('')
const [description, setDescription] = useState('') const [description, setDescription] = useState('')
const [content, setContent] = useState('') const [content, setContent] = useState('')
const [errors, setErrors] = useState<FieldErrors>({}) const [formError, setFormError] = useState('')
const [saving, setSaving] = useState(false) const [saving, setSaving] = useState(false)
useEffect(() => { useEffect(() => {
@@ -64,7 +57,7 @@ export function SkillModal({
setDescription('') setDescription('')
setContent('') setContent('')
} }
setErrors({}) setFormError('')
} }
}, [open, initialValues]) }, [open, initialValues])
@@ -78,26 +71,24 @@ export function SkillModal({
}, [name, description, content, initialValues]) }, [name, description, content, initialValues])
const handleSave = async () => { const handleSave = async () => {
const newErrors: FieldErrors = {}
if (!name.trim()) { if (!name.trim()) {
newErrors.name = 'Name is required' setFormError('Name is required')
} else if (name.length > 64) { return
newErrors.name = 'Name must be 64 characters or less' }
} else if (!KEBAB_CASE_REGEX.test(name)) { if (name.length > 64) {
newErrors.name = 'Name must be kebab-case (e.g. my-skill)' setFormError('Name must be 64 characters or less')
return
}
if (!KEBAB_CASE_REGEX.test(name)) {
setFormError('Name must be kebab-case (e.g. my-skill)')
return
} }
if (!description.trim()) { if (!description.trim()) {
newErrors.description = 'Description is required' setFormError('Description is required')
return
} }
if (!content.trim()) { if (!content.trim()) {
newErrors.content = 'Content is required' setFormError('Content is required')
}
if (Object.keys(newErrors).length > 0) {
setErrors(newErrors)
return return
} }
@@ -122,7 +113,7 @@ export function SkillModal({
error instanceof Error && error.message.includes('already exists') error instanceof Error && error.message.includes('already exists')
? error.message ? error.message
: 'Failed to save skill. Please try again.' : 'Failed to save skill. Please try again.'
setErrors({ general: message }) setFormError(message)
} finally { } finally {
setSaving(false) setSaving(false)
} }
@@ -144,17 +135,12 @@ export function SkillModal({
value={name} value={name}
onChange={(e) => { onChange={(e) => {
setName(e.target.value) setName(e.target.value)
if (errors.name || errors.general) if (formError) setFormError('')
setErrors((prev) => ({ ...prev, name: undefined, general: undefined }))
}} }}
/> />
{errors.name ? ( <span className='text-[11px] text-[var(--text-muted)]'>
<p className='text-[12px] text-[var(--text-error)]'>{errors.name}</p> Lowercase letters, numbers, and hyphens (e.g. my-skill)
) : ( </span>
<span className='text-[11px] text-[var(--text-muted)]'>
Lowercase letters, numbers, and hyphens (e.g. my-skill)
</span>
)}
</div> </div>
<div className='flex flex-col gap-[4px]'> <div className='flex flex-col gap-[4px]'>
@@ -167,14 +153,10 @@ export function SkillModal({
value={description} value={description}
onChange={(e) => { onChange={(e) => {
setDescription(e.target.value) setDescription(e.target.value)
if (errors.description || errors.general) if (formError) setFormError('')
setErrors((prev) => ({ ...prev, description: undefined, general: undefined }))
}} }}
maxLength={1024} maxLength={1024}
/> />
{errors.description && (
<p className='text-[12px] text-[var(--text-error)]'>{errors.description}</p>
)}
</div> </div>
<div className='flex flex-col gap-[4px]'> <div className='flex flex-col gap-[4px]'>
@@ -187,19 +169,13 @@ export function SkillModal({
value={content} value={content}
onChange={(e: ChangeEvent<HTMLTextAreaElement>) => { onChange={(e: ChangeEvent<HTMLTextAreaElement>) => {
setContent(e.target.value) setContent(e.target.value)
if (errors.content || errors.general) if (formError) setFormError('')
setErrors((prev) => ({ ...prev, content: undefined, general: undefined }))
}} }}
className='min-h-[200px] resize-y font-mono text-[13px]' className='min-h-[200px] resize-y font-mono text-[13px]'
/> />
{errors.content && (
<p className='text-[12px] text-[var(--text-error)]'>{errors.content}</p>
)}
</div> </div>
{errors.general && ( {formError && <span className='text-[11px] text-[var(--text-error)]'>{formError}</span>}
<p className='text-[12px] text-[var(--text-error)]'>{errors.general}</p>
)}
</div> </div>
</ModalBody> </ModalBody>
<ModalFooter className='items-center justify-between'> <ModalFooter className='items-center justify-between'>

View File

@@ -1,10 +1,11 @@
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { AgentIcon } from '@/components/icons' import { AgentIcon } from '@/components/icons'
import { isHosted } from '@/lib/core/config/feature-flags'
import type { BlockConfig } from '@/blocks/types' import type { BlockConfig } from '@/blocks/types'
import { AuthMode } from '@/blocks/types' import { AuthMode } from '@/blocks/types'
import { getApiKeyCondition } from '@/blocks/utils'
import { import {
getBaseModelProviders, getBaseModelProviders,
getHostedModels,
getMaxTemperature, getMaxTemperature,
getProviderIcon, getProviderIcon,
getReasoningEffortValuesForModel, getReasoningEffortValuesForModel,
@@ -16,6 +17,15 @@ import {
providers, providers,
supportsTemperature, supportsTemperature,
} from '@/providers/utils' } from '@/providers/utils'
const getCurrentOllamaModels = () => {
return useProvidersStore.getState().providers.ollama.models
}
const getCurrentVLLMModels = () => {
return useProvidersStore.getState().providers.vllm.models
}
import { useProvidersStore } from '@/stores/providers' import { useProvidersStore } from '@/stores/providers'
import type { ToolResponse } from '@/tools/types' import type { ToolResponse } from '@/tools/types'
@@ -323,11 +333,11 @@ Return ONLY the JSON array.`,
id: 'azureApiVersion', id: 'azureApiVersion',
title: 'Azure API Version', title: 'Azure API Version',
type: 'short-input', type: 'short-input',
placeholder: 'Enter API version', placeholder: '2024-07-01-preview',
connectionDroppable: false, connectionDroppable: false,
condition: { condition: {
field: 'model', field: 'model',
value: [...providers['azure-openai'].models, ...providers['azure-anthropic'].models], value: providers['azure-openai'].models,
}, },
}, },
{ {
@@ -411,7 +421,23 @@ Return ONLY the JSON array.`,
password: true, password: true,
connectionDroppable: false, connectionDroppable: false,
required: true, required: true,
condition: getApiKeyCondition(), // Hide API key for hosted models, Ollama models, vLLM models, Vertex models (uses OAuth), and Bedrock (uses AWS credentials)
condition: isHosted
? {
field: 'model',
value: [...getHostedModels(), ...providers.vertex.models, ...providers.bedrock.models],
not: true, // Show for all models EXCEPT those listed
}
: () => ({
field: 'model',
value: [
...getCurrentOllamaModels(),
...getCurrentVLLMModels(),
...providers.vertex.models,
...providers.bedrock.models,
],
not: true, // Show for all models EXCEPT Ollama, vLLM, Vertex, and Bedrock models
}),
}, },
{ {
id: 'memoryType', id: 'memoryType',
@@ -689,7 +715,7 @@ Example 3 (Array Input):
}, },
model: { type: 'string', description: 'AI model to use' }, model: { type: 'string', description: 'AI model to use' },
apiKey: { type: 'string', description: 'Provider API key' }, apiKey: { type: 'string', description: 'Provider API key' },
azureEndpoint: { type: 'string', description: 'Azure endpoint URL' }, azureEndpoint: { type: 'string', description: 'Azure OpenAI endpoint URL' },
azureApiVersion: { type: 'string', description: 'Azure API version' }, azureApiVersion: { type: 'string', description: 'Azure API version' },
vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' }, vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' },
vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' }, vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' },

View File

@@ -76,9 +76,8 @@ export const TranslateBlock: BlockConfig = {
vertexProject: params.vertexProject, vertexProject: params.vertexProject,
vertexLocation: params.vertexLocation, vertexLocation: params.vertexLocation,
vertexCredential: params.vertexCredential, vertexCredential: params.vertexCredential,
bedrockAccessKeyId: params.bedrockAccessKeyId,
bedrockSecretKey: params.bedrockSecretKey,
bedrockRegion: params.bedrockRegion, bedrockRegion: params.bedrockRegion,
bedrockSecretKey: params.bedrockSecretKey,
}), }),
}, },
}, },

View File

@@ -208,7 +208,7 @@ export interface SubBlockConfig {
not?: boolean not?: boolean
} }
} }
| ((values?: Record<string, unknown>) => { | (() => {
field: string field: string
value: string | number | boolean | Array<string | number | boolean> value: string | number | boolean | Array<string | number | boolean>
not?: boolean not?: boolean
@@ -261,7 +261,7 @@ export interface SubBlockConfig {
not?: boolean not?: boolean
} }
} }
| ((values?: Record<string, unknown>) => { | (() => {
field: string field: string
value: string | number | boolean | Array<string | number | boolean> value: string | number | boolean | Array<string | number | boolean>
not?: boolean not?: boolean

View File

@@ -1,6 +1,6 @@
import { isHosted } from '@/lib/core/config/feature-flags' import { isHosted } from '@/lib/core/config/feature-flags'
import type { BlockOutput, OutputFieldDefinition, SubBlockConfig } from '@/blocks/types' import type { BlockOutput, OutputFieldDefinition, SubBlockConfig } from '@/blocks/types'
import { getHostedModels, getProviderFromModel, providers } from '@/providers/utils' import { getHostedModels, providers } from '@/providers/utils'
import { useProvidersStore } from '@/stores/providers/store' import { useProvidersStore } from '@/stores/providers/store'
/** /**
@@ -48,54 +48,11 @@ const getCurrentOllamaModels = () => {
return useProvidersStore.getState().providers.ollama.models return useProvidersStore.getState().providers.ollama.models
} }
function buildModelVisibilityCondition(model: string, shouldShow: boolean) { /**
if (!model) { * Helper to get current vLLM models from store
return { field: 'model', value: '__no_model_selected__' } */
} const getCurrentVLLMModels = () => {
return useProvidersStore.getState().providers.vllm.models
return shouldShow ? { field: 'model', value: model } : { field: 'model', value: model, not: true }
}
function shouldRequireApiKeyForModel(model: string): boolean {
const normalizedModel = model.trim().toLowerCase()
if (!normalizedModel) return false
const hostedModels = getHostedModels()
const isHostedModel = hostedModels.some(
(hostedModel) => hostedModel.toLowerCase() === normalizedModel
)
if (isHosted && isHostedModel) return false
if (normalizedModel.startsWith('vertex/') || normalizedModel.startsWith('bedrock/')) {
return false
}
if (normalizedModel.startsWith('vllm/')) {
return false
}
const currentOllamaModels = getCurrentOllamaModels()
if (currentOllamaModels.some((ollamaModel) => ollamaModel.toLowerCase() === normalizedModel)) {
return false
}
if (!isHosted) {
try {
const providerId = getProviderFromModel(model)
if (
providerId === 'ollama' ||
providerId === 'vllm' ||
providerId === 'vertex' ||
providerId === 'bedrock'
) {
return false
}
} catch {
// If model resolution fails, fall through and require an API key.
}
}
return true
} }
/** /**
@@ -103,16 +60,27 @@ function shouldRequireApiKeyForModel(model: string): boolean {
* Handles hosted vs self-hosted environments and excludes providers that don't need API key. * Handles hosted vs self-hosted environments and excludes providers that don't need API key.
*/ */
export function getApiKeyCondition() { export function getApiKeyCondition() {
return (values?: Record<string, unknown>) => { return isHosted
const model = typeof values?.model === 'string' ? values.model : '' ? {
const shouldShow = shouldRequireApiKeyForModel(model) field: 'model',
return buildModelVisibilityCondition(model, shouldShow) value: [...getHostedModels(), ...providers.vertex.models, ...providers.bedrock.models],
} not: true,
}
: () => ({
field: 'model',
value: [
...getCurrentOllamaModels(),
...getCurrentVLLMModels(),
...providers.vertex.models,
...providers.bedrock.models,
],
not: true,
})
} }
/** /**
* Returns the standard provider credential subblocks used by LLM-based blocks. * Returns the standard provider credential subblocks used by LLM-based blocks.
* This includes: Vertex AI OAuth, API Key, Azure (OpenAI + Anthropic), Vertex AI config, and Bedrock config. * This includes: Vertex AI OAuth, API Key, Azure OpenAI, Vertex AI config, and Bedrock config.
* *
* Usage: Spread into your block's subBlocks array after block-specific fields * Usage: Spread into your block's subBlocks array after block-specific fields
*/ */
@@ -143,25 +111,25 @@ export function getProviderCredentialSubBlocks(): SubBlockConfig[] {
}, },
{ {
id: 'azureEndpoint', id: 'azureEndpoint',
title: 'Azure Endpoint', title: 'Azure OpenAI Endpoint',
type: 'short-input', type: 'short-input',
password: true, password: true,
placeholder: 'https://your-resource.services.ai.azure.com', placeholder: 'https://your-resource.openai.azure.com',
connectionDroppable: false, connectionDroppable: false,
condition: { condition: {
field: 'model', field: 'model',
value: [...providers['azure-openai'].models, ...providers['azure-anthropic'].models], value: providers['azure-openai'].models,
}, },
}, },
{ {
id: 'azureApiVersion', id: 'azureApiVersion',
title: 'Azure API Version', title: 'Azure API Version',
type: 'short-input', type: 'short-input',
placeholder: 'Enter API version', placeholder: '2024-07-01-preview',
connectionDroppable: false, connectionDroppable: false,
condition: { condition: {
field: 'model', field: 'model',
value: [...providers['azure-openai'].models, ...providers['azure-anthropic'].models], value: providers['azure-openai'].models,
}, },
}, },
{ {
@@ -234,7 +202,7 @@ export function getProviderCredentialSubBlocks(): SubBlockConfig[] {
*/ */
export const PROVIDER_CREDENTIAL_INPUTS = { export const PROVIDER_CREDENTIAL_INPUTS = {
apiKey: { type: 'string', description: 'Provider API key' }, apiKey: { type: 'string', description: 'Provider API key' },
azureEndpoint: { type: 'string', description: 'Azure endpoint URL' }, azureEndpoint: { type: 'string', description: 'Azure OpenAI endpoint URL' },
azureApiVersion: { type: 'string', description: 'Azure API version' }, azureApiVersion: { type: 'string', description: 'Azure API version' },
vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' }, vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' },
vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' }, vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' },

View File

@@ -5468,18 +5468,18 @@ export function AgentSkillsIcon(props: SVGProps<SVGSVGElement>) {
<svg <svg
{...props} {...props}
xmlns='http://www.w3.org/2000/svg' xmlns='http://www.w3.org/2000/svg'
width='16' width='24'
height='16' height='24'
viewBox='0 0 16 16' viewBox='0 0 32 32'
fill='none' fill='none'
> >
<path d='M16 0.5L29.4234 8.25V23.75L16 31.5L2.57661 23.75V8.25L16 0.5Z' fill='currentColor' />
<path <path
d='M8 1L14.0622 4.5V11.5L8 15L1.93782 11.5V4.5L8 1Z' d='M16 6L24.6603 11V21L16 26L7.33975 21V11L16 6Z'
stroke='currentColor' fill='currentColor'
strokeWidth='1.5' stroke='var(--background, white)'
fill='none' strokeWidth='3'
/> />
<path d='M8 4.5L11 6.25V9.75L8 11.5L5 9.75V6.25L8 4.5Z' fill='currentColor' />
</svg> </svg>
) )
} }

View File

@@ -5,10 +5,43 @@ import { CheckCircle, ChevronDown, ChevronRight, Loader2, Settings, XCircle } fr
import { Badge } from '@/components/emcn' import { Badge } from '@/components/emcn'
import { Button } from '@/components/ui/button' import { Button } from '@/components/ui/button'
import { Collapsible, CollapsibleContent, CollapsibleTrigger } from '@/components/ui/collapsible' import { Collapsible, CollapsibleContent, CollapsibleTrigger } from '@/components/ui/collapsible'
import type { ToolCallGroup, ToolCallState } from '@/lib/copilot/types'
import { cn } from '@/lib/core/utils/cn' import { cn } from '@/lib/core/utils/cn'
import { formatDuration } from '@/lib/core/utils/formatting' import { formatDuration } from '@/lib/core/utils/formatting'
interface ToolCallState {
id: string
name: string
displayName?: string
parameters?: Record<string, unknown>
state:
| 'detecting'
| 'pending'
| 'executing'
| 'completed'
| 'error'
| 'rejected'
| 'applied'
| 'ready_for_review'
| 'aborted'
| 'skipped'
| 'background'
startTime?: number
endTime?: number
duration?: number
result?: unknown
error?: string
progress?: string
}
interface ToolCallGroup {
id: string
toolCalls: ToolCallState[]
status: 'pending' | 'in_progress' | 'completed' | 'error'
startTime?: number
endTime?: number
summary?: string
}
interface ToolCallProps { interface ToolCallProps {
toolCall: ToolCallState toolCall: ToolCallState
isCompact?: boolean isCompact?: boolean

View File

@@ -4,6 +4,7 @@ import { BlockType } from '@/executor/constants'
import type { DAG } from '@/executor/dag/builder' import type { DAG } from '@/executor/dag/builder'
import type { EdgeManager } from '@/executor/execution/edge-manager' import type { EdgeManager } from '@/executor/execution/edge-manager'
import { serializePauseSnapshot } from '@/executor/execution/snapshot-serializer' import { serializePauseSnapshot } from '@/executor/execution/snapshot-serializer'
import type { SerializableExecutionState } from '@/executor/execution/types'
import type { NodeExecutionOrchestrator } from '@/executor/orchestrators/node' import type { NodeExecutionOrchestrator } from '@/executor/orchestrators/node'
import type { import type {
ExecutionContext, ExecutionContext,
@@ -135,6 +136,7 @@ export class ExecutionEngine {
success: false, success: false,
output: this.finalOutput, output: this.finalOutput,
logs: this.context.blockLogs, logs: this.context.blockLogs,
executionState: this.getSerializableExecutionState(),
metadata: this.context.metadata, metadata: this.context.metadata,
status: 'cancelled', status: 'cancelled',
} }
@@ -144,6 +146,7 @@ export class ExecutionEngine {
success: true, success: true,
output: this.finalOutput, output: this.finalOutput,
logs: this.context.blockLogs, logs: this.context.blockLogs,
executionState: this.getSerializableExecutionState(),
metadata: this.context.metadata, metadata: this.context.metadata,
} }
} catch (error) { } catch (error) {
@@ -157,6 +160,7 @@ export class ExecutionEngine {
success: false, success: false,
output: this.finalOutput, output: this.finalOutput,
logs: this.context.blockLogs, logs: this.context.blockLogs,
executionState: this.getSerializableExecutionState(),
metadata: this.context.metadata, metadata: this.context.metadata,
status: 'cancelled', status: 'cancelled',
} }
@@ -459,6 +463,7 @@ export class ExecutionEngine {
success: true, success: true,
output: this.collectPauseResponses(), output: this.collectPauseResponses(),
logs: this.context.blockLogs, logs: this.context.blockLogs,
executionState: this.getSerializableExecutionState(snapshotSeed),
metadata: this.context.metadata, metadata: this.context.metadata,
status: 'paused', status: 'paused',
pausePoints, pausePoints,
@@ -466,6 +471,24 @@ export class ExecutionEngine {
} }
} }
private getSerializableExecutionState(snapshotSeed?: {
snapshot: string
}): SerializableExecutionState | undefined {
try {
const serializedSnapshot =
snapshotSeed?.snapshot ?? serializePauseSnapshot(this.context, [], this.dag).snapshot
const parsedSnapshot = JSON.parse(serializedSnapshot) as {
state?: SerializableExecutionState
}
return parsedSnapshot.state
} catch (error) {
logger.warn('Failed to serialize execution state', {
error: error instanceof Error ? error.message : String(error),
})
return undefined
}
}
private collectPauseResponses(): NormalizedBlockOutput { private collectPauseResponses(): NormalizedBlockOutput {
const responses = Array.from(this.pausedBlocks.values()).map((pause) => pause.response) const responses = Array.from(this.pausedBlocks.values()).map((pause) => pause.response)

View File

@@ -326,7 +326,6 @@ export class AgentBlockHandler implements BlockHandler {
_context: { _context: {
workflowId: ctx.workflowId, workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId, workspaceId: ctx.workspaceId,
userId: ctx.userId,
isDeployedContext: ctx.isDeployedContext, isDeployedContext: ctx.isDeployedContext,
}, },
}, },
@@ -378,9 +377,6 @@ export class AgentBlockHandler implements BlockHandler {
if (ctx.workflowId) { if (ctx.workflowId) {
params.workflowId = ctx.workflowId params.workflowId = ctx.workflowId
} }
if (ctx.userId) {
params.userId = ctx.userId
}
const url = buildAPIUrl('/api/tools/custom', params) const url = buildAPIUrl('/api/tools/custom', params)
const response = await fetch(url.toString(), { const response = await fetch(url.toString(), {
@@ -491,9 +487,7 @@ export class AgentBlockHandler implements BlockHandler {
usageControl: tool.usageControl || 'auto', usageControl: tool.usageControl || 'auto',
executeFunction: async (callParams: Record<string, any>) => { executeFunction: async (callParams: Record<string, any>) => {
const headers = await buildAuthHeaders() const headers = await buildAuthHeaders()
const execParams: Record<string, string> = {} const execUrl = buildAPIUrl('/api/mcp/tools/execute')
if (ctx.userId) execParams.userId = ctx.userId
const execUrl = buildAPIUrl('/api/mcp/tools/execute', execParams)
const execResponse = await fetch(execUrl.toString(), { const execResponse = await fetch(execUrl.toString(), {
method: 'POST', method: 'POST',
@@ -602,7 +596,6 @@ export class AgentBlockHandler implements BlockHandler {
serverId, serverId,
workspaceId: ctx.workspaceId, workspaceId: ctx.workspaceId,
workflowId: ctx.workflowId, workflowId: ctx.workflowId,
...(ctx.userId ? { userId: ctx.userId } : {}),
}) })
const maxAttempts = 2 const maxAttempts = 2
@@ -677,9 +670,7 @@ export class AgentBlockHandler implements BlockHandler {
usageControl: tool.usageControl || 'auto', usageControl: tool.usageControl || 'auto',
executeFunction: async (callParams: Record<string, any>) => { executeFunction: async (callParams: Record<string, any>) => {
const headers = await buildAuthHeaders() const headers = await buildAuthHeaders()
const discoverExecParams: Record<string, string> = {} const execUrl = buildAPIUrl('/api/mcp/tools/execute')
if (ctx.userId) discoverExecParams.userId = ctx.userId
const execUrl = buildAPIUrl('/api/mcp/tools/execute', discoverExecParams)
const execResponse = await fetch(execUrl.toString(), { const execResponse = await fetch(execUrl.toString(), {
method: 'POST', method: 'POST',
@@ -1064,7 +1055,6 @@ export class AgentBlockHandler implements BlockHandler {
responseFormat: providerRequest.responseFormat, responseFormat: providerRequest.responseFormat,
workflowId: providerRequest.workflowId, workflowId: providerRequest.workflowId,
workspaceId: ctx.workspaceId, workspaceId: ctx.workspaceId,
userId: ctx.userId,
stream: providerRequest.stream, stream: providerRequest.stream,
messages: 'messages' in providerRequest ? providerRequest.messages : undefined, messages: 'messages' in providerRequest ? providerRequest.messages : undefined,
environmentVariables: ctx.environmentVariables || {}, environmentVariables: ctx.environmentVariables || {},

View File

@@ -72,7 +72,6 @@ export class ApiBlockHandler implements BlockHandler {
workflowId: ctx.workflowId, workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId, workspaceId: ctx.workspaceId,
executionId: ctx.executionId, executionId: ctx.executionId,
userId: ctx.userId,
isDeployedContext: ctx.isDeployedContext, isDeployedContext: ctx.isDeployedContext,
}, },
}, },

View File

@@ -48,7 +48,6 @@ export async function evaluateConditionExpression(
_context: { _context: {
workflowId: ctx.workflowId, workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId, workspaceId: ctx.workspaceId,
userId: ctx.userId,
isDeployedContext: ctx.isDeployedContext, isDeployedContext: ctx.isDeployedContext,
}, },
}, },

View File

@@ -104,7 +104,7 @@ export class EvaluatorBlockHandler implements BlockHandler {
} }
try { try {
const url = buildAPIUrl('/api/providers', ctx.userId ? { userId: ctx.userId } : {}) const url = buildAPIUrl('/api/providers')
const providerRequest: Record<string, any> = { const providerRequest: Record<string, any> = {
provider: providerId, provider: providerId,
@@ -121,17 +121,26 @@ export class EvaluatorBlockHandler implements BlockHandler {
temperature: EVALUATOR.DEFAULT_TEMPERATURE, temperature: EVALUATOR.DEFAULT_TEMPERATURE,
apiKey: finalApiKey, apiKey: finalApiKey,
azureEndpoint: inputs.azureEndpoint,
azureApiVersion: inputs.azureApiVersion,
vertexProject: evaluatorConfig.vertexProject,
vertexLocation: evaluatorConfig.vertexLocation,
bedrockAccessKeyId: evaluatorConfig.bedrockAccessKeyId,
bedrockSecretKey: evaluatorConfig.bedrockSecretKey,
bedrockRegion: evaluatorConfig.bedrockRegion,
workflowId: ctx.workflowId, workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId, workspaceId: ctx.workspaceId,
} }
if (providerId === 'vertex') {
providerRequest.vertexProject = evaluatorConfig.vertexProject
providerRequest.vertexLocation = evaluatorConfig.vertexLocation
}
if (providerId === 'azure-openai') {
providerRequest.azureEndpoint = inputs.azureEndpoint
providerRequest.azureApiVersion = inputs.azureApiVersion
}
if (providerId === 'bedrock') {
providerRequest.bedrockAccessKeyId = evaluatorConfig.bedrockAccessKeyId
providerRequest.bedrockSecretKey = evaluatorConfig.bedrockSecretKey
providerRequest.bedrockRegion = evaluatorConfig.bedrockRegion
}
const response = await fetch(url.toString(), { const response = await fetch(url.toString(), {
method: 'POST', method: 'POST',
headers: await buildAuthHeaders(), headers: await buildAuthHeaders(),

View File

@@ -39,7 +39,6 @@ export class FunctionBlockHandler implements BlockHandler {
_context: { _context: {
workflowId: ctx.workflowId, workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId, workspaceId: ctx.workspaceId,
userId: ctx.userId,
isDeployedContext: ctx.isDeployedContext, isDeployedContext: ctx.isDeployedContext,
}, },
}, },

View File

@@ -66,7 +66,6 @@ export class GenericBlockHandler implements BlockHandler {
workflowId: ctx.workflowId, workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId, workspaceId: ctx.workspaceId,
executionId: ctx.executionId, executionId: ctx.executionId,
userId: ctx.userId,
isDeployedContext: ctx.isDeployedContext, isDeployedContext: ctx.isDeployedContext,
}, },
}, },

View File

@@ -605,7 +605,6 @@ export class HumanInTheLoopBlockHandler implements BlockHandler {
_context: { _context: {
workflowId: ctx.workflowId, workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId, workspaceId: ctx.workspaceId,
userId: ctx.userId,
isDeployedContext: ctx.isDeployedContext, isDeployedContext: ctx.isDeployedContext,
}, },
blockData: blockDataWithPause, blockData: blockDataWithPause,

View File

@@ -80,7 +80,6 @@ export class RouterBlockHandler implements BlockHandler {
try { try {
const url = new URL('/api/providers', getBaseUrl()) const url = new URL('/api/providers', getBaseUrl())
if (ctx.userId) url.searchParams.set('userId', ctx.userId)
const messages = [{ role: 'user', content: routerConfig.prompt }] const messages = [{ role: 'user', content: routerConfig.prompt }]
const systemPrompt = generateRouterPrompt(routerConfig.prompt, targetBlocks) const systemPrompt = generateRouterPrompt(routerConfig.prompt, targetBlocks)
@@ -97,17 +96,26 @@ export class RouterBlockHandler implements BlockHandler {
context: JSON.stringify(messages), context: JSON.stringify(messages),
temperature: ROUTER.INFERENCE_TEMPERATURE, temperature: ROUTER.INFERENCE_TEMPERATURE,
apiKey: finalApiKey, apiKey: finalApiKey,
azureEndpoint: inputs.azureEndpoint,
azureApiVersion: inputs.azureApiVersion,
vertexProject: routerConfig.vertexProject,
vertexLocation: routerConfig.vertexLocation,
bedrockAccessKeyId: routerConfig.bedrockAccessKeyId,
bedrockSecretKey: routerConfig.bedrockSecretKey,
bedrockRegion: routerConfig.bedrockRegion,
workflowId: ctx.workflowId, workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId, workspaceId: ctx.workspaceId,
} }
if (providerId === 'vertex') {
providerRequest.vertexProject = routerConfig.vertexProject
providerRequest.vertexLocation = routerConfig.vertexLocation
}
if (providerId === 'azure-openai') {
providerRequest.azureEndpoint = inputs.azureEndpoint
providerRequest.azureApiVersion = inputs.azureApiVersion
}
if (providerId === 'bedrock') {
providerRequest.bedrockAccessKeyId = routerConfig.bedrockAccessKeyId
providerRequest.bedrockSecretKey = routerConfig.bedrockSecretKey
providerRequest.bedrockRegion = routerConfig.bedrockRegion
}
const response = await fetch(url.toString(), { const response = await fetch(url.toString(), {
method: 'POST', method: 'POST',
headers: await buildAuthHeaders(), headers: await buildAuthHeaders(),
@@ -210,7 +218,6 @@ export class RouterBlockHandler implements BlockHandler {
try { try {
const url = new URL('/api/providers', getBaseUrl()) const url = new URL('/api/providers', getBaseUrl())
if (ctx.userId) url.searchParams.set('userId', ctx.userId)
const messages = [{ role: 'user', content: routerConfig.context }] const messages = [{ role: 'user', content: routerConfig.context }]
const systemPrompt = generateRouterV2Prompt(routerConfig.context, routes) const systemPrompt = generateRouterV2Prompt(routerConfig.context, routes)
@@ -227,13 +234,6 @@ export class RouterBlockHandler implements BlockHandler {
context: JSON.stringify(messages), context: JSON.stringify(messages),
temperature: ROUTER.INFERENCE_TEMPERATURE, temperature: ROUTER.INFERENCE_TEMPERATURE,
apiKey: finalApiKey, apiKey: finalApiKey,
azureEndpoint: inputs.azureEndpoint,
azureApiVersion: inputs.azureApiVersion,
vertexProject: routerConfig.vertexProject,
vertexLocation: routerConfig.vertexLocation,
bedrockAccessKeyId: routerConfig.bedrockAccessKeyId,
bedrockSecretKey: routerConfig.bedrockSecretKey,
bedrockRegion: routerConfig.bedrockRegion,
workflowId: ctx.workflowId, workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId, workspaceId: ctx.workspaceId,
responseFormat: { responseFormat: {
@@ -257,6 +257,22 @@ export class RouterBlockHandler implements BlockHandler {
}, },
} }
if (providerId === 'vertex') {
providerRequest.vertexProject = routerConfig.vertexProject
providerRequest.vertexLocation = routerConfig.vertexLocation
}
if (providerId === 'azure-openai') {
providerRequest.azureEndpoint = inputs.azureEndpoint
providerRequest.azureApiVersion = inputs.azureApiVersion
}
if (providerId === 'bedrock') {
providerRequest.bedrockAccessKeyId = routerConfig.bedrockAccessKeyId
providerRequest.bedrockSecretKey = routerConfig.bedrockSecretKey
providerRequest.bedrockRegion = routerConfig.bedrockRegion
}
const response = await fetch(url.toString(), { const response = await fetch(url.toString(), {
method: 'POST', method: 'POST',
headers: await buildAuthHeaders(), headers: await buildAuthHeaders(),

View File

@@ -511,8 +511,6 @@ export class LoopOrchestrator {
contextVariables: {}, contextVariables: {},
timeoutMs: LOOP_CONDITION_TIMEOUT_MS, timeoutMs: LOOP_CONDITION_TIMEOUT_MS,
requestId, requestId,
ownerKey: `user:${ctx.userId}`,
ownerWeight: 1,
}) })
if (vmResult.error) { if (vmResult.error) {

View File

@@ -1,6 +1,7 @@
import type { TraceSpan } from '@/lib/logs/types' import type { TraceSpan } from '@/lib/logs/types'
import type { PermissionGroupConfig } from '@/lib/permission-groups/types' import type { PermissionGroupConfig } from '@/lib/permission-groups/types'
import type { BlockOutput } from '@/blocks/types' import type { BlockOutput } from '@/blocks/types'
import type { SerializableExecutionState } from '@/executor/execution/types'
import type { RunFromBlockContext } from '@/executor/utils/run-from-block' import type { RunFromBlockContext } from '@/executor/utils/run-from-block'
import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types' import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types'
@@ -302,6 +303,7 @@ export interface ExecutionResult {
output: NormalizedBlockOutput output: NormalizedBlockOutput
error?: string error?: string
logs?: BlockLog[] logs?: BlockLog[]
executionState?: SerializableExecutionState
metadata?: ExecutionMetadata metadata?: ExecutionMetadata
status?: 'completed' | 'paused' | 'cancelled' status?: 'completed' | 'paused' | 'cancelled'
pausePoints?: PausePoint[] pausePoints?: PausePoint[]

View File

@@ -1,5 +1,12 @@
import { useCallback } from 'react' import { useCallback } from 'react'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
declare global {
interface Window {
__skipDiffRecording?: boolean
}
}
import type { Edge } from 'reactflow' import type { Edge } from 'reactflow'
import { useSession } from '@/lib/auth/auth-client' import { useSession } from '@/lib/auth/auth-client'
import { enqueueReplaceWorkflowState } from '@/lib/workflows/operations/socket-operations' import { enqueueReplaceWorkflowState } from '@/lib/workflows/operations/socket-operations'
@@ -908,7 +915,7 @@ export function useUndoRedo() {
// Set flag to skip recording during this operation // Set flag to skip recording during this operation
;(window as any).__skipDiffRecording = true window.__skipDiffRecording = true
try { try {
// Restore baseline state and broadcast to everyone // Restore baseline state and broadcast to everyone
if (baselineSnapshot && activeWorkflowId) { if (baselineSnapshot && activeWorkflowId) {
@@ -945,7 +952,7 @@ export function useUndoRedo() {
logger.info('Clearing diff UI state') logger.info('Clearing diff UI state')
useWorkflowDiffStore.getState().clearDiff({ restoreBaseline: false }) useWorkflowDiffStore.getState().clearDiff({ restoreBaseline: false })
} finally { } finally {
;(window as any).__skipDiffRecording = false window.__skipDiffRecording = false
} }
logger.info('Undid apply-diff operation successfully') logger.info('Undid apply-diff operation successfully')
@@ -965,7 +972,7 @@ export function useUndoRedo() {
// Set flag to skip recording during this operation // Set flag to skip recording during this operation
;(window as any).__skipDiffRecording = true window.__skipDiffRecording = true
try { try {
// Apply the before-accept state (with markers for this user) // Apply the before-accept state (with markers for this user)
useWorkflowStore.getState().replaceWorkflowState(beforeAccept) useWorkflowStore.getState().replaceWorkflowState(beforeAccept)
@@ -1004,7 +1011,7 @@ export function useUndoRedo() {
diffAnalysis: diffAnalysis, diffAnalysis: diffAnalysis,
}) })
} finally { } finally {
;(window as any).__skipDiffRecording = false window.__skipDiffRecording = false
} }
logger.info('Undid accept-diff operation - restored diff view') logger.info('Undid accept-diff operation - restored diff view')
@@ -1018,7 +1025,7 @@ export function useUndoRedo() {
const { useWorkflowStore } = await import('@/stores/workflows/workflow/store') const { useWorkflowStore } = await import('@/stores/workflows/workflow/store')
const { useSubBlockStore } = await import('@/stores/workflows/subblock/store') const { useSubBlockStore } = await import('@/stores/workflows/subblock/store')
;(window as any).__skipDiffRecording = true window.__skipDiffRecording = true
try { try {
// Apply the before-reject state (with markers for this user) // Apply the before-reject state (with markers for this user)
useWorkflowStore.getState().replaceWorkflowState(beforeReject) useWorkflowStore.getState().replaceWorkflowState(beforeReject)
@@ -1055,7 +1062,7 @@ export function useUndoRedo() {
diffAnalysis: diffAnalysis, diffAnalysis: diffAnalysis,
}) })
} finally { } finally {
;(window as any).__skipDiffRecording = false window.__skipDiffRecording = false
} }
logger.info('Undid reject-diff operation - restored diff view') logger.info('Undid reject-diff operation - restored diff view')
@@ -1526,7 +1533,7 @@ export function useUndoRedo() {
// Set flag to skip recording during this operation // Set flag to skip recording during this operation
;(window as any).__skipDiffRecording = true window.__skipDiffRecording = true
try { try {
// Manually apply the proposed state and set up diff store (similar to setProposedChanges but with original baseline) // Manually apply the proposed state and set up diff store (similar to setProposedChanges but with original baseline)
const diffStore = useWorkflowDiffStore.getState() const diffStore = useWorkflowDiffStore.getState()
@@ -1567,7 +1574,7 @@ export function useUndoRedo() {
diffAnalysis: diffAnalysis, diffAnalysis: diffAnalysis,
}) })
} finally { } finally {
;(window as any).__skipDiffRecording = false window.__skipDiffRecording = false
} }
logger.info('Redid apply-diff operation') logger.info('Redid apply-diff operation')
@@ -1583,7 +1590,7 @@ export function useUndoRedo() {
// Set flag to skip recording during this operation // Set flag to skip recording during this operation
;(window as any).__skipDiffRecording = true window.__skipDiffRecording = true
try { try {
// Clear diff state FIRST to prevent flash of colors (local UI only) // Clear diff state FIRST to prevent flash of colors (local UI only)
// Use setState directly to ensure synchronous clearing // Use setState directly to ensure synchronous clearing
@@ -1621,7 +1628,7 @@ export function useUndoRedo() {
operationId: opId, operationId: opId,
}) })
} finally { } finally {
;(window as any).__skipDiffRecording = false window.__skipDiffRecording = false
} }
logger.info('Redid accept-diff operation - cleared diff view') logger.info('Redid accept-diff operation - cleared diff view')
@@ -1635,7 +1642,7 @@ export function useUndoRedo() {
const { useWorkflowStore } = await import('@/stores/workflows/workflow/store') const { useWorkflowStore } = await import('@/stores/workflows/workflow/store')
const { useSubBlockStore } = await import('@/stores/workflows/subblock/store') const { useSubBlockStore } = await import('@/stores/workflows/subblock/store')
;(window as any).__skipDiffRecording = true window.__skipDiffRecording = true
try { try {
// Clear diff state FIRST to prevent flash of colors (local UI only) // Clear diff state FIRST to prevent flash of colors (local UI only)
// Use setState directly to ensure synchronous clearing // Use setState directly to ensure synchronous clearing
@@ -1673,7 +1680,7 @@ export function useUndoRedo() {
operationId: opId, operationId: opId,
}) })
} finally { } finally {
;(window as any).__skipDiffRecording = false window.__skipDiffRecording = false
} }
logger.info('Redid reject-diff operation - cleared diff view') logger.info('Redid reject-diff operation - cleared diff view')

View File

@@ -2,13 +2,13 @@ import { db } from '@sim/db'
import { account, workflow as workflowTable } from '@sim/db/schema' import { account, workflow as workflowTable } from '@sim/db/schema'
import { eq } from 'drizzle-orm' import { eq } from 'drizzle-orm'
import type { NextRequest } from 'next/server' import type { NextRequest } from 'next/server'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { checkHybridAuth } from '@/lib/auth/hybrid'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils' import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
export interface CredentialAccessResult { export interface CredentialAccessResult {
ok: boolean ok: boolean
error?: string error?: string
authType?: 'session' | 'internal_jwt' authType?: 'session' | 'api_key' | 'internal_jwt'
requesterUserId?: string requesterUserId?: string
credentialOwnerUserId?: string credentialOwnerUserId?: string
workspaceId?: string workspaceId?: string
@@ -16,10 +16,10 @@ export interface CredentialAccessResult {
/** /**
* Centralizes auth + collaboration rules for credential use. * Centralizes auth + collaboration rules for credential use.
* - Uses checkSessionOrInternalAuth to authenticate the caller * - Uses checkHybridAuth to authenticate the caller
* - Fetches credential owner * - Fetches credential owner
* - Authorization rules: * - Authorization rules:
* - session: allow if requester owns the credential; otherwise require workflowId and * - session/api_key: allow if requester owns the credential; otherwise require workflowId and
* verify BOTH requester and owner have access to the workflow's workspace * verify BOTH requester and owner have access to the workflow's workspace
* - internal_jwt: require workflowId (by default) and verify credential owner has access to the * - internal_jwt: require workflowId (by default) and verify credential owner has access to the
* workflow's workspace (requester identity is the system/workflow) * workflow's workspace (requester identity is the system/workflow)
@@ -30,9 +30,7 @@ export async function authorizeCredentialUse(
): Promise<CredentialAccessResult> { ): Promise<CredentialAccessResult> {
const { credentialId, workflowId, requireWorkflowIdForInternal = true } = params const { credentialId, workflowId, requireWorkflowIdForInternal = true } = params
const auth = await checkSessionOrInternalAuth(request, { const auth = await checkHybridAuth(request, { requireWorkflowId: requireWorkflowIdForInternal })
requireWorkflowId: requireWorkflowIdForInternal,
})
if (!auth.success || !auth.userId) { if (!auth.success || !auth.userId) {
return { ok: false, error: auth.error || 'Authentication required' } return { ok: false, error: auth.error || 'Authentication required' }
} }
@@ -54,7 +52,7 @@ export async function authorizeCredentialUse(
if (auth.authType !== 'internal_jwt' && auth.userId === credentialOwnerUserId) { if (auth.authType !== 'internal_jwt' && auth.userId === credentialOwnerUserId) {
return { return {
ok: true, ok: true,
authType: auth.authType as CredentialAccessResult['authType'], authType: auth.authType,
requesterUserId: auth.userId, requesterUserId: auth.userId,
credentialOwnerUserId, credentialOwnerUserId,
} }
@@ -87,14 +85,14 @@ export async function authorizeCredentialUse(
} }
return { return {
ok: true, ok: true,
authType: auth.authType as CredentialAccessResult['authType'], authType: auth.authType,
requesterUserId: auth.userId, requesterUserId: auth.userId,
credentialOwnerUserId, credentialOwnerUserId,
workspaceId: wf.workspaceId, workspaceId: wf.workspaceId,
} }
} }
// Session: verify BOTH requester and owner belong to the workflow's workspace // Session/API key: verify BOTH requester and owner belong to the workflow's workspace
const requesterPerm = await getUserEntityPermissions(auth.userId, 'workspace', wf.workspaceId) const requesterPerm = await getUserEntityPermissions(auth.userId, 'workspace', wf.workspaceId)
const ownerPerm = await getUserEntityPermissions( const ownerPerm = await getUserEntityPermissions(
credentialOwnerUserId, credentialOwnerUserId,
@@ -107,7 +105,7 @@ export async function authorizeCredentialUse(
return { return {
ok: true, ok: true,
authType: auth.authType as CredentialAccessResult['authType'], authType: auth.authType,
requesterUserId: auth.userId, requesterUserId: auth.userId,
credentialOwnerUserId, credentialOwnerUserId,
workspaceId: wf.workspaceId, workspaceId: wf.workspaceId,

View File

@@ -1,4 +1,7 @@
import { db } from '@sim/db'
import { workflow } from '@sim/db/schema'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import type { NextRequest } from 'next/server' import type { NextRequest } from 'next/server'
import { authenticateApiKeyFromHeader, updateApiKeyLastUsed } from '@/lib/api-key/service' import { authenticateApiKeyFromHeader, updateApiKeyLastUsed } from '@/lib/api-key/service'
import { getSession } from '@/lib/auth' import { getSession } from '@/lib/auth'
@@ -10,33 +13,35 @@ export interface AuthResult {
success: boolean success: boolean
userId?: string userId?: string
authType?: 'session' | 'api_key' | 'internal_jwt' authType?: 'session' | 'api_key' | 'internal_jwt'
apiKeyType?: 'personal' | 'workspace'
error?: string error?: string
} }
/** /**
* Resolves userId from a verified internal JWT token. * Resolves userId from a verified internal JWT token.
* Extracts userId from the JWT payload, URL search params, or POST body. * Extracts workflowId/userId from URL params or POST body, then looks up userId if needed.
*/ */
async function resolveUserFromJwt( async function resolveUserFromJwt(
request: NextRequest, request: NextRequest,
verificationUserId: string | null, verificationUserId: string | null,
options: { requireWorkflowId?: boolean } options: { requireWorkflowId?: boolean }
): Promise<AuthResult> { ): Promise<AuthResult> {
let workflowId: string | null = null
let userId: string | null = verificationUserId let userId: string | null = verificationUserId
const { searchParams } = new URL(request.url)
workflowId = searchParams.get('workflowId')
if (!userId) { if (!userId) {
const { searchParams } = new URL(request.url)
userId = searchParams.get('userId') userId = searchParams.get('userId')
} }
if (!userId && request.method === 'POST') { if (!workflowId && !userId && request.method === 'POST') {
try { try {
const clonedRequest = request.clone() const clonedRequest = request.clone()
const bodyText = await clonedRequest.text() const bodyText = await clonedRequest.text()
if (bodyText) { if (bodyText) {
const body = JSON.parse(bodyText) const body = JSON.parse(bodyText)
userId = body.userId || body._context?.userId || null workflowId = body.workflowId || body._context?.workflowId
userId = userId || body.userId || body._context?.userId
} }
} catch { } catch {
// Ignore JSON parse errors // Ignore JSON parse errors
@@ -47,8 +52,22 @@ async function resolveUserFromJwt(
return { success: true, userId, authType: 'internal_jwt' } return { success: true, userId, authType: 'internal_jwt' }
} }
if (workflowId) {
const [workflowData] = await db
.select({ userId: workflow.userId })
.from(workflow)
.where(eq(workflow.id, workflowId))
.limit(1)
if (!workflowData) {
return { success: false, error: 'Workflow not found' }
}
return { success: true, userId: workflowData.userId, authType: 'internal_jwt' }
}
if (options.requireWorkflowId !== false) { if (options.requireWorkflowId !== false) {
return { success: false, error: 'userId required for internal JWT calls' } return { success: false, error: 'workflowId or userId required for internal JWT calls' }
} }
return { success: true, authType: 'internal_jwt' } return { success: true, authType: 'internal_jwt' }
@@ -203,7 +222,6 @@ export async function checkHybridAuth(
success: true, success: true,
userId: result.userId!, userId: result.userId!,
authType: 'api_key', authType: 'api_key',
apiKeyType: result.keyType,
} }
} }

View File

@@ -14,7 +14,7 @@ export type UsageLogCategory = 'model' | 'fixed'
/** /**
* Usage log source types * Usage log source types
*/ */
export type UsageLogSource = 'workflow' | 'wand' | 'copilot' export type UsageLogSource = 'workflow' | 'wand' | 'copilot' | 'mcp_copilot'
/** /**
* Metadata for 'model' category charges * Metadata for 'model' category charges

View File

@@ -1,4 +1,5 @@
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { COPILOT_CHAT_API_PATH, COPILOT_CHAT_STREAM_API_PATH } from '@/lib/copilot/constants'
import type { CopilotMode, CopilotModelId, CopilotTransportMode } from '@/lib/copilot/models' import type { CopilotMode, CopilotModelId, CopilotTransportMode } from '@/lib/copilot/models'
const logger = createLogger('CopilotAPI') const logger = createLogger('CopilotAPI')
@@ -82,6 +83,7 @@ export interface SendMessageRequest {
executionId?: string executionId?: string
}> }>
commands?: string[] commands?: string[]
resumeFromEventId?: number
} }
/** /**
@@ -120,7 +122,7 @@ export async function sendStreamingMessage(
request: SendMessageRequest request: SendMessageRequest
): Promise<StreamingResponse> { ): Promise<StreamingResponse> {
try { try {
const { abortSignal, ...requestBody } = request const { abortSignal, resumeFromEventId, ...requestBody } = request
try { try {
const preview = Array.isArray((requestBody as any).contexts) const preview = Array.isArray((requestBody as any).contexts)
? (requestBody as any).contexts.map((c: any) => ({ ? (requestBody as any).contexts.map((c: any) => ({
@@ -136,9 +138,56 @@ export async function sendStreamingMessage(
? (requestBody as any).contexts.length ? (requestBody as any).contexts.length
: 0, : 0,
contextsPreview: preview, contextsPreview: preview,
resumeFromEventId,
}) })
} catch {} } catch (error) {
const response = await fetch('/api/copilot/chat', { logger.warn('Failed to log streaming message context preview', {
error: error instanceof Error ? error.message : String(error),
})
}
const streamId = request.userMessageId
if (typeof resumeFromEventId === 'number') {
if (!streamId) {
return {
success: false,
error: 'streamId is required to resume a stream',
status: 400,
}
}
const url = `${COPILOT_CHAT_STREAM_API_PATH}?streamId=${encodeURIComponent(
streamId
)}&from=${encodeURIComponent(String(resumeFromEventId))}`
const response = await fetch(url, {
method: 'GET',
signal: abortSignal,
credentials: 'include',
})
if (!response.ok) {
const errorMessage = await handleApiError(response, 'Failed to resume streaming message')
return {
success: false,
error: errorMessage,
status: response.status,
}
}
if (!response.body) {
return {
success: false,
error: 'No response body received',
status: 500,
}
}
return {
success: true,
stream: response.body,
}
}
const response = await fetch(COPILOT_CHAT_API_PATH, {
method: 'POST', method: 'POST',
headers: { 'Content-Type': 'application/json' }, headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ ...requestBody, stream: true }), body: JSON.stringify({ ...requestBody, stream: true }),

View File

@@ -0,0 +1,66 @@
import { createLogger } from '@sim/logger'
import { CopilotFiles } from '@/lib/uploads'
import { createFileContent } from '@/lib/uploads/utils/file-utils'
const logger = createLogger('CopilotChatContext')
/**
 * Build conversation history from stored chat messages.
 *
 * Non-array input is normalized to an empty history. The conversationId key
 * is only present on the result when a truthy id was supplied.
 */
export function buildConversationHistory(
  messages: unknown[],
  conversationId?: string
): { history: unknown[]; conversationId?: string } {
  const result: { history: unknown[]; conversationId?: string } = {
    history: Array.isArray(messages) ? messages : [],
  }
  if (conversationId) {
    result.conversationId = conversationId
  }
  return result
}
/** Raw attachment metadata as stored on a chat message. */
export interface FileAttachmentInput {
  id: string
  // Storage key used to fetch the underlying file bytes.
  key: string
  name?: string
  filename?: string
  mimeType?: string
  // Snake-case mime-type variant; processCopilotAttachments output uses this form.
  media_type?: string
  size: number
}
/** Provider-ready content part produced from an attachment buffer. */
export interface FileContent {
  type: string
  [key: string]: unknown
}
/**
 * Process file attachments into content for the payload.
 *
 * Fetches each attachment's bytes via CopilotFiles, converts buffers into
 * provider content parts, and drops any attachment that yields no content.
 * Returns an empty list for missing or empty input.
 */
export async function processFileAttachments(
  fileAttachments: FileAttachmentInput[],
  userId: string
): Promise<FileContent[]> {
  if (!Array.isArray(fileAttachments) || fileAttachments.length === 0) return []
  // Request id ties the upload fetches to this user/invocation for tracing.
  const requestId = `copilot-${userId}-${Date.now()}`
  const prepared = await CopilotFiles.processCopilotAttachments(
    fileAttachments as Parameters<typeof CopilotFiles.processCopilotAttachments>[0],
    requestId
  )
  const contents: FileContent[] = []
  for (const { buffer, attachment } of prepared) {
    const content = createFileContent(buffer, attachment.media_type)
    if (content) {
      contents.push(content as FileContent)
    }
  }
  logger.debug('Processed file attachments for payload', {
    userId,
    inputCount: fileAttachments.length,
    outputCount: contents.length,
  })
  return contents
}

View File

@@ -0,0 +1,69 @@
import { db } from '@sim/db'
import { copilotChats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
const logger = createLogger('CopilotChatLifecycle')
/** Result of resolving (or lazily creating) a copilot chat session. */
export interface ChatLoadResult {
  // Empty string signals that creation of a new chat row failed.
  chatId: string
  // The chat row, or null when missing, owned by another user, or creation failed.
  chat: typeof copilotChats.$inferSelect | null
  // Stored messages for an existing chat; empty for newly created chats.
  conversationHistory: unknown[]
  // True when this call attempted to create a new chat row.
  isNew: boolean
}
/**
 * Resolve or create a copilot chat session.
 * If chatId is provided, loads the existing chat. Otherwise creates a new one.
 */
export async function resolveOrCreateChat(params: {
  chatId?: string
  userId: string
  workflowId: string
  model: string
}): Promise<ChatLoadResult> {
  const { chatId, userId, workflowId, model } = params
  if (chatId) {
    // Scope the lookup to the owning user so chats cannot be read cross-user.
    const rows = await db
      .select()
      .from(copilotChats)
      .where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, userId)))
      .limit(1)
    const existing = rows[0] ?? null
    return {
      chatId,
      chat: existing,
      conversationHistory:
        existing && Array.isArray(existing.messages) ? existing.messages : [],
      isNew: false,
    }
  }
  const inserted = await db
    .insert(copilotChats)
    .values({ userId, workflowId, title: null, model, messages: [] })
    .returning()
  const created = inserted[0]
  if (!created) {
    logger.warn('Failed to create new copilot chat row', { userId, workflowId })
    return { chatId: '', chat: null, conversationHistory: [], isNew: true }
  }
  return { chatId: created.id, chat: created, conversationHistory: [], isNew: true }
}

View File

@@ -0,0 +1,237 @@
import { createLogger } from '@sim/logger'
import { processFileAttachments } from '@/lib/copilot/chat-context'
import { getCopilotModel } from '@/lib/copilot/config'
import { SIM_AGENT_VERSION } from '@/lib/copilot/constants'
import { getCredentialsServerTool } from '@/lib/copilot/tools/server/user/get-credentials'
import type { CopilotProviderConfig } from '@/lib/copilot/types'
import { env } from '@/lib/core/config/env'
import { tools } from '@/tools/registry'
import { getLatestVersionTools, stripVersionSuffix } from '@/tools/utils'
const logger = createLogger('CopilotChatPayload')
/** Inputs needed to assemble a copilot chat request payload. */
export interface BuildPayloadParams {
  message: string
  workflowId: string
  userId: string
  userMessageId: string
  mode: string
  model: string
  // Prior stored messages; their shape is validated at the use sites.
  conversationHistory?: unknown[]
  contexts?: Array<{ type: string; content: string }>
  fileAttachments?: Array<{ id: string; key: string; size: number; [key: string]: unknown }>
  commands?: string[]
  chatId?: string
  // Optional system message injected just before the new user message.
  implicitFeedback?: string
}
/** Tool schema entry advertised to the backend for integration tools. */
interface ToolSchema {
  name: string
  description: string
  input_schema: Record<string, unknown>
  defer_loading?: boolean
  executeLocally?: boolean
  oauth?: { required: boolean; provider: string }
}
/** Credential bundle (OAuth tokens + configured API key names) for build mode. */
interface CredentialsPayload {
  oauth: Record<
    string,
    { accessToken: string; accountId: string; name: string; expiresAt?: string }
  >
  // Environment variable NAMES only — secret values are not included.
  apiKeys: string[]
  metadata?: {
    connectedOAuth: Array<{ provider: string; name: string; scopes?: string[] }>
    configuredApiKeys: string[]
  }
}
// Message content is either plain text or a list of typed content parts.
type MessageContent = string | Array<{ type: string; text?: string; [key: string]: unknown }>
interface ConversationMessage {
  role: string
  content: MessageContent
}
/**
 * Derive the provider configuration from environment settings.
 *
 * Returns undefined when COPILOT_PROVIDER is unset, leaving provider choice
 * to the backend.
 *
 * NOTE(review): the azure/vertex branches use the env-derived model
 * (COPILOT_MODEL or the chat default) while the generic branch uses the
 * caller-selected model — confirm this asymmetry is intentional.
 */
function buildProviderConfig(selectedModel: string): CopilotProviderConfig | undefined {
  const defaults = getCopilotModel('chat')
  const envModel = env.COPILOT_MODEL || defaults.model
  const providerEnv = env.COPILOT_PROVIDER
  if (!providerEnv) return undefined
  if (providerEnv === 'azure-openai') {
    return {
      provider: 'azure-openai',
      model: envModel,
      apiKey: env.AZURE_OPENAI_API_KEY,
      apiVersion: 'preview',
      endpoint: env.AZURE_OPENAI_ENDPOINT,
    }
  }
  if (providerEnv === 'vertex') {
    return {
      provider: 'vertex',
      model: envModel,
      apiKey: env.COPILOT_API_KEY,
      vertexProject: env.VERTEX_PROJECT,
      vertexLocation: env.VERTEX_LOCATION,
    }
  }
  // Fix: the previous `providerEnv as Exclude<string, 'azure-openai' | 'vertex'>`
  // was a no-op — Exclude over the non-union `string` is still `string` — so it
  // added no type safety. Cast the whole literal once instead.
  return {
    provider: providerEnv,
    model: selectedModel,
    apiKey: env.COPILOT_API_KEY,
  } as CopilotProviderConfig
}
/**
 * Build the request payload for the copilot backend.
 *
 * Assembles the full conversation (stored history, optional implicit
 * feedback as a system message, then the new user message), re-processes
 * file attachments into provider content parts, and — in build mode —
 * additionally gathers user credentials and integration tool schemas.
 * Credential/tool failures are non-fatal; their sections are simply omitted.
 */
export async function buildCopilotRequestPayload(
  params: BuildPayloadParams,
  options: {
    providerConfig?: CopilotProviderConfig
    selectedModel: string
  }
): Promise<Record<string, unknown>> {
  const {
    message,
    workflowId,
    userId,
    userMessageId,
    mode,
    conversationHistory = [],
    contexts,
    fileAttachments,
    commands,
    chatId,
    implicitFeedback,
  } = params
  const selectedModel = options.selectedModel
  // An explicit provider config from the caller wins over env-derived config.
  const providerConfig = options.providerConfig ?? buildProviderConfig(selectedModel)
  // 'agent' is normalized to 'build' internally, but the transport expects
  // build mode to be labeled 'agent' again on the wire.
  const effectiveMode = mode === 'agent' ? 'build' : mode
  const transportMode = effectiveMode === 'build' ? 'agent' : effectiveMode
  const processedFileContents = await processFileAttachments(fileAttachments ?? [], userId)
  const messages: ConversationMessage[] = []
  // Re-hydrate prior messages; historical attachments are re-processed so the
  // backend receives the same multimodal content parts each turn.
  for (const msg of conversationHistory as Array<Record<string, unknown>>) {
    const msgAttachments = msg.fileAttachments as Array<Record<string, unknown>> | undefined
    if (Array.isArray(msgAttachments) && msgAttachments.length > 0) {
      const content: Array<{ type: string; text?: string; [key: string]: unknown }> = [
        { type: 'text', text: msg.content as string },
      ]
      const processedHistoricalAttachments = await processFileAttachments(
        (msgAttachments as BuildPayloadParams['fileAttachments']) ?? [],
        userId
      )
      for (const fileContent of processedHistoricalAttachments) {
        content.push(fileContent)
      }
      messages.push({ role: msg.role as string, content })
    } else {
      messages.push({ role: msg.role as string, content: msg.content as string })
    }
  }
  // Implicit feedback is injected as a system message before the new user turn.
  if (implicitFeedback) {
    messages.push({ role: 'system', content: implicitFeedback })
  }
  // New user message: multimodal when attachments were processed, else plain text.
  if (processedFileContents.length > 0) {
    const content: Array<{ type: string; text?: string; [key: string]: unknown }> = [
      { type: 'text', text: message },
    ]
    for (const fileContent of processedFileContents) {
      content.push(fileContent)
    }
    messages.push({ role: 'user', content })
  } else {
    messages.push({ role: 'user', content: message })
  }
  let integrationTools: ToolSchema[] = []
  let credentials: CredentialsPayload | null = null
  if (effectiveMode === 'build') {
    // function_execute sandbox tool is now defined in Go — no need to send it
    try {
      const rawCredentials = await getCredentialsServerTool.execute({ workflowId }, { userId })
      const oauthMap: CredentialsPayload['oauth'] = {}
      const connectedOAuth: Array<{ provider: string; name: string; scopes?: string[] }> = []
      // Only credentials that actually carry an access token are forwarded.
      for (const cred of rawCredentials?.oauth?.connected?.credentials ?? []) {
        if (cred.accessToken) {
          oauthMap[cred.provider] = {
            accessToken: cred.accessToken,
            accountId: cred.id,
            name: cred.name,
          }
          connectedOAuth.push({ provider: cred.provider, name: cred.name })
        }
      }
      credentials = {
        oauth: oauthMap,
        // Variable names only — secret values are never placed in the payload.
        apiKeys: rawCredentials?.environment?.variableNames ?? [],
        metadata: {
          connectedOAuth,
          configuredApiKeys: rawCredentials?.environment?.variableNames ?? [],
        },
      }
    } catch (error) {
      logger.warn('Failed to fetch credentials for build payload', {
        error: error instanceof Error ? error.message : String(error),
      })
    }
    try {
      const { createUserToolSchema } = await import('@/tools/params')
      // Advertise only the latest version of each tool, with the version
      // suffix stripped from its name.
      const latestTools = getLatestVersionTools(tools)
      integrationTools = Object.entries(latestTools).map(([toolId, toolConfig]) => {
        const userSchema = createUserToolSchema(toolConfig)
        const strippedName = stripVersionSuffix(toolId)
        return {
          name: strippedName,
          description: toolConfig.description || toolConfig.name || strippedName,
          input_schema: userSchema as unknown as Record<string, unknown>,
          // NOTE(review): presumably lets the backend defer loading the full
          // schema until the tool is used — confirm against backend docs.
          defer_loading: true,
          ...(toolConfig.oauth?.required && {
            oauth: {
              required: true,
              provider: toolConfig.oauth.provider,
            },
          }),
        }
      })
    } catch (error) {
      logger.warn('Failed to build tool schemas for payload', {
        error: error instanceof Error ? error.message : String(error),
      })
    }
  }
  // Optional sections are included only when non-empty to keep the wire
  // payload minimal.
  return {
    message,
    workflowId,
    userId,
    model: selectedModel,
    mode: transportMode,
    messageId: userMessageId,
    version: SIM_AGENT_VERSION,
    ...(providerConfig ? { provider: providerConfig } : {}),
    ...(contexts && contexts.length > 0 ? { context: contexts } : {}),
    ...(chatId ? { chatId } : {}),
    ...(processedFileContents.length > 0 ? { fileAttachments: processedFileContents } : {}),
    ...(integrationTools.length > 0 ? { integrationTools } : {}),
    ...(credentials ? { credentials } : {}),
    ...(commands && commands.length > 0 ? { commands } : {}),
  }
}

View File

@@ -0,0 +1,147 @@
import type {
ChatContext,
CopilotMessage,
MessageFileAttachment,
} from '@/stores/panel/copilot/types'
import type { ClientContentBlock, ClientStreamingContext } from './types'
// Content-block type tags used when scanning and creating blocks.
const TEXT_BLOCK_TYPE = 'text'
const THINKING_BLOCK_TYPE = 'thinking'
// Literal options tag appended so the UI offers a "Continue" choice.
const CONTINUE_OPTIONS_TAG = '<options>{"1":"Continue"}</options>'
/**
 * Create a user chat message. Non-empty contexts are also mirrored into a
 * leading 'contexts' content block for rendering; empty attachment/context
 * arrays leave their keys off entirely.
 */
export function createUserMessage(
  content: string,
  fileAttachments?: MessageFileAttachment[],
  contexts?: ChatContext[],
  messageId?: string
): CopilotMessage {
  const message: CopilotMessage = {
    id: messageId || crypto.randomUUID(),
    role: 'user',
    content,
    timestamp: new Date().toISOString(),
  }
  if (fileAttachments && fileAttachments.length > 0) {
    message.fileAttachments = fileAttachments
  }
  if (contexts && contexts.length > 0) {
    message.contexts = contexts
    message.contentBlocks = [{ type: 'contexts', contexts, timestamp: Date.now() }]
  }
  return message
}
/** Create an empty assistant message to stream content into. */
export function createStreamingMessage(): CopilotMessage {
  const nowIso = new Date().toISOString()
  return {
    id: crypto.randomUUID(),
    role: 'assistant',
    content: '',
    timestamp: nowIso,
  }
}
/**
 * Create an assistant error message whose text is mirrored into a single
 * text content block. errorType tags the failure class for the UI.
 */
export function createErrorMessage(
  messageId: string,
  content: string,
  errorType?: 'usage_limit' | 'unauthorized' | 'forbidden' | 'rate_limit' | 'upgrade_required'
): CopilotMessage {
  const textBlock = { type: 'text', content, timestamp: Date.now() }
  return {
    id: messageId,
    role: 'assistant',
    content,
    timestamp: new Date().toISOString(),
    contentBlocks: [textBlock],
    errorType,
  }
}
/**
 * Append streamed text, coalescing into the active text block when it is
 * still the last content block; otherwise start a fresh text block.
 */
export function appendTextBlock(context: ClientStreamingContext, text: string) {
  if (!text) return
  context.accumulatedContent += text
  const blocks = context.contentBlocks
  if (context.currentTextBlock && blocks.length > 0) {
    const tail = blocks[blocks.length - 1]
    if (tail.type === TEXT_BLOCK_TYPE && tail === context.currentTextBlock) {
      tail.content += text
      return
    }
  }
  const freshBlock: ClientContentBlock = { type: 'text', content: text, timestamp: Date.now() }
  context.currentTextBlock = freshBlock
  blocks.push(freshBlock)
}
/**
 * Append the "Continue" options tag to content, unless any <options> tag is
 * already present. Non-blank content gets a blank line before the tag.
 */
export function appendContinueOption(content: string): string {
  if (/<options>/i.test(content)) return content
  const hasVisibleText = content.trim().length > 0
  return hasVisibleText
    ? `${content}\n\n${CONTINUE_OPTIONS_TAG}`
    : `${content}${CONTINUE_OPTIONS_TAG}`
}
/**
 * Return blocks with a trailing "Continue" options text block appended,
 * unless a text block already contains an <options> tag. Non-array input is
 * returned unchanged, as is the original array when the tag already exists.
 */
export function appendContinueOptionBlock(blocks: ClientContentBlock[]): ClientContentBlock[] {
  if (!Array.isArray(blocks)) return blocks
  for (const block of blocks) {
    if (
      block?.type === TEXT_BLOCK_TYPE &&
      typeof block.content === 'string' &&
      /<options>/i.test(block.content)
    ) {
      return blocks
    }
  }
  const optionsBlock: ClientContentBlock = {
    type: TEXT_BLOCK_TYPE,
    content: CONTINUE_OPTIONS_TAG,
    timestamp: Date.now(),
  }
  return [...blocks, optionsBlock]
}
/**
 * Remove the "Continue" options tag from content, tidying trailing blank
 * lines its removal leaves behind. Content without the tag passes through.
 */
export function stripContinueOption(content: string): string {
  if (!content || !content.includes(CONTINUE_OPTIONS_TAG)) return content
  const withoutTag = content.replace(CONTINUE_OPTIONS_TAG, '')
  return withoutTag.replace(/\n{2,}\s*$/g, '\n').trimEnd()
}
/**
 * Remove the "Continue" options tag from text blocks, dropping any block
 * whose content becomes empty. Non-array input is returned unchanged.
 */
export function stripContinueOptionFromBlocks(blocks: ClientContentBlock[]): ClientContentBlock[] {
  if (!Array.isArray(blocks)) return blocks
  const kept: ClientContentBlock[] = []
  for (const block of blocks) {
    if (
      block?.type === TEXT_BLOCK_TYPE &&
      typeof block.content === 'string' &&
      block.content.includes(CONTINUE_OPTIONS_TAG)
    ) {
      const cleaned = stripContinueOption(block.content)
      if (cleaned.trim()) {
        kept.push({ ...block, content: cleaned })
      }
    } else {
      kept.push(block)
    }
  }
  return kept
}
/**
 * Enter thinking mode: ensure an open thinking block exists and detach the
 * current text block so subsequent text starts a fresh block.
 */
export function beginThinkingBlock(context: ClientStreamingContext) {
  if (!context.currentThinkingBlock) {
    const now = Date.now()
    const block: ClientContentBlock = {
      type: 'thinking',
      content: '',
      timestamp: now,
      startTime: now,
    }
    context.currentThinkingBlock = block
    context.contentBlocks.push(block)
  }
  context.isInThinkingBlock = true
  context.currentTextBlock = null
}
/**
 * Exit thinking mode: stamp the open thinking block with its elapsed
 * duration and clear every streaming block pointer.
 */
export function finalizeThinkingBlock(context: ClientStreamingContext) {
  const openBlock = context.currentThinkingBlock
  if (openBlock) {
    openBlock.duration = Date.now() - (openBlock.startTime || Date.now())
  }
  context.isInThinkingBlock = false
  context.currentThinkingBlock = null
  context.currentTextBlock = null
}

View File

@@ -0,0 +1,752 @@
import { createLogger } from '@sim/logger'
import { STREAM_STORAGE_KEY } from '@/lib/copilot/constants'
import { asRecord } from '@/lib/copilot/orchestrator/sse-utils'
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
import {
isBackgroundState,
isRejectedState,
isReviewState,
resolveToolDisplay,
} from '@/lib/copilot/store-utils'
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
import type { CopilotStore, CopilotStreamInfo, CopilotToolCall } from '@/stores/panel/copilot/types'
import { useWorkflowDiffStore } from '@/stores/workflow-diff/store'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
import { appendTextBlock, beginThinkingBlock, finalizeThinkingBlock } from './content-blocks'
import type { ClientContentBlock, ClientStreamingContext } from './types'
const logger = createLogger('CopilotClientSseHandlers')
// Block type tag for streamed text content blocks.
const TEXT_BLOCK_TYPE = 'text'
// Streaming UI batching: flush immediately when the queue holds
// MAX_QUEUE_SIZE entries or more than MAX_BATCH_INTERVAL ms passed since the
// last flush; otherwise delay toward MIN_BATCH_INTERVAL ms between flushes.
const MAX_BATCH_INTERVAL = 50
const MIN_BATCH_INTERVAL = 16
const MAX_QUEUE_SIZE = 5
/**
 * Persist (or clear, when info is null) the active stream descriptor in
 * sessionStorage. No-ops during SSR where window is unavailable; storage
 * failures are logged and swallowed.
 */
function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void {
  if (typeof window === 'undefined') return
  try {
    if (info) {
      window.sessionStorage.setItem(STREAM_STORAGE_KEY, JSON.stringify(info))
    } else {
      window.sessionStorage.removeItem(STREAM_STORAGE_KEY)
    }
  } catch (error) {
    logger.warn('Failed to write active stream to storage', {
      error: error instanceof Error ? error.message : String(error),
    })
  }
}
// Zustand-style store setter accepted by the SSE handlers.
type StoreSet = (
  partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)
) => void
// Handler signature for a single SSE event type.
export type SSEHandler = (
  data: SSEEvent,
  context: ClientStreamingContext,
  get: () => CopilotStore,
  set: StoreSet
) => Promise<void> | void
// Pending per-message streaming contexts awaiting a batched store flush,
// keyed by message id.
const streamingUpdateQueue = new Map<string, ClientStreamingContext>()
// Handle of the currently scheduled animation-frame flush, if any.
let streamingUpdateRAF: number | null = null
// performance.now() timestamp of the last batched flush.
let lastBatchTime = 0
/** Cancel any scheduled streaming flush and drop all queued updates. */
export function stopStreamingUpdates() {
  const pendingFrame = streamingUpdateRAF
  if (pendingFrame !== null) {
    cancelAnimationFrame(pendingFrame)
    streamingUpdateRAF = null
  }
  streamingUpdateQueue.clear()
}
/** Shallow-copy each content block into a fresh array for immutable store updates. */
function createOptimizedContentBlocks(contentBlocks: ClientContentBlock[]): ClientContentBlock[] {
  return contentBlocks.map((block) => ({ ...block }))
}
/**
 * Synchronously flush all queued streaming updates into the store,
 * cancelling any pending animation-frame flush first.
 */
export function flushStreamingUpdates(set: StoreSet) {
  if (streamingUpdateRAF !== null) {
    cancelAnimationFrame(streamingUpdateRAF)
    streamingUpdateRAF = null
  }
  if (streamingUpdateQueue.size === 0) return
  // Snapshot and clear the queue before calling set() so updates queued
  // re-entrantly during the set() callback are not lost.
  const updates = new Map(streamingUpdateQueue)
  streamingUpdateQueue.clear()
  set((state: CopilotStore) => {
    if (updates.size === 0) return state
    return {
      messages: state.messages.map((msg) => {
        const update = updates.get(msg.id)
        if (update) {
          // While streaming, the text lives in contentBlocks; the flat
          // content string is emptied.
          return {
            ...msg,
            content: '',
            contentBlocks:
              update.contentBlocks.length > 0
                ? createOptimizedContentBlocks(update.contentBlocks)
                : [],
          }
        }
        return msg
      }),
    }
  })
}
/**
 * Queue a streaming context's latest content for a batched store update.
 *
 * Updates are coalesced per message id and applied inside a
 * requestAnimationFrame callback: scheduled immediately when the queue holds
 * MAX_QUEUE_SIZE entries or more than MAX_BATCH_INTERVAL ms have passed
 * since the last flush, otherwise delayed toward MIN_BATCH_INTERVAL ms.
 */
export function updateStreamingMessage(set: StoreSet, context: ClientStreamingContext) {
  // Respect callers that have muted streaming repaints for this context.
  if (context.suppressStreamingUpdates) return
  const now = performance.now()
  streamingUpdateQueue.set(context.messageId, context)
  const timeSinceLastBatch = now - lastBatchTime
  const shouldFlushImmediately =
    streamingUpdateQueue.size >= MAX_QUEUE_SIZE || timeSinceLastBatch > MAX_BATCH_INTERVAL
  // Only one flush is ever scheduled at a time; later calls merely enqueue.
  if (streamingUpdateRAF === null) {
    const scheduleUpdate = () => {
      streamingUpdateRAF = requestAnimationFrame(() => {
        const updates = new Map(streamingUpdateQueue)
        streamingUpdateQueue.clear()
        streamingUpdateRAF = null
        lastBatchTime = performance.now()
        set((state: CopilotStore) => {
          if (updates.size === 0) return state
          const messages = state.messages
          const lastMessage = messages[messages.length - 1]
          const lastMessageUpdate = lastMessage ? updates.get(lastMessage.id) : null
          // Fast path: the common case is one update targeting the last
          // (actively streaming) message — patch it in place.
          if (updates.size === 1 && lastMessageUpdate) {
            const newMessages = [...messages]
            newMessages[messages.length - 1] = {
              ...lastMessage,
              content: '',
              contentBlocks:
                lastMessageUpdate.contentBlocks.length > 0
                  ? createOptimizedContentBlocks(lastMessageUpdate.contentBlocks)
                  : [],
            }
            return { messages: newMessages }
          }
          // General path: map every message through the pending updates.
          return {
            messages: messages.map((msg) => {
              const update = updates.get(msg.id)
              if (update) {
                return {
                  ...msg,
                  content: '',
                  contentBlocks:
                    update.contentBlocks.length > 0
                      ? createOptimizedContentBlocks(update.contentBlocks)
                      : [],
                }
              }
              return msg
            }),
          }
        })
      })
    }
    if (shouldFlushImmediately) scheduleUpdate()
    else setTimeout(scheduleUpdate, Math.max(0, MIN_BATCH_INTERVAL - timeSinceLastBatch))
  }
}
/**
 * Replace the tool_call content block matching toolCall.id, or append a new
 * tool_call block when none exists yet.
 */
export function upsertToolCallBlock(context: ClientStreamingContext, toolCall: CopilotToolCall) {
  const blocks = context.contentBlocks
  const index = blocks.findIndex(
    (block) => block.type === 'tool_call' && block.toolCall?.id === toolCall.id
  )
  if (index >= 0) {
    blocks[index] = { ...blocks[index], toolCall }
  } else {
    blocks.push({ type: 'tool_call', toolCall, timestamp: Date.now() })
  }
}
/** Remove raw and HTML-escaped <thinking> open/close tags from text. */
function stripThinkingTags(text: string): string {
  const withoutRawTags = text.replace(/<\/?thinking[^>]*>/gi, '')
  return withoutRawTags.replace(/&lt;\/?thinking[^&]*&gt;/gi, '')
}
/**
 * Append streamed reasoning text (minus any thinking tags) to the open
 * thinking block — creating one if needed — and switch into thinking mode.
 * Text that is empty after tag-stripping leaves the context untouched.
 */
function appendThinkingContent(context: ClientStreamingContext, text: string) {
  if (!text) return
  const cleaned = stripThinkingTags(text)
  if (!cleaned) return
  const openBlock = context.currentThinkingBlock
  if (openBlock) {
    openBlock.content += cleaned
  } else {
    const now = Date.now()
    const block: ClientContentBlock = {
      type: 'thinking',
      content: cleaned,
      timestamp: now,
      startTime: now,
    }
    context.currentThinkingBlock = block
    context.contentBlocks.push(block)
  }
  context.isInThinkingBlock = true
  context.currentTextBlock = null
}
export const sseHandlers: Record<string, SSEHandler> = {
chat_id: async (data, context, get, set) => {
context.newChatId = data.chatId
const { currentChat, activeStream } = get()
if (!currentChat && context.newChatId) {
await get().handleNewChatCreation(context.newChatId)
}
if (activeStream && context.newChatId && !activeStream.chatId) {
const updatedStream = { ...activeStream, chatId: context.newChatId }
set({ activeStream: updatedStream })
writeActiveStreamToStorage(updatedStream)
}
},
title_updated: (_data, _context, get, set) => {
const title = _data.title
if (!title) return
const { currentChat, chats } = get()
if (currentChat) {
set({
currentChat: { ...currentChat, title },
chats: chats.map((c) => (c.id === currentChat.id ? { ...c, title } : c)),
})
}
},
tool_result: (data, context, get, set) => {
try {
const eventData = asRecord(data?.data)
const toolCallId: string | undefined =
data?.toolCallId || (eventData.id as string | undefined)
const success: boolean | undefined = data?.success
const failedDependency: boolean = data?.failedDependency === true
const resultObj = asRecord(data?.result)
const skipped: boolean = resultObj.skipped === true
if (!toolCallId) return
const { toolCallsById } = get()
const current = toolCallsById[toolCallId]
if (current) {
if (
isRejectedState(current.state) ||
isReviewState(current.state) ||
isBackgroundState(current.state)
) {
return
}
const targetState = success
? ClientToolCallState.success
: failedDependency || skipped
? ClientToolCallState.rejected
: ClientToolCallState.error
const updatedMap = { ...toolCallsById }
updatedMap[toolCallId] = {
...current,
state: targetState,
display: resolveToolDisplay(current.name, targetState, current.id, current.params),
}
set({ toolCallsById: updatedMap })
if (targetState === ClientToolCallState.success && current.name === 'checkoff_todo') {
try {
const result = asRecord(data?.result) || asRecord(eventData.result)
const input = asRecord(current.params || current.input)
const todoId = (input.id || input.todoId || result.id || result.todoId) as
| string
| undefined
if (todoId) {
get().updatePlanTodoStatus(todoId, 'completed')
}
} catch (error) {
logger.warn('Failed to process checkoff_todo tool result', {
error: error instanceof Error ? error.message : String(error),
toolCallId,
})
}
}
if (
targetState === ClientToolCallState.success &&
current.name === 'mark_todo_in_progress'
) {
try {
const result = asRecord(data?.result) || asRecord(eventData.result)
const input = asRecord(current.params || current.input)
const todoId = (input.id || input.todoId || result.id || result.todoId) as
| string
| undefined
if (todoId) {
get().updatePlanTodoStatus(todoId, 'executing')
}
} catch (error) {
logger.warn('Failed to process mark_todo_in_progress tool result', {
error: error instanceof Error ? error.message : String(error),
toolCallId,
})
}
}
if (current.name === 'edit_workflow') {
try {
const resultPayload = asRecord(
data?.result || eventData.result || eventData.data || data?.data
)
const workflowState = asRecord(resultPayload?.workflowState)
const hasWorkflowState = !!resultPayload?.workflowState
logger.info('[SSE] edit_workflow result received', {
hasWorkflowState,
blockCount: hasWorkflowState ? Object.keys(workflowState.blocks ?? {}).length : 0,
edgeCount: Array.isArray(workflowState.edges) ? workflowState.edges.length : 0,
})
if (hasWorkflowState) {
const diffStore = useWorkflowDiffStore.getState()
diffStore
.setProposedChanges(resultPayload.workflowState as WorkflowState)
.catch((err) => {
logger.error('[SSE] Failed to apply edit_workflow diff', {
error: err instanceof Error ? err.message : String(err),
})
})
}
} catch (err) {
logger.error('[SSE] edit_workflow result handling failed', {
error: err instanceof Error ? err.message : String(err),
})
}
}
}
for (let i = 0; i < context.contentBlocks.length; i++) {
const b = context.contentBlocks[i]
if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) {
if (
isRejectedState(b.toolCall?.state) ||
isReviewState(b.toolCall?.state) ||
isBackgroundState(b.toolCall?.state)
)
break
const targetState = success
? ClientToolCallState.success
: failedDependency || skipped
? ClientToolCallState.rejected
: ClientToolCallState.error
context.contentBlocks[i] = {
...b,
toolCall: {
...b.toolCall,
state: targetState,
display: resolveToolDisplay(
b.toolCall?.name,
targetState,
toolCallId,
b.toolCall?.params
),
},
}
break
}
}
updateStreamingMessage(set, context)
} catch (error) {
logger.warn('Failed to process tool_result SSE event', {
error: error instanceof Error ? error.message : String(error),
})
}
},
tool_error: (data, context, get, set) => {
try {
const errorData = asRecord(data?.data)
const toolCallId: string | undefined =
data?.toolCallId || (errorData.id as string | undefined)
const failedDependency: boolean = data?.failedDependency === true
if (!toolCallId) return
const { toolCallsById } = get()
const current = toolCallsById[toolCallId]
if (current) {
if (
isRejectedState(current.state) ||
isReviewState(current.state) ||
isBackgroundState(current.state)
) {
return
}
const targetState = failedDependency
? ClientToolCallState.rejected
: ClientToolCallState.error
const updatedMap = { ...toolCallsById }
updatedMap[toolCallId] = {
...current,
state: targetState,
display: resolveToolDisplay(current.name, targetState, current.id, current.params),
}
set({ toolCallsById: updatedMap })
}
for (let i = 0; i < context.contentBlocks.length; i++) {
const b = context.contentBlocks[i]
if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) {
if (
isRejectedState(b.toolCall?.state) ||
isReviewState(b.toolCall?.state) ||
isBackgroundState(b.toolCall?.state)
)
break
const targetState = failedDependency
? ClientToolCallState.rejected
: ClientToolCallState.error
context.contentBlocks[i] = {
...b,
toolCall: {
...b.toolCall,
state: targetState,
display: resolveToolDisplay(
b.toolCall?.name,
targetState,
toolCallId,
b.toolCall?.params
),
},
}
break
}
}
updateStreamingMessage(set, context)
} catch (error) {
logger.warn('Failed to process tool_error SSE event', {
error: error instanceof Error ? error.message : String(error),
})
}
},
tool_generating: (data, context, get, set) => {
const { toolCallId, toolName } = data
if (!toolCallId || !toolName) return
const { toolCallsById } = get()
if (!toolCallsById[toolCallId]) {
const initialState = ClientToolCallState.pending
const tc: CopilotToolCall = {
id: toolCallId,
name: toolName,
state: initialState,
display: resolveToolDisplay(toolName, initialState, toolCallId),
}
const updated = { ...toolCallsById, [toolCallId]: tc }
set({ toolCallsById: updated })
logger.info('[toolCallsById] map updated', updated)
upsertToolCallBlock(context, tc)
updateStreamingMessage(set, context)
}
},
tool_call: (data, context, get, set) => {
const toolData = asRecord(data?.data)
const id: string | undefined = (toolData.id as string | undefined) || data?.toolCallId
const name: string | undefined = (toolData.name as string | undefined) || data?.toolName
if (!id) return
const args = toolData.arguments as Record<string, unknown> | undefined
const isPartial = toolData.partial === true
const { toolCallsById } = get()
const existing = toolCallsById[id]
const next: CopilotToolCall = existing
? {
...existing,
state: ClientToolCallState.pending,
...(args ? { params: args } : {}),
display: resolveToolDisplay(name, ClientToolCallState.pending, id, args),
}
: {
id,
name: name || 'unknown_tool',
state: ClientToolCallState.pending,
...(args ? { params: args } : {}),
display: resolveToolDisplay(name, ClientToolCallState.pending, id, args),
}
const updated = { ...toolCallsById, [id]: next }
set({ toolCallsById: updated })
logger.info('[toolCallsById] → pending', { id, name, params: args })
upsertToolCallBlock(context, next)
updateStreamingMessage(set, context)
if (isPartial) {
return
}
return
},
reasoning: (data, context, _get, set) => {
const phase = (data && (data.phase || data?.data?.phase)) as string | undefined
if (phase === 'start') {
beginThinkingBlock(context)
updateStreamingMessage(set, context)
return
}
if (phase === 'end') {
finalizeThinkingBlock(context)
updateStreamingMessage(set, context)
return
}
const chunk: string = typeof data?.data === 'string' ? data.data : data?.content || ''
if (!chunk) return
appendThinkingContent(context, chunk)
updateStreamingMessage(set, context)
},
content: (data, context, get, set) => {
if (!data.data) return
context.pendingContent += data.data
let contentToProcess = context.pendingContent
let hasProcessedContent = false
const thinkingStartRegex = /<thinking>/
const thinkingEndRegex = /<\/thinking>/
const designWorkflowStartRegex = /<design_workflow>/
const designWorkflowEndRegex = /<\/design_workflow>/
const splitTrailingPartialTag = (
text: string,
tags: string[]
): { text: string; remaining: string } => {
const partialIndex = text.lastIndexOf('<')
if (partialIndex < 0) {
return { text, remaining: '' }
}
const possibleTag = text.substring(partialIndex)
const matchesTagStart = tags.some((tag) => tag.startsWith(possibleTag))
if (!matchesTagStart) {
return { text, remaining: '' }
}
return {
text: text.substring(0, partialIndex),
remaining: possibleTag,
}
}
while (contentToProcess.length > 0) {
if (context.isInDesignWorkflowBlock) {
const endMatch = designWorkflowEndRegex.exec(contentToProcess)
if (endMatch) {
const designContent = contentToProcess.substring(0, endMatch.index)
context.designWorkflowContent += designContent
context.isInDesignWorkflowBlock = false
logger.info('[design_workflow] Tag complete, setting plan content', {
contentLength: context.designWorkflowContent.length,
})
set({ streamingPlanContent: context.designWorkflowContent })
contentToProcess = contentToProcess.substring(endMatch.index + endMatch[0].length)
hasProcessedContent = true
} else {
const { text, remaining } = splitTrailingPartialTag(contentToProcess, [
'</design_workflow>',
])
context.designWorkflowContent += text
set({ streamingPlanContent: context.designWorkflowContent })
contentToProcess = remaining
hasProcessedContent = true
if (remaining) {
break
}
}
continue
}
if (!context.isInThinkingBlock && !context.isInDesignWorkflowBlock) {
const designStartMatch = designWorkflowStartRegex.exec(contentToProcess)
if (designStartMatch) {
const textBeforeDesign = contentToProcess.substring(0, designStartMatch.index)
if (textBeforeDesign) {
appendTextBlock(context, textBeforeDesign)
hasProcessedContent = true
}
context.isInDesignWorkflowBlock = true
context.designWorkflowContent = ''
contentToProcess = contentToProcess.substring(
designStartMatch.index + designStartMatch[0].length
)
hasProcessedContent = true
continue
}
const nextMarkIndex = contentToProcess.indexOf('<marktodo>')
const nextCheckIndex = contentToProcess.indexOf('<checkofftodo>')
const hasMark = nextMarkIndex >= 0
const hasCheck = nextCheckIndex >= 0
const nextTagIndex =
hasMark && hasCheck
? Math.min(nextMarkIndex, nextCheckIndex)
: hasMark
? nextMarkIndex
: hasCheck
? nextCheckIndex
: -1
if (nextTagIndex >= 0) {
const isMarkTodo = hasMark && nextMarkIndex === nextTagIndex
const tagStart = isMarkTodo ? '<marktodo>' : '<checkofftodo>'
const tagEnd = isMarkTodo ? '</marktodo>' : '</checkofftodo>'
const closingIndex = contentToProcess.indexOf(tagEnd, nextTagIndex + tagStart.length)
if (closingIndex === -1) {
break
}
const todoId = contentToProcess
.substring(nextTagIndex + tagStart.length, closingIndex)
.trim()
logger.info(
isMarkTodo ? '[TODO] Detected marktodo tag' : '[TODO] Detected checkofftodo tag',
{ todoId }
)
if (todoId) {
try {
get().updatePlanTodoStatus(todoId, isMarkTodo ? 'executing' : 'completed')
logger.info(
isMarkTodo
? '[TODO] Successfully marked todo in progress'
: '[TODO] Successfully checked off todo',
{ todoId }
)
} catch (e) {
logger.error(
isMarkTodo
? '[TODO] Failed to mark todo in progress'
: '[TODO] Failed to checkoff todo',
{ todoId, error: e }
)
}
} else {
logger.warn('[TODO] Empty todoId extracted from todo tag', { tagType: tagStart })
}
let beforeTag = contentToProcess.substring(0, nextTagIndex)
let afterTag = contentToProcess.substring(closingIndex + tagEnd.length)
const hadNewlineBefore = /(\r?\n)+$/.test(beforeTag)
const hadNewlineAfter = /^(\r?\n)+/.test(afterTag)
beforeTag = beforeTag.replace(/(\r?\n)+$/, '')
afterTag = afterTag.replace(/^(\r?\n)+/, '')
contentToProcess =
beforeTag + (hadNewlineBefore && hadNewlineAfter ? '\n' : '') + afterTag
context.currentTextBlock = null
hasProcessedContent = true
continue
}
}
if (context.isInThinkingBlock) {
const endMatch = thinkingEndRegex.exec(contentToProcess)
if (endMatch) {
const thinkingContent = contentToProcess.substring(0, endMatch.index)
appendThinkingContent(context, thinkingContent)
finalizeThinkingBlock(context)
contentToProcess = contentToProcess.substring(endMatch.index + endMatch[0].length)
hasProcessedContent = true
} else {
const { text, remaining } = splitTrailingPartialTag(contentToProcess, ['</thinking>'])
if (text) {
appendThinkingContent(context, text)
hasProcessedContent = true
}
contentToProcess = remaining
if (remaining) {
break
}
}
} else {
const startMatch = thinkingStartRegex.exec(contentToProcess)
if (startMatch) {
const textBeforeThinking = contentToProcess.substring(0, startMatch.index)
if (textBeforeThinking) {
appendTextBlock(context, textBeforeThinking)
hasProcessedContent = true
}
context.isInThinkingBlock = true
context.currentTextBlock = null
contentToProcess = contentToProcess.substring(startMatch.index + startMatch[0].length)
hasProcessedContent = true
} else {
let partialTagIndex = contentToProcess.lastIndexOf('<')
const partialMarkTodo = contentToProcess.lastIndexOf('<marktodo')
const partialCheckoffTodo = contentToProcess.lastIndexOf('<checkofftodo')
if (partialMarkTodo > partialTagIndex) {
partialTagIndex = partialMarkTodo
}
if (partialCheckoffTodo > partialTagIndex) {
partialTagIndex = partialCheckoffTodo
}
let textToAdd = contentToProcess
let remaining = ''
if (partialTagIndex >= 0 && partialTagIndex > contentToProcess.length - 50) {
textToAdd = contentToProcess.substring(0, partialTagIndex)
remaining = contentToProcess.substring(partialTagIndex)
}
if (textToAdd) {
appendTextBlock(context, textToAdd)
hasProcessedContent = true
}
contentToProcess = remaining
break
}
}
}
context.pendingContent = contentToProcess
if (hasProcessedContent) {
updateStreamingMessage(set, context)
}
},
// Terminates the stream on the first 'done' event. The count is retained
// purely for diagnostics. (A previous `if (doneEventCount >= 1)` guard right
// after the increment was always true and has been removed.)
done: (_data, context) => {
  logger.info('[SSE] DONE EVENT RECEIVED', {
    doneEventCount: context.doneEventCount,
    data: _data,
  })
  context.doneEventCount++
  logger.info('[SSE] Setting streamComplete = true, stream will terminate')
  context.streamComplete = true
},
// Stream-level failure: surface the error on the in-flight assistant message
// (keeping whatever content already streamed) and terminate the stream.
error: (data, context, _get, set) => {
  logger.error('Stream error:', data.error)
  const fallbackContent = context.accumulatedContent || 'An error occurred.'
  set((state: CopilotStore) => {
    const messages = state.messages.map((msg) => {
      if (msg.id !== context.messageId) return msg
      // Attach the error and best-effort content to the streaming message only.
      return { ...msg, content: fallbackContent, error: data.error }
    })
    return { messages }
  })
  context.streamComplete = true
},
// End of stream: flush any text still held back (it may have been buffered
// waiting for a closing tag that never arrived), close out an open thinking
// block, and render the final message state.
stream_end: (_data, context, _get, set) => {
  const leftover = context.pendingContent
  if (leftover) {
    const insideThinking = context.isInThinkingBlock && context.currentThinkingBlock
    if (insideThinking) {
      appendThinkingContent(context, leftover)
    } else if (leftover.trim()) {
      appendTextBlock(context, leftover)
    }
    context.pendingContent = ''
  }
  finalizeThinkingBlock(context)
  updateStreamingMessage(set, context)
},
// Fallback for unrecognized SSE event types — intentionally a no-op.
default: () => {},
}

View File

@@ -0,0 +1,3 @@
/**
 * Barrel exports for the copilot client-side SSE handling modules.
 */
export type { SSEHandler } from './handlers'
export { sseHandlers } from './handlers'
export { applySseEvent, subAgentSSEHandlers } from './subagent-handlers'

View File

@@ -0,0 +1,374 @@
import { createLogger } from '@sim/logger'
import {
asRecord,
normalizeSseEvent,
shouldSkipToolCallEvent,
shouldSkipToolResultEvent,
} from '@/lib/copilot/orchestrator/sse-utils'
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
import { resolveToolDisplay } from '@/lib/copilot/store-utils'
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types'
import { type SSEHandler, sseHandlers, updateStreamingMessage } from './handlers'
import type { ClientStreamingContext } from './types'
const logger = createLogger('CopilotClientSubagentHandlers')
/** Zustand-style state setter: accepts a partial state object or an updater function. */
type StoreSet = (
  partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)
) => void
/**
 * Accumulates streamed subagent text for a parent tool call.
 *
 * Two views of the same stream are maintained: a flat string in
 * `context.subAgentContent` and an ordered block list in
 * `context.subAgentBlocks`. Consecutive text chunks are merged into the
 * trailing `subagent_text` block rather than creating one block per chunk.
 */
export function appendSubAgentContent(
  context: ClientStreamingContext,
  parentToolCallId: string,
  text: string
) {
  if (context.subAgentContent[parentToolCallId] == null) {
    context.subAgentContent[parentToolCallId] = ''
  }
  if (context.subAgentBlocks[parentToolCallId] == null) {
    context.subAgentBlocks[parentToolCallId] = []
  }
  context.subAgentContent[parentToolCallId] += text
  const blockList = context.subAgentBlocks[parentToolCallId]
  const tail = blockList[blockList.length - 1]
  if (tail && tail.type === 'subagent_text') {
    // Extend the trailing text block in place.
    tail.content = (tail.content ?? '') + text
  } else {
    blockList.push({
      type: 'subagent_text',
      content: text,
      timestamp: Date.now(),
    })
  }
}
/**
 * Pushes the accumulated subagent stream (content, nested tool calls, blocks)
 * for `parentToolCallId` into the parent CopilotToolCall in the store, then
 * re-renders the in-progress streaming message.
 */
export function updateToolCallWithSubAgentData(
  context: ClientStreamingContext,
  get: () => CopilotStore,
  set: StoreSet,
  parentToolCallId: string
) {
  const { toolCallsById } = get()
  const parentToolCall = toolCallsById[parentToolCallId]
  if (!parentToolCall) {
    logger.warn('[SubAgent] updateToolCallWithSubAgentData: parent tool call not found', {
      parentToolCallId,
      availableToolCallIds: Object.keys(toolCallsById),
    })
    return
  }
  const blocks = context.subAgentBlocks[parentToolCallId] ?? []
  // Rebuild the parent tool call with the latest subagent data; it stays
  // flagged as streaming until a subagent_end event clears subAgentStreaming.
  const updatedToolCall: CopilotToolCall = {
    ...parentToolCall,
    subAgentContent: context.subAgentContent[parentToolCallId] || '',
    subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] ?? [],
    subAgentBlocks: blocks,
    subAgentStreaming: true,
  }
  logger.info('[SubAgent] Updating tool call with subagent data', {
    parentToolCallId,
    parentToolName: parentToolCall.name,
    subAgentContentLength: updatedToolCall.subAgentContent?.length,
    subAgentBlocksCount: updatedToolCall.subAgentBlocks?.length,
    subAgentToolCallsCount: updatedToolCall.subAgentToolCalls?.length,
  })
  // New map reference so store subscribers observe the change.
  const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall }
  set({ toolCallsById: updatedMap })
  // Swap the updated tool call into the streaming content blocks (mutated in
  // place) so the message being rendered picks up the new data.
  let foundInContentBlocks = false
  for (let i = 0; i < context.contentBlocks.length; i++) {
    const b = context.contentBlocks[i]
    if (b.type === 'tool_call' && b.toolCall?.id === parentToolCallId) {
      context.contentBlocks[i] = { ...b, toolCall: updatedToolCall }
      foundInContentBlocks = true
      break
    }
  }
  if (!foundInContentBlocks) {
    logger.warn('[SubAgent] Parent tool call not found in contentBlocks', {
      parentToolCallId,
      contentBlocksCount: context.contentBlocks.length,
      toolCallBlockIds: context.contentBlocks
        .filter((b) => b.type === 'tool_call')
        .map((b) => b.toolCall?.id),
    })
  }
  updateStreamingMessage(set, context)
}
/**
 * SSE handlers for events emitted while a subagent session is active.
 *
 * Each handler folds the event into the parent tool call (identified by
 * `context.subAgentParentToolCallId`) rather than the top-level message.
 */
export const subAgentSSEHandlers: Record<string, SSEHandler> = {
  start: () => {
    // Subagent start event - no action needed, parent is already tracked from subagent_start
  },
  // Plain streamed text from the subagent: append it and re-render the parent.
  content: (data, context, get, set) => {
    const parentToolCallId = context.subAgentParentToolCallId
    const contentStr = typeof data.data === 'string' ? data.data : data.content || ''
    logger.info('[SubAgent] content event', {
      parentToolCallId,
      hasData: !!contentStr,
      dataPreview: contentStr ? contentStr.substring(0, 50) : null,
    })
    if (!parentToolCallId || !contentStr) {
      logger.warn('[SubAgent] content missing parentToolCallId or data', {
        parentToolCallId,
        hasData: !!contentStr,
      })
      return
    }
    appendSubAgentContent(context, parentToolCallId, contentStr)
    updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
  },
  // Reasoning deltas render inline with regular subagent text; the
  // 'start'/'end' phase markers carry no content and are skipped.
  reasoning: (data, context, get, set) => {
    const parentToolCallId = context.subAgentParentToolCallId
    const dataObj = asRecord(data?.data)
    const phase = data?.phase || (dataObj.phase as string | undefined)
    if (!parentToolCallId) return
    if (phase === 'start' || phase === 'end') return
    const chunk = typeof data?.data === 'string' ? data.data : data?.content || ''
    if (!chunk) return
    appendSubAgentContent(context, parentToolCallId, chunk)
    updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
  },
  tool_generating: () => {
    // Tool generating event - no action needed, we'll handle the actual tool_call
  },
  // A tool call made by the subagent: upsert it into the parent's nested
  // tool-call list / block list and register it in toolCallsById.
  tool_call: async (data, context, get, set) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return
    const toolData = asRecord(data?.data)
    const id: string | undefined = (toolData.id as string | undefined) || data?.toolCallId
    const name: string | undefined = (toolData.name as string | undefined) || data?.toolName
    if (!id || !name) return
    // Arguments may arrive as an object or as a JSON string; normalize to an object.
    let args: Record<string, unknown> | undefined = (toolData.arguments || toolData.input) as
      | Record<string, unknown>
      | undefined
    if (typeof args === 'string') {
      try {
        args = JSON.parse(args) as Record<string, unknown>
      } catch {
        logger.warn('[SubAgent] Failed to parse arguments string', { args })
      }
    }
    logger.info('[SubAgent] tool_call received', {
      id,
      name,
      hasArgs: !!args,
      argsKeys: args ? Object.keys(args) : [],
      toolDataKeys: Object.keys(toolData),
      dataKeys: Object.keys(data ?? {}),
    })
    if (!context.subAgentToolCalls[parentToolCallId]) {
      context.subAgentToolCalls[parentToolCallId] = []
    }
    if (!context.subAgentBlocks[parentToolCallId]) {
      context.subAgentBlocks[parentToolCallId] = []
    }
    const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex(
      (tc: CopilotToolCall) => tc.id === id
    )
    const subAgentToolCall: CopilotToolCall = {
      id,
      name,
      state: ClientToolCallState.pending,
      ...(args ? { params: args } : {}),
      display: resolveToolDisplay(name, ClientToolCallState.pending, id, args),
    }
    if (existingIndex >= 0) {
      // A streamed update for the same id replaces the earlier (partial) entry.
      context.subAgentToolCalls[parentToolCallId][existingIndex] = subAgentToolCall
    } else {
      context.subAgentToolCalls[parentToolCallId].push(subAgentToolCall)
      context.subAgentBlocks[parentToolCallId].push({
        type: 'subagent_tool_call',
        toolCall: subAgentToolCall,
        timestamp: Date.now(),
      })
    }
    const { toolCallsById } = get()
    const updated = { ...toolCallsById, [id]: subAgentToolCall }
    set({ toolCallsById: updated })
    updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
    // NOTE: partial (`toolData.partial === true`) and final tool_call events are
    // handled identically; a trailing early-return for partials was dead code
    // at the end of this handler and has been removed.
  },
  // Result of a subagent tool call: flip its state to success/error and
  // propagate the new display everywhere the call is referenced.
  tool_result: (data, context, get, set) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return
    const resultData = asRecord(data?.data)
    const toolCallId: string | undefined = data?.toolCallId || (resultData.id as string | undefined)
    // Anything other than an explicit `success: false` counts as success.
    const success = data?.success !== false
    if (!toolCallId) return
    if (!context.subAgentToolCalls[parentToolCallId]) return
    if (!context.subAgentBlocks[parentToolCallId]) return
    const targetState = success ? ClientToolCallState.success : ClientToolCallState.error
    const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex(
      (tc: CopilotToolCall) => tc.id === toolCallId
    )
    if (existingIndex >= 0) {
      const existing = context.subAgentToolCalls[parentToolCallId][existingIndex]
      const updatedSubAgentToolCall = {
        ...existing,
        state: targetState,
        display: resolveToolDisplay(existing.name, targetState, toolCallId, existing.params),
      }
      context.subAgentToolCalls[parentToolCallId][existingIndex] = updatedSubAgentToolCall
      for (const block of context.subAgentBlocks[parentToolCallId]) {
        if (block.type === 'subagent_tool_call' && block.toolCall?.id === toolCallId) {
          block.toolCall = updatedSubAgentToolCall
          break
        }
      }
      const { toolCallsById } = get()
      if (toolCallsById[toolCallId]) {
        const updatedMap = {
          ...toolCallsById,
          [toolCallId]: updatedSubAgentToolCall,
        }
        set({ toolCallsById: updatedMap })
        logger.info('[SubAgent] Updated subagent tool call state in toolCallsById', {
          toolCallId,
          name: existing.name,
          state: targetState,
        })
      }
    }
    updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
  },
  // Subagent stream finished: flush the final accumulated state to the parent.
  done: (_data, context, get, set) => {
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) return
    updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
  },
}
/**
 * Routes one SSE event to the appropriate handler.
 *
 * Handles the subagent session lifecycle (subagent_start / subagent_end)
 * inline, delegates subagent-tagged events to subAgentSSEHandlers, and sends
 * everything else to the top-level sseHandlers.
 *
 * @returns false once the stream should terminate (context.streamComplete set).
 */
export async function applySseEvent(
  rawData: SSEEvent,
  context: ClientStreamingContext,
  get: () => CopilotStore,
  set: (next: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)) => void
): Promise<boolean> {
  const normalizedEvent = normalizeSseEvent(rawData)
  // Drop duplicate tool events (dedup logic lives in sse-utils).
  if (shouldSkipToolCallEvent(normalizedEvent) || shouldSkipToolResultEvent(normalizedEvent)) {
    return true
  }
  const data = normalizedEvent
  if (data.type === 'subagent_start') {
    const startData = asRecord(data.data)
    const toolCallId = startData.tool_call_id as string | undefined
    if (toolCallId) {
      // All subsequent subagent events are attributed to this parent tool call.
      context.subAgentParentToolCallId = toolCallId
      const { toolCallsById } = get()
      const parentToolCall = toolCallsById[toolCallId]
      if (parentToolCall) {
        const updatedToolCall: CopilotToolCall = {
          ...parentToolCall,
          subAgentStreaming: true,
        }
        const updatedMap = { ...toolCallsById, [toolCallId]: updatedToolCall }
        set({ toolCallsById: updatedMap })
      }
      logger.info('[SSE] Subagent session started', {
        subagent: data.subagent,
        parentToolCallId: toolCallId,
      })
    }
    return true
  }
  if (data.type === 'subagent_end') {
    const parentToolCallId = context.subAgentParentToolCallId
    if (parentToolCallId) {
      const { toolCallsById } = get()
      const parentToolCall = toolCallsById[parentToolCallId]
      if (parentToolCall) {
        // Persist the final accumulated subagent data and clear the streaming flag.
        const updatedToolCall: CopilotToolCall = {
          ...parentToolCall,
          subAgentContent: context.subAgentContent[parentToolCallId] || '',
          subAgentToolCalls: context.subAgentToolCalls[parentToolCallId] ?? [],
          subAgentBlocks: context.subAgentBlocks[parentToolCallId] ?? [],
          subAgentStreaming: false,
        }
        const updatedMap = { ...toolCallsById, [parentToolCallId]: updatedToolCall }
        set({ toolCallsById: updatedMap })
        logger.info('[SSE] Subagent session ended', {
          subagent: data.subagent,
          parentToolCallId,
          contentLength: context.subAgentContent[parentToolCallId]?.length || 0,
          toolCallCount: context.subAgentToolCalls[parentToolCallId]?.length || 0,
        })
      }
    }
    context.subAgentParentToolCallId = undefined
    return true
  }
  if (data.subagent) {
    // Event produced inside an active subagent session.
    const parentToolCallId = context.subAgentParentToolCallId
    if (!parentToolCallId) {
      logger.warn('[SSE] Subagent event without parent tool call ID', {
        type: data.type,
        subagent: data.subagent,
      })
      return true
    }
    logger.info('[SSE] Processing subagent event', {
      type: data.type,
      subagent: data.subagent,
      parentToolCallId,
      hasHandler: !!subAgentSSEHandlers[data.type],
    })
    const subAgentHandler = subAgentSSEHandlers[data.type]
    if (subAgentHandler) {
      await subAgentHandler(data, context, get, set)
    } else {
      logger.warn('[SSE] No handler for subagent event type', { type: data.type })
    }
    return !context.streamComplete
  }
  // Top-level (non-subagent) event.
  const handler = sseHandlers[data.type] || sseHandlers.default
  await handler(data, context, get, set)
  return !context.streamComplete
}

View File

@@ -0,0 +1,45 @@
import type {
ChatContext,
CopilotToolCall,
SubAgentContentBlock,
} from '@/stores/panel/copilot/types'
/**
* A content block used in copilot messages and during streaming.
* Uses a literal type union for `type` to stay compatible with CopilotMessage.
*/
export type ContentBlockType = 'text' | 'thinking' | 'tool_call' | 'contexts'
export interface ClientContentBlock {
  type: ContentBlockType
  // Text payload for 'text' and 'thinking' blocks.
  content?: string
  timestamp: number
  // Present for 'tool_call' blocks.
  toolCall?: CopilotToolCall | null
  // Timing metadata — presumably used to display thinking duration; confirm at render site.
  startTime?: number
  duration?: number
  // Present for 'contexts' blocks.
  contexts?: ChatContext[]
}
/**
 * Mutable per-stream state threaded through every SSE handler while a copilot
 * response is streaming.
 */
export interface StreamingContext {
  // Id of the assistant message currently being streamed.
  messageId: string
  // Full raw text accumulated so far (used as fallback content on stream error).
  accumulatedContent: string
  // Ordered content blocks (text / thinking / tool_call / contexts) built so far.
  contentBlocks: ClientContentBlock[]
  // Trailing text block that new plain text is appended to, if any.
  currentTextBlock: ClientContentBlock | null
  // True while inside a <thinking> tag.
  isInThinkingBlock: boolean
  currentThinkingBlock: ClientContentBlock | null
  // True while inside a <design_workflow> tag.
  isInDesignWorkflowBlock: boolean
  // Accumulated content of the current <design_workflow> block.
  designWorkflowContent: string
  // Raw text held back because it may end in a partial tag.
  pendingContent: string
  newChatId?: string
  // Number of SSE 'done' events received; a done event completes the stream.
  doneEventCount: number
  streamComplete?: boolean
  wasAborted?: boolean
  suppressContinueOption?: boolean
  // Tool call id of the parent while a subagent session is active.
  subAgentParentToolCallId?: string
  // Accumulated subagent text, keyed by parent tool call id.
  subAgentContent: Record<string, string>
  // Tool calls made by subagents, keyed by parent tool call id.
  subAgentToolCalls: Record<string, CopilotToolCall[]>
  // Ordered subagent blocks (text + tool calls), keyed by parent tool call id.
  subAgentBlocks: Record<string, SubAgentContentBlock[]>
  suppressStreamingUpdates?: boolean
}
/** Alias used by the client streaming modules. */
export type ClientStreamingContext = StreamingContext

View File

@@ -12,7 +12,6 @@ const VALID_PROVIDER_IDS: readonly ProviderId[] = [
   'openai',
   'azure-openai',
   'anthropic',
-  'azure-anthropic',
   'google',
   'deepseek',
   'xai',
@@ -109,14 +108,14 @@ function parseBooleanEnv(value: string | undefined): boolean | null {
 export const DEFAULT_COPILOT_CONFIG: CopilotConfig = {
   chat: {
     defaultProvider: 'anthropic',
-    defaultModel: 'claude-3-7-sonnet-latest',
+    defaultModel: 'claude-4.6-opus',
     temperature: 0.1,
     maxTokens: 8192,
     systemPrompt: AGENT_MODE_SYSTEM_PROMPT,
   },
   rag: {
     defaultProvider: 'anthropic',
-    defaultModel: 'claude-3-7-sonnet-latest',
+    defaultModel: 'claude-4.6-opus',
     temperature: 0.1,
     maxTokens: 2000,
     embeddingModel: 'text-embedding-3-small',

View File

@@ -1,2 +1,115 @@
import { env } from '@/lib/core/config/env'
export const SIM_AGENT_API_URL_DEFAULT = 'https://copilot.sim.ai'
export const SIM_AGENT_VERSION = '1.0.3'
/** Resolved copilot backend URL — reads from env with fallback to default. */
const rawAgentUrl = env.SIM_AGENT_API_URL || SIM_AGENT_API_URL_DEFAULT
// Guards against malformed env values: any value that is not an absolute
// http(s) URL silently falls back to the hosted default — NOTE(review): a
// misconfigured env var is ignored without warning; confirm this is intended.
export const SIM_AGENT_API_URL =
  rawAgentUrl.startsWith('http://') || rawAgentUrl.startsWith('https://')
    ? rawAgentUrl
    : SIM_AGENT_API_URL_DEFAULT
// ---------------------------------------------------------------------------
// Redis key prefixes
// ---------------------------------------------------------------------------
/** Redis key prefix for tool call confirmation payloads (polled by waitForToolDecision). */
export const REDIS_TOOL_CALL_PREFIX = 'tool_call:'
/** Redis key prefix for copilot SSE stream buffers. */
export const REDIS_COPILOT_STREAM_PREFIX = 'copilot_stream:'
// ---------------------------------------------------------------------------
// Timeouts
// ---------------------------------------------------------------------------
/** Default timeout for the copilot orchestration stream loop (5 min). */
export const ORCHESTRATION_TIMEOUT_MS = 300_000
/** Timeout for the client-side streaming response handler (10 min). */
export const STREAM_TIMEOUT_MS = 600_000
/** TTL for Redis tool call confirmation entries (24 h). */
export const REDIS_TOOL_CALL_TTL_SECONDS = 86_400
// ---------------------------------------------------------------------------
// Tool decision polling
// ---------------------------------------------------------------------------
/** Initial poll interval when waiting for a user tool decision. */
export const TOOL_DECISION_INITIAL_POLL_MS = 100
/** Maximum poll interval when waiting for a user tool decision. */
export const TOOL_DECISION_MAX_POLL_MS = 3_000
/** Backoff multiplier for the tool decision poll interval. */
export const TOOL_DECISION_POLL_BACKOFF = 1.5
// ---------------------------------------------------------------------------
// Stream resume
// ---------------------------------------------------------------------------
/** Maximum number of resume attempts before giving up. */
export const MAX_RESUME_ATTEMPTS = 3
/** SessionStorage key for persisting active stream metadata across page reloads. */
export const STREAM_STORAGE_KEY = 'copilot_active_stream'
// ---------------------------------------------------------------------------
// Client-side streaming batching
// ---------------------------------------------------------------------------
/** Delay (ms) before processing the next queued message after stream completion. */
export const QUEUE_PROCESS_DELAY_MS = 100
/** Delay (ms) before invalidating subscription queries after stream completion. */
export const SUBSCRIPTION_INVALIDATE_DELAY_MS = 1_000
// ---------------------------------------------------------------------------
// UI helpers
// ---------------------------------------------------------------------------
/** Maximum character length for an optimistic chat title derived from a user message. */
export const OPTIMISTIC_TITLE_MAX_LENGTH = 50
// ---------------------------------------------------------------------------
// Copilot API paths (client-side fetch targets)
// ---------------------------------------------------------------------------
/** POST — send a chat message to the copilot. */
export const COPILOT_CHAT_API_PATH = '/api/copilot/chat'
/** GET — resume/replay a copilot SSE stream. */
export const COPILOT_CHAT_STREAM_API_PATH = '/api/copilot/chat/stream'
/** POST — persist chat messages / plan artifact / config. */
export const COPILOT_UPDATE_MESSAGES_API_PATH = '/api/copilot/chat/update-messages'
/** DELETE — delete a copilot chat. */
export const COPILOT_DELETE_CHAT_API_PATH = '/api/copilot/chat/delete'
/** POST — confirm or reject a tool call. */
export const COPILOT_CONFIRM_API_PATH = '/api/copilot/confirm'
/** POST — forward diff-accepted/rejected stats to the copilot backend. */
export const COPILOT_STATS_API_PATH = '/api/copilot/stats'
/** GET — load checkpoints for a chat. */
export const COPILOT_CHECKPOINTS_API_PATH = '/api/copilot/checkpoints'
/** POST — revert to a checkpoint. */
export const COPILOT_CHECKPOINTS_REVERT_API_PATH = '/api/copilot/checkpoints/revert'
/** GET/POST/DELETE — manage auto-allowed tools. */
export const COPILOT_AUTO_ALLOWED_TOOLS_API_PATH = '/api/copilot/auto-allowed-tools'
/** GET — fetch user credentials for masking. */
export const COPILOT_CREDENTIALS_API_PATH = '/api/copilot/credentials'
// ---------------------------------------------------------------------------
// Dedup limits
// ---------------------------------------------------------------------------
/** Maximum entries in the in-memory SSE tool-event dedup cache. */
export const STREAM_BUFFER_MAX_DEDUP_ENTRIES = 1_000

View File

@@ -0,0 +1,129 @@
import { createLogger } from '@sim/logger'
import { COPILOT_CHECKPOINTS_API_PATH } from '@/lib/copilot/constants'
import type { CopilotMessage, CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types'
import { mergeSubblockState } from '@/stores/workflows/utils'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
const logger = createLogger('CopilotMessageCheckpoints')
/**
 * Builds a normalized WorkflowState snapshot suitable for checkpointing.
 *
 * Merges subblock values into the raw store state, drops blocks missing a
 * type or name, and backfills defaults so the snapshot is self-contained.
 * Returns null when no workflow state is available.
 */
export function buildCheckpointWorkflowState(workflowId: string): WorkflowState | null {
  const rawState = useWorkflowStore.getState().getWorkflowState()
  if (!rawState) return null
  const mergedBlocks = mergeSubblockState(rawState.blocks, workflowId)
  const normalizedBlocks: WorkflowState['blocks'] = {}
  for (const [blockId, block] of Object.entries(mergedBlocks)) {
    // Skip malformed entries that lack a type or a name.
    if (!block?.type || !block?.name) continue
    normalizedBlocks[blockId] = {
      ...block,
      id: block.id || blockId,
      enabled: block.enabled !== undefined ? block.enabled : true,
      horizontalHandles: block.horizontalHandles !== undefined ? block.horizontalHandles : true,
      height: block.height !== undefined ? block.height : 90,
      subBlocks: block.subBlocks ?? {},
      outputs: block.outputs ?? {},
      data: block.data ?? {},
      position: block.position || { x: 0, y: 0 },
    }
  }
  return {
    blocks: normalizedBlocks,
    edges: rawState.edges ?? [],
    loops: rawState.loops ?? {},
    parallels: rawState.parallels ?? {},
    lastSaved: rawState.lastSaved || Date.now(),
    deploymentStatuses: rawState.deploymentStatuses ?? {},
  }
}
/**
 * Persists the workflow snapshot captured for `messageId` as a checkpoint.
 *
 * The snapshot is removed from the store up front so it cannot be submitted
 * twice; if the request fails it is restored so the caller can retry.
 *
 * @returns true when the checkpoint request succeeded, false when there is
 *   nothing to checkpoint or the request failed.
 */
export async function saveMessageCheckpoint(
  messageId: string,
  get: () => CopilotStore,
  set: (partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)) => void
): Promise<boolean> {
  const { workflowId, currentChat, messageSnapshots } = get()
  if (!workflowId || !currentChat?.id) return false
  const snapshot = messageSnapshots[messageId]
  if (!snapshot) return false
  // Optimistically remove the snapshot to prevent duplicate submissions.
  const nextSnapshots = { ...messageSnapshots }
  delete nextSnapshots[messageId]
  set({ messageSnapshots: nextSnapshots })
  try {
    const response = await fetch(COPILOT_CHECKPOINTS_API_PATH, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        workflowId,
        chatId: currentChat.id,
        messageId,
        workflowState: JSON.stringify(snapshot),
      }),
    })
    if (!response.ok) {
      throw new Error(`Failed to create checkpoint: ${response.statusText}`)
    }
    const result = await response.json()
    const newCheckpoint = result.checkpoint
    if (newCheckpoint) {
      // Fix: read checkpoints from current state inside the updater — the value
      // captured before the await could be stale if other checkpoints landed
      // while the request was in flight.
      set((state) => ({
        messageCheckpoints: {
          ...state.messageCheckpoints,
          [messageId]: [newCheckpoint, ...(state.messageCheckpoints[messageId] ?? [])],
        },
      }))
    }
    return true
  } catch (error) {
    logger.error('Failed to create checkpoint from snapshot:', error)
    // Fix: restore the snapshot on failure so a later retry can still create
    // the checkpoint; previously it was discarded even when the POST failed.
    set((state) => ({
      messageSnapshots: { ...state.messageSnapshots, [messageId]: snapshot },
    }))
    return false
  }
}
/**
 * Records `toolCall` and every nested subagent tool call into `map`,
 * keyed by tool call id. Entries without an id are ignored.
 */
export function extractToolCallsRecursively(
  toolCall: CopilotToolCall,
  map: Record<string, CopilotToolCall>
): void {
  if (!toolCall?.id) return
  map[toolCall.id] = toolCall
  const { subAgentBlocks, subAgentToolCalls } = toolCall
  if (Array.isArray(subAgentBlocks)) {
    for (const block of subAgentBlocks) {
      // Only subagent_tool_call blocks carry nested tool calls.
      const nested = block?.type === 'subagent_tool_call' ? block.toolCall : undefined
      if (nested?.id) {
        extractToolCallsRecursively(nested, map)
      }
    }
  }
  if (Array.isArray(subAgentToolCalls)) {
    for (const nested of subAgentToolCalls) {
      extractToolCallsRecursively(nested, map)
    }
  }
}
/**
 * Builds a flat id → tool call index from every tool_call content block in
 * the given messages, including tool calls nested inside subagent data.
 */
export function buildToolCallsById(messages: CopilotMessage[]): Record<string, CopilotToolCall> {
  const toolCallsById: Record<string, CopilotToolCall> = {}
  for (const message of messages) {
    const blocks = message.contentBlocks ?? []
    for (const block of blocks) {
      if (block?.type === 'tool_call' && block.toolCall?.id) {
        extractToolCallsRecursively(block.toolCall, toolCallsById)
      }
    }
  }
  return toolCallsById
}

Some files were not shown because too many files have changed in this diff Show More