Compare commits

..

14 Commits

Author SHA1 Message Date
Siddharth Ganesan
3edb71f9b5 Checkpoint 2026-02-14 12:52:55 -08:00
Siddharth Ganesan
c0e76b374a Fix 2026-02-13 14:18:39 -08:00
Siddharth Ganesan
d12830abfe Checkpoint 2026-02-12 18:42:35 -08:00
Siddharth Ganesan
546c9c3c8a Fix 2026-02-12 17:55:49 -08:00
Siddharth Ganesan
17789c1df6 Fix 2026-02-12 17:28:58 -08:00
Siddharth Ganesan
541665e41a Checkpoint 2026-02-12 17:12:54 -08:00
Siddharth Ganesan
3d5336994b Checkpoint 2026-02-12 15:20:09 -08:00
Siddharth Ganesan
311c4d38f3 Fix 2026-02-12 12:18:05 -08:00
Siddharth Ganesan
e7abcd34df Fix 2026-02-12 12:05:53 -08:00
Siddharth Ganesan
433552019e Checkpoint 2026-02-12 11:51:34 -08:00
Siddharth Ganesan
f733b8dd88 Checkpoint 2026-02-12 11:14:33 -08:00
Siddharth Ganesan
76bd405293 Checkpoint 2026-02-12 10:22:52 -08:00
Siddharth Ganesan
c22bd2caaa SSE interface 2026-02-11 18:22:26 -08:00
Siddharth Ganesan
462aa15341 Implement basic tooling for workflow.apply 2026-02-11 16:30:11 -08:00
196 changed files with 11424 additions and 19238 deletions

View File

@@ -1157,21 +1157,6 @@ export function AirweaveIcon(props: SVGProps<SVGSVGElement>) {
)
}
export function GoogleBooksIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 478.633 540.068'>
<path
fill='#1C51A4'
d='M449.059,218.231L245.519,99.538l-0.061,193.23c0.031,1.504-0.368,2.977-1.166,4.204c-0.798,1.258-1.565,1.995-2.915,2.547c-1.35,0.552-2.792,0.706-4.204,0.399c-1.412-0.307-2.7-1.043-3.713-2.117l-69.166-70.609l-69.381,70.179c-1.013,0.982-2.301,1.657-3.652,1.903c-1.381,0.246-2.792,0.092-4.081-0.491c-1.289-0.583-1.626-0.522-2.394-1.749c-0.767-1.197-1.197-2.608-1.197-4.081L85.031,6.007l-2.915-1.289C43.973-11.638,0,16.409,0,59.891v420.306c0,46.029,49.312,74.782,88.775,51.767l360.285-210.138C488.491,298.782,488.491,241.246,449.059,218.231z'
/>
<path
fill='#80D7FB'
d='M88.805,8.124c-2.179-1.289-4.419-2.363-6.659-3.345l0.123,288.663c0,1.442,0.43,2.854,1.197,4.081c0.767,1.197,1.872,2.148,3.161,2.731c1.289,0.583,2.7,0.736,4.081,0.491c1.381-0.246,2.639-0.921,3.652-1.903l69.749-69.688l69.811,69.749c1.013,1.074,2.301,1.81,3.713,2.117c1.412,0.307,2.884,0.153,4.204-0.399c1.319-0.552,2.455-1.565,3.253-2.792c0.798-1.258,1.197-2.731,1.166-4.204V99.998L88.805,8.124z'
/>
</svg>
)
}
export function GoogleDocsIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg

View File

@@ -38,7 +38,6 @@ import {
GithubIcon,
GitLabIcon,
GmailIcon,
GoogleBooksIcon,
GoogleCalendarIcon,
GoogleDocsIcon,
GoogleDriveIcon,
@@ -173,7 +172,6 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
github_v2: GithubIcon,
gitlab: GitLabIcon,
gmail_v2: GmailIcon,
google_books: GoogleBooksIcon,
google_calendar_v2: GoogleCalendarIcon,
google_docs: GoogleDocsIcon,
google_drive: GoogleDriveIcon,

View File

@@ -1,96 +0,0 @@
---
title: Google Books
description: Search and retrieve book information
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="google_books"
color="#E0E0E0"
/>
## Usage Instructions
Search for books using the Google Books API. Find volumes by title, author, ISBN, or keywords, and retrieve detailed information about specific books including descriptions, ratings, and publication details.
## Tools
### `google_books_volume_search`
Search for books using the Google Books API
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Google Books API key |
| `query` | string | Yes | Search query. Supports special keywords: intitle:, inauthor:, inpublisher:, subject:, isbn: |
| `filter` | string | No | Filter results by availability \(partial, full, free-ebooks, paid-ebooks, ebooks\) |
| `printType` | string | No | Restrict to print type \(all, books, magazines\) |
| `orderBy` | string | No | Sort order \(relevance, newest\) |
| `startIndex` | number | No | Index of the first result to return \(for pagination\) |
| `maxResults` | number | No | Maximum number of results to return \(1-40\) |
| `langRestrict` | string | No | Restrict results to a specific language \(ISO 639-1 code\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `totalItems` | number | Total number of matching results |
| `volumes` | array | List of matching volumes |
| ↳ `id` | string | Volume ID |
| ↳ `title` | string | Book title |
| ↳ `subtitle` | string | Book subtitle |
| ↳ `authors` | array | List of authors |
| ↳ `publisher` | string | Publisher name |
| ↳ `publishedDate` | string | Publication date |
| ↳ `description` | string | Book description |
| ↳ `pageCount` | number | Number of pages |
| ↳ `categories` | array | Book categories |
| ↳ `averageRating` | number | Average rating \(1-5\) |
| ↳ `ratingsCount` | number | Number of ratings |
| ↳ `language` | string | Language code |
| ↳ `previewLink` | string | Link to preview on Google Books |
| ↳ `infoLink` | string | Link to info page |
| ↳ `thumbnailUrl` | string | Book cover thumbnail URL |
| ↳ `isbn10` | string | ISBN-10 identifier |
| ↳ `isbn13` | string | ISBN-13 identifier |
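For orientation, a direct request against the public Google Books volumes endpoint with these parameters might look like the sketch below. The endpoint and query parameter names (`q`, `key`, `maxResults`, `orderBy`) come from the public API; the mapping onto the output fields above is illustrative, not the block's actual implementation.
```typescript
// Illustrative sketch only: calls the public Google Books volumes endpoint directly.
async function searchVolumes(apiKey: string, query: string) {
  const params = new URLSearchParams({
    q: query, // supports intitle:, inauthor:, inpublisher:, subject:, isbn:
    key: apiKey,
    maxResults: '10',
    orderBy: 'relevance',
  })
  const res = await fetch(`https://www.googleapis.com/books/v1/volumes?${params}`)
  if (!res.ok) throw new Error(`Google Books search failed: ${res.status}`)
  const data = await res.json()
  return {
    totalItems: data.totalItems ?? 0,
    volumes: (data.items ?? []).map((item: any) => ({
      id: item.id,
      title: item.volumeInfo?.title,
      authors: item.volumeInfo?.authors ?? [],
      publishedDate: item.volumeInfo?.publishedDate,
      averageRating: item.volumeInfo?.averageRating,
    })),
  }
}
```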
### `google_books_volume_details`
Get detailed information about a specific book volume
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Google Books API key |
| `volumeId` | string | Yes | The ID of the volume to retrieve |
| `projection` | string | No | Projection level \(full, lite\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `id` | string | Volume ID |
| `title` | string | Book title |
| `subtitle` | string | Book subtitle |
| `authors` | array | List of authors |
| `publisher` | string | Publisher name |
| `publishedDate` | string | Publication date |
| `description` | string | Book description |
| `pageCount` | number | Number of pages |
| `categories` | array | Book categories |
| `averageRating` | number | Average rating \(1-5\) |
| `ratingsCount` | number | Number of ratings |
| `language` | string | Language code |
| `previewLink` | string | Link to preview on Google Books |
| `infoLink` | string | Link to info page |
| `thumbnailUrl` | string | Book cover thumbnail URL |
| `isbn10` | string | ISBN-10 identifier |
| `isbn13` | string | ISBN-13 identifier |
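Similarly, a minimal sketch of fetching a single volume by ID from the public endpoint, assuming the `volumeId` and optional `projection` parameter described above:
```typescript
// Illustrative sketch only: fetches one volume from the public Google Books API.
async function getVolumeDetails(apiKey: string, volumeId: string, projection: 'full' | 'lite' = 'lite') {
  const url = `https://www.googleapis.com/books/v1/volumes/${encodeURIComponent(volumeId)}?projection=${projection}&key=${apiKey}`
  const res = await fetch(url)
  if (!res.ok) throw new Error(`Google Books volume lookup failed: ${res.status}`)
  // Fields of interest (title, authors, pageCount, ...) live under volumeInfo.
  return res.json()
}
```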

View File

@@ -33,7 +33,6 @@
"github", "github",
"gitlab", "gitlab",
"gmail", "gmail",
"google_books",
"google_calendar", "google_calendar",
"google_docs", "google_docs",
"google_drive", "google_drive",

View File

@@ -71,7 +71,6 @@ Retrieve an object from an AWS S3 bucket
| --------- | ---- | -------- | ----------- |
| `accessKeyId` | string | Yes | Your AWS Access Key ID |
| `secretAccessKey` | string | Yes | Your AWS Secret Access Key |
| `region` | string | No | Optional region override when URL does not include region \(e.g., us-east-1, eu-west-1\) |
| `s3Uri` | string | Yes | S3 Object URL \(e.g., https://bucket.s3.region.amazonaws.com/path/to/file\) |
#### Output

View File

@@ -79,7 +79,7 @@ Send messages to Slack channels or direct messages. Supports Slack mrkdwn format
| `channel` | string | No | Slack channel ID \(e.g., C1234567890\) |
| `dmUserId` | string | No | Slack user ID for direct messages \(e.g., U1234567890\) |
| `text` | string | Yes | Message text to send \(supports Slack mrkdwn formatting\) |
| `threadTs` | string | No | Thread timestamp to reply to \(creates thread reply\) |
| `thread_ts` | string | No | Thread timestamp to reply to \(creates thread reply\) |
| `files` | file[] | No | Files to attach to the message |
#### Output

View File

@@ -13,7 +13,6 @@ BETTER_AUTH_URL=http://localhost:3000
# NextJS (Required)
NEXT_PUBLIC_APP_URL=http://localhost:3000
# INTERNAL_API_BASE_URL=http://sim-app.default.svc.cluster.local:3000 # Optional: internal URL for server-side /api self-calls; defaults to NEXT_PUBLIC_APP_URL
# Security (Required)
ENCRYPTION_KEY=your_encryption_key # Use `openssl rand -hex 32` to generate, used to encrypt environment variables

View File

@@ -1,7 +1,7 @@
import type { Artifact, Message, PushNotificationConfig, Task, TaskState } from '@a2a-js/sdk'
import { v4 as uuidv4 } from 'uuid'
import { generateInternalToken } from '@/lib/auth/internal'
import { getInternalApiBaseUrl } from '@/lib/core/utils/urls'
import { getBaseUrl } from '@/lib/core/utils/urls'
/** A2A v0.3 JSON-RPC method names */
export const A2A_METHODS = {
@@ -118,7 +118,7 @@ export interface ExecuteRequestResult {
export async function buildExecuteRequest(
config: ExecuteRequestConfig
): Promise<ExecuteRequestResult> {
const url = `${getInternalApiBaseUrl()}/api/workflows/${config.workflowId}/execute`
const url = `${getBaseUrl()}/api/workflows/${config.workflowId}/execute`
const headers: Record<string, string> = { 'Content-Type': 'application/json' }
let useInternalAuth = false

View File

@@ -1,187 +0,0 @@
/**
* POST /api/attribution
*
* Automatic UTM-based referral attribution.
*
* Reads the `sim_utm` cookie (set by proxy on auth pages), matches a campaign
* by UTM specificity, and atomically inserts an attribution record + applies
* bonus credits.
*
* Idempotent — the unique constraint on `userId` prevents double-attribution.
*/
import { db } from '@sim/db'
import { referralAttribution, referralCampaigns, userStats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { nanoid } from 'nanoid'
import { cookies } from 'next/headers'
import { NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { applyBonusCredits } from '@/lib/billing/credits/bonus'
const logger = createLogger('AttributionAPI')
const COOKIE_NAME = 'sim_utm'
const UtmCookieSchema = z.object({
utm_source: z.string().optional(),
utm_medium: z.string().optional(),
utm_campaign: z.string().optional(),
utm_content: z.string().optional(),
referrer_url: z.string().optional(),
landing_page: z.string().optional(),
created_at: z.string().optional(),
})
/**
* Finds the most specific active campaign matching the given UTM params.
* Null fields on a campaign act as wildcards. Ties broken by newest campaign.
*/
async function findMatchingCampaign(utmData: z.infer<typeof UtmCookieSchema>) {
const campaigns = await db
.select()
.from(referralCampaigns)
.where(eq(referralCampaigns.isActive, true))
let bestMatch: (typeof campaigns)[number] | null = null
let bestScore = -1
for (const campaign of campaigns) {
let score = 0
let mismatch = false
const fields = [
{ campaignVal: campaign.utmSource, utmVal: utmData.utm_source },
{ campaignVal: campaign.utmMedium, utmVal: utmData.utm_medium },
{ campaignVal: campaign.utmCampaign, utmVal: utmData.utm_campaign },
{ campaignVal: campaign.utmContent, utmVal: utmData.utm_content },
] as const
for (const { campaignVal, utmVal } of fields) {
if (campaignVal === null) continue
if (campaignVal === utmVal) {
score++
} else {
mismatch = true
break
}
}
if (!mismatch && score > 0) {
if (
score > bestScore ||
(score === bestScore &&
bestMatch &&
campaign.createdAt.getTime() > bestMatch.createdAt.getTime())
) {
bestScore = score
bestMatch = campaign
}
}
}
return bestMatch
}
export async function POST() {
try {
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const cookieStore = await cookies()
const utmCookie = cookieStore.get(COOKIE_NAME)
if (!utmCookie?.value) {
return NextResponse.json({ attributed: false, reason: 'no_utm_cookie' })
}
let utmData: z.infer<typeof UtmCookieSchema>
try {
let decoded: string
try {
decoded = decodeURIComponent(utmCookie.value)
} catch {
decoded = utmCookie.value
}
utmData = UtmCookieSchema.parse(JSON.parse(decoded))
} catch {
logger.warn('Failed to parse UTM cookie', { userId: session.user.id })
cookieStore.delete(COOKIE_NAME)
return NextResponse.json({ attributed: false, reason: 'invalid_cookie' })
}
const matchedCampaign = await findMatchingCampaign(utmData)
if (!matchedCampaign) {
cookieStore.delete(COOKIE_NAME)
return NextResponse.json({ attributed: false, reason: 'no_matching_campaign' })
}
const bonusAmount = Number(matchedCampaign.bonusCreditAmount)
let attributed = false
await db.transaction(async (tx) => {
const [existingStats] = await tx
.select({ id: userStats.id })
.from(userStats)
.where(eq(userStats.userId, session.user.id))
.limit(1)
if (!existingStats) {
await tx.insert(userStats).values({
id: nanoid(),
userId: session.user.id,
})
}
const result = await tx
.insert(referralAttribution)
.values({
id: nanoid(),
userId: session.user.id,
campaignId: matchedCampaign.id,
utmSource: utmData.utm_source || null,
utmMedium: utmData.utm_medium || null,
utmCampaign: utmData.utm_campaign || null,
utmContent: utmData.utm_content || null,
referrerUrl: utmData.referrer_url || null,
landingPage: utmData.landing_page || null,
bonusCreditAmount: bonusAmount.toString(),
})
.onConflictDoNothing({ target: referralAttribution.userId })
.returning({ id: referralAttribution.id })
if (result.length > 0) {
await applyBonusCredits(session.user.id, bonusAmount, tx)
attributed = true
}
})
if (attributed) {
logger.info('Referral attribution created and bonus credits applied', {
userId: session.user.id,
campaignId: matchedCampaign.id,
campaignName: matchedCampaign.name,
utmSource: utmData.utm_source,
utmCampaign: utmData.utm_campaign,
utmContent: utmData.utm_content,
bonusAmount,
})
} else {
logger.info('User already attributed, skipping', { userId: session.user.id })
}
cookieStore.delete(COOKIE_NAME)
return NextResponse.json({
attributed,
bonusAmount: attributed ? bonusAmount : undefined,
reason: attributed ? undefined : 'already_attributed',
})
} catch (error) {
logger.error('Attribution error', { error })
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
}
}

View File

@@ -4,10 +4,20 @@
* @vitest-environment node
*/
import { databaseMock, loggerMock } from '@sim/testing'
import { loggerMock } from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
vi.mock('@sim/db', () => databaseMock)
vi.mock('@sim/db', () => ({
db: {
select: vi.fn().mockReturnThis(),
from: vi.fn().mockReturnThis(),
where: vi.fn().mockReturnThis(),
limit: vi.fn().mockReturnValue([]),
update: vi.fn().mockReturnThis(),
set: vi.fn().mockReturnThis(),
orderBy: vi.fn().mockReturnThis(),
},
}))
vi.mock('@/lib/oauth/oauth', () => ({
refreshOAuthToken: vi.fn(),
@@ -24,36 +34,13 @@ import {
refreshTokenIfNeeded,
} from '@/app/api/auth/oauth/utils'
const mockDb = db as any
const mockDbTyped = db as any
const mockRefreshOAuthToken = refreshOAuthToken as any
/**
* Creates a chainable mock for db.select() calls.
* Returns a nested chain: select() -> from() -> where() -> limit() / orderBy()
*/
function mockSelectChain(limitResult: unknown[]) {
const mockLimit = vi.fn().mockReturnValue(limitResult)
const mockOrderBy = vi.fn().mockReturnValue(limitResult)
const mockWhere = vi.fn().mockReturnValue({ limit: mockLimit, orderBy: mockOrderBy })
const mockFrom = vi.fn().mockReturnValue({ where: mockWhere })
mockDb.select.mockReturnValueOnce({ from: mockFrom })
return { mockFrom, mockWhere, mockLimit }
}
/**
* Creates a chainable mock for db.update() calls.
* Returns a nested chain: update() -> set() -> where()
*/
function mockUpdateChain() {
const mockWhere = vi.fn().mockResolvedValue({})
const mockSet = vi.fn().mockReturnValue({ where: mockWhere })
mockDb.update.mockReturnValueOnce({ set: mockSet })
return { mockSet, mockWhere }
}
describe('OAuth Utils', () => {
beforeEach(() => {
vi.clearAllMocks()
mockDbTyped.limit.mockReturnValue([])
})
afterEach(() => {
@@ -63,20 +50,20 @@ describe('OAuth Utils', () => {
describe('getCredential', () => {
it('should return credential when found', async () => {
const mockCredential = { id: 'credential-id', userId: 'test-user-id' }
const { mockFrom, mockWhere, mockLimit } = mockSelectChain([mockCredential])
mockDbTyped.limit.mockReturnValueOnce([mockCredential])
const credential = await getCredential('request-id', 'credential-id', 'test-user-id')
expect(mockDb.select).toHaveBeenCalled()
expect(mockDbTyped.select).toHaveBeenCalled()
expect(mockFrom).toHaveBeenCalled()
expect(mockDbTyped.from).toHaveBeenCalled()
expect(mockWhere).toHaveBeenCalled()
expect(mockDbTyped.where).toHaveBeenCalled()
expect(mockLimit).toHaveBeenCalledWith(1)
expect(mockDbTyped.limit).toHaveBeenCalledWith(1)
expect(credential).toEqual(mockCredential)
})
it('should return undefined when credential is not found', async () => {
mockSelectChain([])
mockDbTyped.limit.mockReturnValueOnce([])
const credential = await getCredential('request-id', 'nonexistent-id', 'test-user-id')
@@ -115,12 +102,11 @@ describe('OAuth Utils', () => {
refreshToken: 'new-refresh-token',
})
mockUpdateChain()
const result = await refreshTokenIfNeeded('request-id', mockCredential, 'credential-id')
expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token')
expect(mockDb.update).toHaveBeenCalled()
expect(mockDbTyped.update).toHaveBeenCalled()
expect(mockDbTyped.set).toHaveBeenCalled()
expect(result).toEqual({ accessToken: 'new-token', refreshed: true })
})
@@ -166,7 +152,7 @@ describe('OAuth Utils', () => {
providerId: 'google',
userId: 'test-user-id',
}
mockSelectChain([mockCredential])
mockDbTyped.limit.mockReturnValueOnce([mockCredential])
const token = await refreshAccessTokenIfNeeded('credential-id', 'test-user-id', 'request-id')
@@ -183,8 +169,7 @@ describe('OAuth Utils', () => {
providerId: 'google',
userId: 'test-user-id',
}
mockSelectChain([mockCredential])
mockDbTyped.limit.mockReturnValueOnce([mockCredential])
mockUpdateChain()
mockRefreshOAuthToken.mockResolvedValueOnce({
accessToken: 'new-token',
@@ -195,12 +180,13 @@ describe('OAuth Utils', () => {
const token = await refreshAccessTokenIfNeeded('credential-id', 'test-user-id', 'request-id')
expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token')
expect(mockDb.update).toHaveBeenCalled()
expect(mockDbTyped.update).toHaveBeenCalled()
expect(mockDbTyped.set).toHaveBeenCalled()
expect(token).toBe('new-token')
})
it('should return null if credential not found', async () => {
mockSelectChain([])
mockDbTyped.limit.mockReturnValueOnce([])
const token = await refreshAccessTokenIfNeeded('nonexistent-id', 'test-user-id', 'request-id')
@@ -216,7 +202,7 @@ describe('OAuth Utils', () => {
providerId: 'google',
userId: 'test-user-id',
}
mockSelectChain([mockCredential])
mockDbTyped.limit.mockReturnValueOnce([mockCredential])
mockRefreshOAuthToken.mockResolvedValueOnce(null)

View File

@@ -1,145 +1,81 @@
import { db } from '@sim/db'
import { settings } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import { authenticateCopilotRequestSessionOnly } from '@/lib/copilot/request-helpers'
import { env } from '@/lib/core/config/env'
const logger = createLogger('CopilotAutoAllowedToolsAPI')
/** function copilotHeaders(): HeadersInit {
* GET - Fetch user's auto-allowed integration tools const headers: Record<string, string> = {
*/ 'Content-Type': 'application/json',
export async function GET() {
try {
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const userId = session.user.id
const [userSettings] = await db
.select()
.from(settings)
.where(eq(settings.userId, userId))
.limit(1)
if (userSettings) {
const autoAllowedTools = (userSettings.copilotAutoAllowedTools as string[]) || []
return NextResponse.json({ autoAllowedTools })
}
await db.insert(settings).values({
id: userId,
userId,
copilotAutoAllowedTools: [],
})
return NextResponse.json({ autoAllowedTools: [] })
} catch (error) {
logger.error('Failed to fetch auto-allowed tools', { error })
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
} }
if (env.COPILOT_API_KEY) {
headers['x-api-key'] = env.COPILOT_API_KEY
}
return headers
} }
/**
* POST - Add a tool to the auto-allowed list
*/
export async function POST(request: NextRequest) {
try {
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const userId = session.user.id
const body = await request.json()
if (!body.toolId || typeof body.toolId !== 'string') {
return NextResponse.json({ error: 'toolId must be a string' }, { status: 400 })
}
const toolId = body.toolId
const [existing] = await db.select().from(settings).where(eq(settings.userId, userId)).limit(1)
if (existing) {
const currentTools = (existing.copilotAutoAllowedTools as string[]) || []
if (!currentTools.includes(toolId)) {
const updatedTools = [...currentTools, toolId]
await db
.update(settings)
.set({
copilotAutoAllowedTools: updatedTools,
updatedAt: new Date(),
})
.where(eq(settings.userId, userId))
logger.info('Added tool to auto-allowed list', { userId, toolId })
return NextResponse.json({ success: true, autoAllowedTools: updatedTools })
}
return NextResponse.json({ success: true, autoAllowedTools: currentTools })
}
await db.insert(settings).values({
id: userId,
userId,
copilotAutoAllowedTools: [toolId],
})
logger.info('Created settings and added tool to auto-allowed list', { userId, toolId })
return NextResponse.json({ success: true, autoAllowedTools: [toolId] })
} catch (error) {
logger.error('Failed to add auto-allowed tool', { error })
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
}
}
/**
* DELETE - Remove a tool from the auto-allowed list
*/
export async function DELETE(request: NextRequest) {
const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly()
if (!isAuthenticated || !userId) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const toolIdFromQuery = new URL(request.url).searchParams.get('toolId') || undefined
const toolIdFromBody = await request
.json()
.then((body) => (typeof body?.toolId === 'string' ? body.toolId : undefined))
.catch(() => undefined)
const toolId = toolIdFromBody || toolIdFromQuery
if (!toolId) {
return NextResponse.json({ error: 'toolId is required' }, { status: 400 })
}
try {
const session = await getSession() const res = await fetch(`${SIM_AGENT_API_URL}/api/tool-preferences/auto-allowed`, {
method: 'DELETE',
headers: copilotHeaders(),
body: JSON.stringify({
userId,
toolId,
}),
})
if (!session?.user?.id) { const payload = await res.json().catch(() => ({}))
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) if (!res.ok) {
logger.warn('Failed to remove auto-allowed tool via copilot backend', {
status: res.status,
userId,
toolId,
})
return NextResponse.json(
{
success: false,
error: payload?.error || 'Failed to remove auto-allowed tool',
autoAllowedTools: [],
},
{ status: res.status }
)
} }
const userId = session.user.id return NextResponse.json({
const { searchParams } = new URL(request.url) success: true,
const toolId = searchParams.get('toolId') autoAllowedTools: Array.isArray(payload?.autoAllowedTools) ? payload.autoAllowedTools : [],
})
if (!toolId) {
return NextResponse.json({ error: 'toolId query parameter is required' }, { status: 400 })
}
const [existing] = await db.select().from(settings).where(eq(settings.userId, userId)).limit(1)
if (existing) {
const currentTools = (existing.copilotAutoAllowedTools as string[]) || []
const updatedTools = currentTools.filter((t) => t !== toolId)
await db
.update(settings)
.set({
copilotAutoAllowedTools: updatedTools,
updatedAt: new Date(),
})
.where(eq(settings.userId, userId))
logger.info('Removed tool from auto-allowed list', { userId, toolId })
return NextResponse.json({ success: true, autoAllowedTools: updatedTools })
}
return NextResponse.json({ success: true, autoAllowedTools: [] })
} catch (error) {
logger.error('Failed to remove auto-allowed tool', { error }) logger.error('Error removing auto-allowed tool', {
return NextResponse.json({ error: 'Internal server error' }, { status: 500 }) userId,
toolId,
error: error instanceof Error ? error.message : String(error),
})
return NextResponse.json(
{
success: false,
error: 'Failed to remove auto-allowed tool',
autoAllowedTools: [],
},
{ status: 500 }
)
}
}

View File

@@ -28,13 +28,24 @@ import { resolveWorkflowIdForUser } from '@/lib/workflows/utils'
const logger = createLogger('CopilotChatAPI')
function truncateForLog(value: string, maxLength = 120): string {
if (!value || maxLength <= 0) return ''
return value.length <= maxLength ? value : `${value.slice(0, maxLength)}...`
}
async function requestChatTitleFromCopilot(params: {
message: string
model: string
provider?: string
}): Promise<string | null> {
const { message, model, provider } = params
if (!message || !model) return null
if (!message || !model) {
logger.warn('Skipping chat title request because message/model is missing', {
hasMessage: !!message,
hasModel: !!model,
})
return null
}
const headers: Record<string, string> = {
'Content-Type': 'application/json',
@@ -44,6 +55,13 @@ async function requestChatTitleFromCopilot(params: {
}
try {
logger.info('Requesting chat title from copilot backend', {
model,
provider: provider || null,
messageLength: message.length,
messagePreview: truncateForLog(message),
})
const response = await fetch(`${SIM_AGENT_API_URL}/api/generate-chat-title`, {
method: 'POST',
headers,
@@ -63,10 +81,32 @@ async function requestChatTitleFromCopilot(params: {
return null
}
const title = typeof payload?.title === 'string' ? payload.title.trim() : ''
const rawTitle = typeof payload?.title === 'string' ? payload.title : ''
const title = rawTitle.trim()
logger.info('Received chat title response from copilot backend', {
status: response.status,
hasRawTitle: !!rawTitle,
rawTitle,
normalizedTitle: title,
messagePreview: truncateForLog(message),
})
if (!title) {
logger.warn('Copilot backend returned empty chat title', {
payload,
model,
provider: provider || null,
})
}
return title || null
} catch (error) {
logger.error('Error generating chat title:', error)
logger.error('Error generating chat title:', {
error,
model,
provider: provider || null,
messagePreview: truncateForLog(message),
})
return null
}
}
@@ -85,7 +125,7 @@ const ChatMessageSchema = z.object({
chatId: z.string().optional(),
workflowId: z.string().optional(),
workflowName: z.string().optional(),
model: z.string().optional().default('claude-opus-4-5'),
model: z.string().optional().default('claude-opus-4-6'),
mode: z.enum(COPILOT_REQUEST_MODES).optional().default('agent'),
prefetch: z.boolean().optional(),
createNewChat: z.boolean().optional().default(false),
@@ -238,7 +278,8 @@ export async function POST(req: NextRequest) {
let currentChat: any = null
let conversationHistory: any[] = []
let actualChatId = chatId
const selectedModel = model || 'claude-opus-4-5'
let chatWasCreatedForRequest = false
const selectedModel = model || 'claude-opus-4-6'
if (chatId || createNewChat) {
const chatResult = await resolveOrCreateChat({
@@ -249,6 +290,7 @@ export async function POST(req: NextRequest) {
})
currentChat = chatResult.chat
actualChatId = chatResult.chatId || chatId
chatWasCreatedForRequest = chatResult.isNew
const history = buildConversationHistory(
chatResult.conversationHistory,
(chatResult.chat?.conversationId as string | undefined) || conversationId
@@ -256,6 +298,18 @@ export async function POST(req: NextRequest) {
conversationHistory = history.history
}
const shouldGenerateTitleForRequest =
!!actualChatId &&
chatWasCreatedForRequest &&
!currentChat?.title &&
conversationHistory.length === 0
const titleGenerationParams = {
message,
model: selectedModel,
provider,
}
const effectiveMode = mode === 'agent' ? 'build' : mode
const effectiveConversationId =
(currentChat?.conversationId as string | undefined) || conversationId
@@ -348,10 +402,22 @@ export async function POST(req: NextRequest) {
await pushEvent({ type: 'chat_id', chatId: actualChatId })
}
if (actualChatId && !currentChat?.title && conversationHistory.length === 0) {
if (shouldGenerateTitleForRequest) {
requestChatTitleFromCopilot({ message, model: selectedModel, provider })
logger.info(`[${tracker.requestId}] Starting title generation for streaming response`, {
chatId: actualChatId,
model: titleGenerationParams.model,
provider: provider || null,
messageLength: message.length,
messagePreview: truncateForLog(message),
chatWasCreatedForRequest,
})
requestChatTitleFromCopilot(titleGenerationParams)
.then(async (title) => {
if (title) {
logger.info(`[${tracker.requestId}] Generated title for streaming response`, {
chatId: actualChatId,
title,
})
await db
.update(copilotChats)
.set({
@@ -359,12 +425,30 @@ export async function POST(req: NextRequest) {
updatedAt: new Date(),
})
.where(eq(copilotChats.id, actualChatId!))
await pushEvent({ type: 'title_updated', title })
await pushEvent({ type: 'title_updated', title, chatId: actualChatId })
logger.info(`[${tracker.requestId}] Emitted title_updated SSE event`, {
chatId: actualChatId,
title,
})
} else {
logger.warn(`[${tracker.requestId}] No title returned for streaming response`, {
chatId: actualChatId,
model: selectedModel,
})
}
})
.catch((error) => {
logger.error(`[${tracker.requestId}] Title generation failed:`, error)
})
} else if (actualChatId && !chatWasCreatedForRequest) {
logger.info(
`[${tracker.requestId}] Skipping title generation because chat already exists`,
{
chatId: actualChatId,
model: titleGenerationParams.model,
provider: provider || null,
}
)
}
try {
@@ -479,9 +563,9 @@ export async function POST(req: NextRequest) {
const updatedMessages = [...conversationHistory, userMessage, assistantMessage]
// Start title generation in parallel if this is first message (non-streaming)
if (actualChatId && !currentChat.title && conversationHistory.length === 0) {
if (shouldGenerateTitleForRequest) {
logger.info(`[${tracker.requestId}] Starting title generation for non-streaming response`)
requestChatTitleFromCopilot({ message, model: selectedModel, provider })
requestChatTitleFromCopilot(titleGenerationParams)
.then(async (title) => {
if (title) {
await db
@@ -492,11 +576,22 @@ export async function POST(req: NextRequest) {
})
.where(eq(copilotChats.id, actualChatId!))
logger.info(`[${tracker.requestId}] Generated and saved title: ${title}`)
} else {
logger.warn(`[${tracker.requestId}] No title returned for non-streaming response`, {
chatId: actualChatId,
model: selectedModel,
})
}
})
.catch((error) => {
logger.error(`[${tracker.requestId}] Title generation failed:`, error)
})
} else if (actualChatId && !chatWasCreatedForRequest) {
logger.info(`[${tracker.requestId}] Skipping title generation because chat already exists`, {
chatId: actualChatId,
model: titleGenerationParams.model,
provider: provider || null,
})
}
// Update chat in database immediately (without blocking for title)

View File

@@ -18,9 +18,9 @@ describe('Copilot Checkpoints Revert API Route', () => {
setupCommonApiMocks()
mockCryptoUuid()
// Mock getBaseUrl to return localhost for tests
vi.doMock('@/lib/core/utils/urls', () => ({
getBaseUrl: vi.fn(() => 'http://localhost:3000'),
getInternalApiBaseUrl: vi.fn(() => 'http://localhost:3000'),
getBaseDomain: vi.fn(() => 'localhost:3000'),
getEmailDomain: vi.fn(() => 'localhost:3000'),
}))

View File

@@ -11,7 +11,7 @@ import {
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import { getInternalApiBaseUrl } from '@/lib/core/utils/urls'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
import { isUuidV4 } from '@/executor/constants'
@@ -99,7 +99,7 @@ export async function POST(request: NextRequest) {
}
const stateResponse = await fetch(
`${getInternalApiBaseUrl()}/api/workflows/${checkpoint.workflowId}/state`,
`${getBaseUrl()}/api/workflows/${checkpoint.workflowId}/state`,
{
method: 'PUT',
headers: {

View File

@@ -1,7 +1,11 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { REDIS_TOOL_CALL_PREFIX, REDIS_TOOL_CALL_TTL_SECONDS } from '@/lib/copilot/constants'
import {
REDIS_TOOL_CALL_PREFIX,
REDIS_TOOL_CALL_TTL_SECONDS,
SIM_AGENT_API_URL,
} from '@/lib/copilot/constants'
import {
authenticateCopilotRequestSessionOnly,
createBadRequestResponse,
@@ -10,6 +14,7 @@ import {
createUnauthorizedResponse,
type NotificationStatus,
} from '@/lib/copilot/request-helpers'
import { env } from '@/lib/core/config/env'
import { getRedisClient } from '@/lib/core/config/redis'
const logger = createLogger('CopilotConfirmAPI')
@@ -21,6 +26,8 @@ const ConfirmationSchema = z.object({
errorMap: () => ({ message: 'Invalid notification status' }),
}),
message: z.string().optional(), // Optional message for background moves or additional context
toolName: z.string().optional(),
remember: z.boolean().optional(),
})
/**
@@ -57,6 +64,44 @@ async function updateToolCallStatus(
}
}
async function saveAutoAllowedToolPreference(userId: string, toolName: string): Promise<boolean> {
const headers: Record<string, string> = {
'Content-Type': 'application/json',
}
if (env.COPILOT_API_KEY) {
headers['x-api-key'] = env.COPILOT_API_KEY
}
try {
const response = await fetch(`${SIM_AGENT_API_URL}/api/tool-preferences/auto-allowed`, {
method: 'POST',
headers,
body: JSON.stringify({
userId,
toolId: toolName,
}),
})
if (!response.ok) {
logger.warn('Failed to persist auto-allowed tool preference', {
userId,
toolName,
status: response.status,
})
return false
}
return true
} catch (error) {
logger.error('Error persisting auto-allowed tool preference', {
userId,
toolName,
error: error instanceof Error ? error.message : String(error),
})
return false
}
}
/**
* POST /api/copilot/confirm
* Update tool call status (Accept/Reject)
@@ -74,7 +119,7 @@ export async function POST(req: NextRequest) {
}
const body = await req.json()
const { toolCallId, status, message } = ConfirmationSchema.parse(body)
const { toolCallId, status, message, toolName, remember } = ConfirmationSchema.parse(body)
// Update the tool call status in Redis
const updated = await updateToolCallStatus(toolCallId, status, message)
@@ -90,14 +135,22 @@ export async function POST(req: NextRequest) {
return createBadRequestResponse('Failed to update tool call status or tool call not found')
}
const duration = tracker.getDuration()
let rememberSaved = false
if (status === 'accepted' && remember === true && toolName && authenticatedUserId) {
rememberSaved = await saveAutoAllowedToolPreference(authenticatedUserId, toolName)
}
return NextResponse.json({
const response: Record<string, unknown> = {
success: true,
message: message || `Tool call ${toolCallId} has been ${status.toLowerCase()}`,
toolCallId,
status,
})
}
if (remember === true) {
response.rememberSaved = rememberSaved
}
return NextResponse.json(response)
} catch (error) {
const duration = tracker.getDuration()

View File

@@ -4,12 +4,16 @@
*
* @vitest-environment node
*/
import { createEnvMock, databaseMock, loggerMock } from '@sim/testing'
import { createEnvMock, createMockLogger } from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest'
const loggerMock = vi.hoisted(() => ({
createLogger: () => createMockLogger(),
}))
vi.mock('drizzle-orm')
vi.mock('@sim/logger', () => loggerMock)
vi.mock('@sim/db', () => databaseMock)
vi.mock('@sim/db')
vi.mock('@/lib/knowledge/documents/utils', () => ({
retryWithExponentialBackoff: (fn: any) => fn(),
}))

View File

@@ -0,0 +1,89 @@
/**
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
describe('mcp copilot route manifest contract', () => {
const previousInternalSecret = process.env.INTERNAL_API_SECRET
const previousAgentUrl = process.env.SIM_AGENT_API_URL
const previousFetch = global.fetch
beforeEach(() => {
vi.resetModules()
process.env.INTERNAL_API_SECRET = 'x'.repeat(32)
process.env.SIM_AGENT_API_URL = 'https://copilot.sim.ai'
})
afterEach(() => {
vi.restoreAllMocks()
global.fetch = previousFetch
if (previousInternalSecret === undefined) {
delete process.env.INTERNAL_API_SECRET
} else {
process.env.INTERNAL_API_SECRET = previousInternalSecret
}
if (previousAgentUrl === undefined) {
delete process.env.SIM_AGENT_API_URL
} else {
process.env.SIM_AGENT_API_URL = previousAgentUrl
}
})
it('loads and caches tool manifest from copilot backend', async () => {
const payload = {
directTools: [
{
name: 'list_workspaces',
description: 'List workspaces',
inputSchema: { type: 'object', properties: {} },
toolId: 'list_user_workspaces',
},
],
subagentTools: [
{
name: 'sim_build',
description: 'Build workflows',
inputSchema: { type: 'object', properties: {} },
agentId: 'build',
},
],
generatedAt: '2026-02-12T00:00:00Z',
}
const fetchSpy = vi.spyOn(global, 'fetch').mockResolvedValue(
new Response(JSON.stringify(payload), {
status: 200,
headers: { 'Content-Type': 'application/json' },
})
)
const mod = await import('./route')
mod.clearMcpToolManifestCacheForTests()
const first = await mod.getMcpToolManifest()
const second = await mod.getMcpToolManifest()
expect(first).toEqual(payload)
expect(second).toEqual(payload)
expect(fetchSpy).toHaveBeenCalledTimes(1)
expect(fetchSpy.mock.calls[0]?.[0]).toBe('https://copilot.sim.ai/api/mcp/tools/manifest')
})
it('rejects invalid manifest payloads from copilot backend', async () => {
const fetchSpy = vi.spyOn(global, 'fetch').mockResolvedValue(
new Response(JSON.stringify({ tools: [] }), {
status: 200,
headers: { 'Content-Type': 'application/json' },
})
)
const mod = await import('./route')
mod.clearMcpToolManifestCacheForTests()
await expect(mod.fetchMcpToolManifestFromCopilot()).rejects.toThrow(
'invalid manifest payload from copilot'
)
expect(fetchSpy).toHaveBeenCalledTimes(1)
})
})

View File

@@ -28,7 +28,6 @@ import {
executeToolServerSide,
prepareExecutionContext,
} from '@/lib/copilot/orchestrator/tool-executor'
import { DIRECT_TOOL_DEFS, SUBAGENT_TOOL_DEFS } from '@/lib/copilot/tools/mcp/definitions'
import { env } from '@/lib/core/config/env'
import { RateLimiter } from '@/lib/core/rate-limiter'
import {
@@ -38,7 +37,33 @@ import {
const logger = createLogger('CopilotMcpAPI')
const mcpRateLimiter = new RateLimiter()
const DEFAULT_COPILOT_MODEL = 'claude-opus-4-5'
const DEFAULT_COPILOT_MODEL = 'claude-opus-4-6'
const MCP_TOOL_MANIFEST_CACHE_TTL_MS = 60_000
type McpDirectToolDef = {
name: string
description: string
inputSchema: { type: 'object'; properties?: Record<string, unknown>; required?: string[] }
toolId: string
}
type McpSubagentToolDef = {
name: string
description: string
inputSchema: { type: 'object'; properties?: Record<string, unknown>; required?: string[] }
agentId: string
}
type McpToolManifest = {
directTools: McpDirectToolDef[]
subagentTools: McpSubagentToolDef[]
generatedAt?: string
}
let cachedMcpToolManifest: {
value: McpToolManifest
expiresAt: number
} | null = null
export const dynamic = 'force-dynamic'
export const runtime = 'nodejs'
@@ -112,6 +137,58 @@ async function authenticateCopilotApiKey(apiKey: string): Promise<CopilotKeyAuth
}
}
export function isMcpToolManifest(value: unknown): value is McpToolManifest {
if (!value || typeof value !== 'object') return false
const payload = value as Record<string, unknown>
return Array.isArray(payload.directTools) && Array.isArray(payload.subagentTools)
}
export async function fetchMcpToolManifestFromCopilot(): Promise<McpToolManifest> {
const internalSecret = env.INTERNAL_API_SECRET
if (!internalSecret) {
throw new Error('INTERNAL_API_SECRET not configured')
}
const res = await fetch(`${SIM_AGENT_API_URL}/api/mcp/tools/manifest`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
'x-api-key': internalSecret,
},
signal: AbortSignal.timeout(10_000),
})
if (!res.ok) {
const bodyText = await res.text().catch(() => '')
throw new Error(`manifest fetch failed (${res.status}): ${bodyText || res.statusText}`)
}
const payload: unknown = await res.json()
if (!isMcpToolManifest(payload)) {
throw new Error('invalid manifest payload from copilot')
}
return payload
}
export async function getMcpToolManifest(): Promise<McpToolManifest> {
const now = Date.now()
if (cachedMcpToolManifest && cachedMcpToolManifest.expiresAt > now) {
return cachedMcpToolManifest.value
}
const manifest = await fetchMcpToolManifestFromCopilot()
cachedMcpToolManifest = {
value: manifest,
expiresAt: now + MCP_TOOL_MANIFEST_CACHE_TTL_MS,
}
return manifest
}
export function clearMcpToolManifestCacheForTests(): void {
cachedMcpToolManifest = null
}
/**
* MCP Server instructions that guide LLMs on how to use the Sim copilot tools.
* This is included in the initialize response to help external LLMs understand
@@ -380,13 +457,15 @@ function buildMcpServer(abortSignal?: AbortSignal): Server {
)
server.setRequestHandler(ListToolsRequestSchema, async () => {
const directTools = DIRECT_TOOL_DEFS.map((tool) => ({
const manifest = await getMcpToolManifest()
const directTools = manifest.directTools.map((tool) => ({
name: tool.name,
description: tool.description,
inputSchema: tool.inputSchema,
}))
const subagentTools = SUBAGENT_TOOL_DEFS.map((tool) => ({
const subagentTools = manifest.subagentTools.map((tool) => ({
name: tool.name,
description: tool.description,
inputSchema: tool.inputSchema,
@@ -455,12 +534,15 @@ function buildMcpServer(abortSignal?: AbortSignal): Server {
throw new McpError(ErrorCode.InvalidParams, 'Tool name required')
}
const manifest = await getMcpToolManifest()
const result = await handleToolsCall(
{
name: params.name,
arguments: params.arguments,
},
authResult.userId,
manifest,
abortSignal
)
@@ -556,16 +638,17 @@ function trackMcpCopilotCall(userId: string): void {
async function handleToolsCall(
params: { name: string; arguments?: Record<string, unknown> },
userId: string,
manifest: McpToolManifest,
abortSignal?: AbortSignal
): Promise<CallToolResult> {
const args = params.arguments || {}
const directTool = DIRECT_TOOL_DEFS.find((tool) => tool.name === params.name)
const directTool = manifest.directTools.find((tool) => tool.name === params.name)
if (directTool) {
return handleDirectToolCall(directTool, args, userId)
}
const subagentTool = SUBAGENT_TOOL_DEFS.find((tool) => tool.name === params.name)
const subagentTool = manifest.subagentTools.find((tool) => tool.name === params.name)
if (subagentTool) {
return handleSubagentToolCall(subagentTool, args, userId, abortSignal)
}
@@ -574,7 +657,7 @@ async function handleToolsCall(
}
async function handleDirectToolCall(
toolDef: (typeof DIRECT_TOOL_DEFS)[number],
toolDef: McpDirectToolDef,
args: Record<string, unknown>,
userId: string
): Promise<CallToolResult> {
@@ -711,7 +794,7 @@ async function handleBuildToolCall(
}
async function handleSubagentToolCall(
toolDef: (typeof SUBAGENT_TOOL_DEFS)[number],
toolDef: McpSubagentToolDef,
args: Record<string, unknown>,
userId: string,
abortSignal?: AbortSignal

View File

@@ -72,7 +72,6 @@ describe('MCP Serve Route', () => {
}))
vi.doMock('@/lib/core/utils/urls', () => ({
getBaseUrl: () => 'http://localhost:3000',
getInternalApiBaseUrl: () => 'http://localhost:3000',
}))
vi.doMock('@/lib/core/execution-limits', () => ({
getMaxExecutionTimeout: () => 10_000,

View File

@@ -22,7 +22,7 @@ import { type NextRequest, NextResponse } from 'next/server'
import { type AuthResult, checkHybridAuth } from '@/lib/auth/hybrid'
import { generateInternalToken } from '@/lib/auth/internal'
import { getMaxExecutionTimeout } from '@/lib/core/execution-limits'
import { getInternalApiBaseUrl } from '@/lib/core/utils/urls'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
const logger = createLogger('WorkflowMcpServeAPI')
@@ -285,7 +285,7 @@ async function handleToolsCall(
)
}
const executeUrl = `${getInternalApiBaseUrl()}/api/workflows/${tool.workflowId}/execute`
const executeUrl = `${getBaseUrl()}/api/workflows/${tool.workflowId}/execute`
const headers: Record<string, string> = { 'Content-Type': 'application/json' }
if (publicServerOwnerId) {

View File

@@ -1,170 +0,0 @@
/**
* POST /api/referral-code/redeem
*
* Redeem a referral/promo code to receive bonus credits.
*
* Body:
* - code: string — The referral code to redeem
*
* Response: { redeemed: boolean, bonusAmount?: number, error?: string }
*
* Constraints:
* - Enterprise users cannot redeem codes
* - One redemption per user, ever (unique constraint on userId)
* - One redemption per organization for team users (partial unique on organizationId)
*/
import { db } from '@sim/db'
import { referralAttribution, referralCampaigns, userStats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import { nanoid } from 'nanoid'
import { NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { applyBonusCredits } from '@/lib/billing/credits/bonus'
const logger = createLogger('ReferralCodeRedemption')
const RedeemCodeSchema = z.object({
code: z.string().min(1, 'Code is required'),
})
export async function POST(request: Request) {
try {
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const body = await request.json()
const { code } = RedeemCodeSchema.parse(body)
const subscription = await getHighestPrioritySubscription(session.user.id)
if (subscription?.plan === 'enterprise') {
return NextResponse.json({
redeemed: false,
error: 'Enterprise accounts cannot redeem referral codes',
})
}
const isTeam = subscription?.plan === 'team'
const orgId = isTeam ? subscription.referenceId : null
const normalizedCode = code.trim().toUpperCase()
const [campaign] = await db
.select()
.from(referralCampaigns)
.where(and(eq(referralCampaigns.code, normalizedCode), eq(referralCampaigns.isActive, true)))
.limit(1)
if (!campaign) {
logger.info('Invalid code redemption attempt', {
userId: session.user.id,
code: normalizedCode,
})
return NextResponse.json({ error: 'Invalid or expired code' }, { status: 404 })
}
const [existingUserAttribution] = await db
.select({ id: referralAttribution.id })
.from(referralAttribution)
.where(eq(referralAttribution.userId, session.user.id))
.limit(1)
if (existingUserAttribution) {
return NextResponse.json({
redeemed: false,
error: 'You have already redeemed a code',
})
}
if (orgId) {
const [existingOrgAttribution] = await db
.select({ id: referralAttribution.id })
.from(referralAttribution)
.where(eq(referralAttribution.organizationId, orgId))
.limit(1)
if (existingOrgAttribution) {
return NextResponse.json({
redeemed: false,
error: 'A code has already been redeemed for your organization',
})
}
}
const bonusAmount = Number(campaign.bonusCreditAmount)
let redeemed = false
await db.transaction(async (tx) => {
const [existingStats] = await tx
.select({ id: userStats.id })
.from(userStats)
.where(eq(userStats.userId, session.user.id))
.limit(1)
if (!existingStats) {
await tx.insert(userStats).values({
id: nanoid(),
userId: session.user.id,
})
}
const result = await tx
.insert(referralAttribution)
.values({
id: nanoid(),
userId: session.user.id,
organizationId: orgId,
campaignId: campaign.id,
utmSource: null,
utmMedium: null,
utmCampaign: null,
utmContent: null,
referrerUrl: null,
landingPage: null,
bonusCreditAmount: bonusAmount.toString(),
})
.onConflictDoNothing()
.returning({ id: referralAttribution.id })
if (result.length > 0) {
await applyBonusCredits(session.user.id, bonusAmount, tx)
redeemed = true
}
})
if (redeemed) {
logger.info('Referral code redeemed', {
userId: session.user.id,
organizationId: orgId,
code: normalizedCode,
campaignId: campaign.id,
campaignName: campaign.name,
bonusAmount,
})
}
if (!redeemed) {
return NextResponse.json({
redeemed: false,
error: 'You have already redeemed a code',
})
}
return NextResponse.json({
redeemed: true,
bonusAmount,
})
} catch (error) {
if (error instanceof z.ZodError) {
return NextResponse.json({ error: error.errors[0].message }, { status: 400 })
}
logger.error('Referral code redemption error', { error })
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
}
}
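The deleted route above documents a small contract: a code in the request body, and redeemed/bonusAmount/error back. A minimal client-side sketch of that contract, assuming a plain fetch wrapper (the helper name is hypothetical):

// Hypothetical caller of the removed endpoint, following its doc comment.
interface RedeemResult {
  redeemed: boolean
  bonusAmount?: number
  error?: string
}

async function redeemReferralCode(code: string): Promise<RedeemResult> {
  const response = await fetch('/api/referral-code/redeem', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ code }),
  })
  // Error responses (400/404/500) also carry a JSON body with an `error` field.
  return (await response.json()) as RedeemResult
}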

View File

@@ -3,14 +3,17 @@
* *
* @vitest-environment node * @vitest-environment node
*/ */
import { databaseMock, loggerMock } from '@sim/testing' import { loggerMock } from '@sim/testing'
import { NextRequest } from 'next/server' import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
const { mockGetSession, mockAuthorizeWorkflowByWorkspacePermission } = vi.hoisted(() => ({ const { mockGetSession, mockAuthorizeWorkflowByWorkspacePermission, mockDbSelect, mockDbUpdate } =
mockGetSession: vi.fn(), vi.hoisted(() => ({
mockAuthorizeWorkflowByWorkspacePermission: vi.fn(), mockGetSession: vi.fn(),
})) mockAuthorizeWorkflowByWorkspacePermission: vi.fn(),
mockDbSelect: vi.fn(),
mockDbUpdate: vi.fn(),
}))
vi.mock('@/lib/auth', () => ({ vi.mock('@/lib/auth', () => ({
getSession: mockGetSession, getSession: mockGetSession,
@@ -20,7 +23,12 @@ vi.mock('@/lib/workflows/utils', () => ({
authorizeWorkflowByWorkspacePermission: mockAuthorizeWorkflowByWorkspacePermission, authorizeWorkflowByWorkspacePermission: mockAuthorizeWorkflowByWorkspacePermission,
})) }))
vi.mock('@sim/db', () => databaseMock) vi.mock('@sim/db', () => ({
db: {
select: mockDbSelect,
update: mockDbUpdate,
},
}))
vi.mock('@sim/db/schema', () => ({ vi.mock('@sim/db/schema', () => ({
workflow: { id: 'id', userId: 'userId', workspaceId: 'workspaceId' }, workflow: { id: 'id', userId: 'userId', workspaceId: 'workspaceId' },
@@ -51,9 +59,6 @@ function createParams(id: string): { params: Promise<{ id: string }> } {
return { params: Promise.resolve({ id }) } return { params: Promise.resolve({ id }) }
} }
const mockDbSelect = databaseMock.db.select as ReturnType<typeof vi.fn>
const mockDbUpdate = databaseMock.db.update as ReturnType<typeof vi.fn>
function mockDbChain(selectResults: unknown[][]) { function mockDbChain(selectResults: unknown[][]) {
let selectCallIndex = 0 let selectCallIndex = 0
mockDbSelect.mockImplementation(() => ({ mockDbSelect.mockImplementation(() => ({
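Both test hunks replace the shared databaseMock with locally hoisted vi.fn mocks wired into a per-file vi.mock of @sim/db. A compact sketch of that pattern on its own, with placeholder rows:

// Minimal sketch of the hoisted-mock pattern; the resolved rows are placeholders.
import { expect, it, vi } from 'vitest'

const { mockDbSelect } = vi.hoisted(() => ({ mockDbSelect: vi.fn() }))

vi.mock('@sim/db', () => ({
  db: { select: mockDbSelect },
}))

it('resolves rows from the locally mocked db', async () => {
  mockDbSelect.mockImplementation(() => ({
    from: () => ({
      where: () => ({
        limit: () => Promise.resolve([{ id: 'workflow-123' }]),
      }),
    }),
  }))

  const { db } = await import('@sim/db')
  const rows = await (db as any).select().from('workflow').where('id').limit(1)
  expect(rows[0].id).toBe('workflow-123')
})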

View File

@@ -3,14 +3,17 @@
* *
* @vitest-environment node * @vitest-environment node
*/ */
import { databaseMock, loggerMock } from '@sim/testing' import { loggerMock } from '@sim/testing'
import { NextRequest } from 'next/server' import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
const { mockGetSession, mockAuthorizeWorkflowByWorkspacePermission } = vi.hoisted(() => ({ const { mockGetSession, mockAuthorizeWorkflowByWorkspacePermission, mockDbSelect } = vi.hoisted(
mockGetSession: vi.fn(), () => ({
mockAuthorizeWorkflowByWorkspacePermission: vi.fn(), mockGetSession: vi.fn(),
})) mockAuthorizeWorkflowByWorkspacePermission: vi.fn(),
mockDbSelect: vi.fn(),
})
)
vi.mock('@/lib/auth', () => ({ vi.mock('@/lib/auth', () => ({
getSession: mockGetSession, getSession: mockGetSession,
@@ -20,7 +23,11 @@ vi.mock('@/lib/workflows/utils', () => ({
authorizeWorkflowByWorkspacePermission: mockAuthorizeWorkflowByWorkspacePermission, authorizeWorkflowByWorkspacePermission: mockAuthorizeWorkflowByWorkspacePermission,
})) }))
vi.mock('@sim/db', () => databaseMock) vi.mock('@sim/db', () => ({
db: {
select: mockDbSelect,
},
}))
vi.mock('@sim/db/schema', () => ({ vi.mock('@sim/db/schema', () => ({
workflow: { id: 'id', userId: 'userId', workspaceId: 'workspaceId' }, workflow: { id: 'id', userId: 'userId', workspaceId: 'workspaceId' },
@@ -55,8 +62,6 @@ function createRequest(url: string): NextRequest {
return new NextRequest(new URL(url), { method: 'GET' }) return new NextRequest(new URL(url), { method: 'GET' })
} }
const mockDbSelect = databaseMock.db.select as ReturnType<typeof vi.fn>
function mockDbChain(results: any[]) { function mockDbChain(results: any[]) {
let callIndex = 0 let callIndex = 0
mockDbSelect.mockImplementation(() => ({ mockDbSelect.mockImplementation(() => ({

View File

@@ -6,7 +6,7 @@ import { type NextRequest, NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid' import { v4 as uuidv4 } from 'uuid'
import { getSession } from '@/lib/auth' import { getSession } from '@/lib/auth'
import { generateRequestId } from '@/lib/core/utils/request' import { generateRequestId } from '@/lib/core/utils/request'
import { getInternalApiBaseUrl } from '@/lib/core/utils/urls' import { getBaseUrl } from '@/lib/core/utils/urls'
import { import {
type RegenerateStateInput, type RegenerateStateInput,
regenerateWorkflowStateIds, regenerateWorkflowStateIds,
@@ -115,18 +115,15 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
// Step 3: Save the workflow state using the existing state endpoint (like imports do) // Step 3: Save the workflow state using the existing state endpoint (like imports do)
// Ensure variables in state are remapped for the new workflow as well // Ensure variables in state are remapped for the new workflow as well
const workflowStateWithVariables = { ...workflowState, variables: remappedVariables } const workflowStateWithVariables = { ...workflowState, variables: remappedVariables }
const stateResponse = await fetch( const stateResponse = await fetch(`${getBaseUrl()}/api/workflows/${newWorkflowId}/state`, {
`${getInternalApiBaseUrl()}/api/workflows/${newWorkflowId}/state`, method: 'PUT',
{ headers: {
method: 'PUT', 'Content-Type': 'application/json',
headers: { // Forward the session cookie for authentication
'Content-Type': 'application/json', cookie: request.headers.get('cookie') || '',
// Forward the session cookie for authentication },
cookie: request.headers.get('cookie') || '', body: JSON.stringify(workflowStateWithVariables),
}, })
body: JSON.stringify(workflowStateWithVariables),
}
)
if (!stateResponse.ok) { if (!stateResponse.ok) {
logger.error(`[${requestId}] Failed to save workflow state for template use`) logger.error(`[${requestId}] Failed to save workflow state for template use`)
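The restructured fetch above keeps the same behaviour: the template-use handler calls the state endpoint as the current user by forwarding the session cookie. A stripped-down sketch of that pattern (the wrapper name and error shape are assumptions):

// Sketch of calling an internal API route on behalf of the same session.
import type { NextRequest } from 'next/server'
import { getBaseUrl } from '@/lib/core/utils/urls'

async function saveWorkflowState(
  request: NextRequest,
  workflowId: string,
  state: Record<string, unknown>
): Promise<void> {
  const response = await fetch(`${getBaseUrl()}/api/workflows/${workflowId}/state`, {
    method: 'PUT',
    headers: {
      'Content-Type': 'application/json',
      // Forward the caller's session cookie so the state endpoint can authenticate
      cookie: request.headers.get('cookie') || '',
    },
    body: JSON.stringify(state),
  })
  if (!response.ok) {
    throw new Error(`Failed to save workflow state (${response.status})`)
  }
}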

View File

@@ -66,12 +66,6 @@
* Credits: * Credits:
* POST /api/v1/admin/credits - Issue credits to user (by userId or email) * POST /api/v1/admin/credits - Issue credits to user (by userId or email)
* *
* Referral Campaigns:
* GET /api/v1/admin/referral-campaigns - List campaigns (?active=true/false)
* POST /api/v1/admin/referral-campaigns - Create campaign
* GET /api/v1/admin/referral-campaigns/:id - Get campaign details
* PATCH /api/v1/admin/referral-campaigns/:id - Update campaign fields
*
* Access Control (Permission Groups): * Access Control (Permission Groups):
* GET /api/v1/admin/access-control - List permission groups (?organizationId=X) * GET /api/v1/admin/access-control - List permission groups (?organizationId=X)
* DELETE /api/v1/admin/access-control - Delete permission groups for org (?organizationId=X) * DELETE /api/v1/admin/access-control - Delete permission groups for org (?organizationId=X)
@@ -103,7 +97,6 @@ export type {
AdminOrganization, AdminOrganization,
AdminOrganizationBillingSummary, AdminOrganizationBillingSummary,
AdminOrganizationDetail, AdminOrganizationDetail,
AdminReferralCampaign,
AdminSeatAnalytics, AdminSeatAnalytics,
AdminSingleResponse, AdminSingleResponse,
AdminSubscription, AdminSubscription,
@@ -118,7 +111,6 @@ export type {
AdminWorkspaceMember, AdminWorkspaceMember,
DbMember, DbMember,
DbOrganization, DbOrganization,
DbReferralCampaign,
DbSubscription, DbSubscription,
DbUser, DbUser,
DbUserStats, DbUserStats,
@@ -147,7 +139,6 @@ export {
parseWorkflowVariables, parseWorkflowVariables,
toAdminFolder, toAdminFolder,
toAdminOrganization, toAdminOrganization,
toAdminReferralCampaign,
toAdminSubscription, toAdminSubscription,
toAdminUser, toAdminUser,
toAdminWorkflow, toAdminWorkflow,

View File

@@ -1,142 +0,0 @@
/**
* GET /api/v1/admin/referral-campaigns/:id
*
* Get a single referral campaign by ID.
*
* PATCH /api/v1/admin/referral-campaigns/:id
*
* Update campaign fields. All fields are optional.
*
* Body:
* - name: string (non-empty) - Campaign name
* - bonusCreditAmount: number (> 0) - Bonus credits in dollars
* - isActive: boolean - Enable/disable the campaign
* - code: string | null (min 6 chars, auto-uppercased, null to remove) - Redeemable code
* - utmSource: string | null - UTM source match (null = wildcard)
* - utmMedium: string | null - UTM medium match (null = wildcard)
* - utmCampaign: string | null - UTM campaign match (null = wildcard)
* - utmContent: string | null - UTM content match (null = wildcard)
*/
import { db } from '@sim/db'
import { referralCampaigns } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { withAdminAuthParams } from '@/app/api/v1/admin/middleware'
import {
badRequestResponse,
internalErrorResponse,
notFoundResponse,
singleResponse,
} from '@/app/api/v1/admin/responses'
import { toAdminReferralCampaign } from '@/app/api/v1/admin/types'
const logger = createLogger('AdminReferralCampaignDetailAPI')
interface RouteParams {
id: string
}
export const GET = withAdminAuthParams<RouteParams>(async (_, context) => {
try {
const { id: campaignId } = await context.params
const [campaign] = await db
.select()
.from(referralCampaigns)
.where(eq(referralCampaigns.id, campaignId))
.limit(1)
if (!campaign) {
return notFoundResponse('Campaign')
}
logger.info(`Admin API: Retrieved referral campaign ${campaignId}`)
return singleResponse(toAdminReferralCampaign(campaign, getBaseUrl()))
} catch (error) {
logger.error('Admin API: Failed to get referral campaign', { error })
return internalErrorResponse('Failed to get referral campaign')
}
})
export const PATCH = withAdminAuthParams<RouteParams>(async (request, context) => {
try {
const { id: campaignId } = await context.params
const body = await request.json()
const [existing] = await db
.select()
.from(referralCampaigns)
.where(eq(referralCampaigns.id, campaignId))
.limit(1)
if (!existing) {
return notFoundResponse('Campaign')
}
const updateData: Record<string, unknown> = { updatedAt: new Date() }
if (body.name !== undefined) {
if (typeof body.name !== 'string' || body.name.trim().length === 0) {
return badRequestResponse('name must be a non-empty string')
}
updateData.name = body.name.trim()
}
if (body.bonusCreditAmount !== undefined) {
if (
typeof body.bonusCreditAmount !== 'number' ||
!Number.isFinite(body.bonusCreditAmount) ||
body.bonusCreditAmount <= 0
) {
return badRequestResponse('bonusCreditAmount must be a positive number')
}
updateData.bonusCreditAmount = body.bonusCreditAmount.toString()
}
if (body.isActive !== undefined) {
if (typeof body.isActive !== 'boolean') {
return badRequestResponse('isActive must be a boolean')
}
updateData.isActive = body.isActive
}
if (body.code !== undefined) {
if (body.code !== null) {
if (typeof body.code !== 'string') {
return badRequestResponse('code must be a string or null')
}
if (body.code.trim().length < 6) {
return badRequestResponse('code must be at least 6 characters')
}
}
updateData.code = body.code ? body.code.trim().toUpperCase() : null
}
for (const field of ['utmSource', 'utmMedium', 'utmCampaign', 'utmContent'] as const) {
if (body[field] !== undefined) {
if (body[field] !== null && typeof body[field] !== 'string') {
return badRequestResponse(`${field} must be a string or null`)
}
updateData[field] = body[field] || null
}
}
const [updated] = await db
.update(referralCampaigns)
.set(updateData)
.where(eq(referralCampaigns.id, campaignId))
.returning()
logger.info(`Admin API: Updated referral campaign ${campaignId}`, {
fields: Object.keys(updateData).filter((k) => k !== 'updatedAt'),
})
return singleResponse(toAdminReferralCampaign(updated, getBaseUrl()))
} catch (error) {
logger.error('Admin API: Failed to update referral campaign', { error })
return internalErrorResponse('Failed to update referral campaign')
}
})
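For reference, a hedged sketch of a PATCH call matching the removed route's documented body; admin authentication (handled by withAdminAuthParams) is not shown in the diff, so no auth header is assumed:

// Example values only; every field is optional per the doc comment above.
async function updateReferralCampaign(baseUrl: string, campaignId: string) {
  const response = await fetch(`${baseUrl}/api/v1/admin/referral-campaigns/${campaignId}`, {
    method: 'PATCH',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      name: 'Spring promo',
      bonusCreditAmount: 25, // dollars, must be > 0
      isActive: true,
      code: 'SPRING25', // at least 6 chars, stored uppercased; null removes the code
      utmSource: null, // null acts as a wildcard match
    }),
  })
  return response.json()
}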

View File

@@ -1,140 +0,0 @@
/**
* GET /api/v1/admin/referral-campaigns
*
* List referral campaigns with optional filtering and pagination.
*
* Query Parameters:
* - active: string (optional) - Filter by active status ('true' or 'false')
* - limit: number (default: 50, max: 250)
* - offset: number (default: 0)
*
* POST /api/v1/admin/referral-campaigns
*
* Create a new referral campaign.
*
* Body:
* - name: string (required) - Campaign name
* - bonusCreditAmount: number (required, > 0) - Bonus credits in dollars
* - code: string | null (optional, min 6 chars, auto-uppercased) - Redeemable code
* - utmSource: string | null (optional) - UTM source match (null = wildcard)
* - utmMedium: string | null (optional) - UTM medium match (null = wildcard)
* - utmCampaign: string | null (optional) - UTM campaign match (null = wildcard)
* - utmContent: string | null (optional) - UTM content match (null = wildcard)
*/
import { db } from '@sim/db'
import { referralCampaigns } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { count, eq, type SQL } from 'drizzle-orm'
import { nanoid } from 'nanoid'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { withAdminAuth } from '@/app/api/v1/admin/middleware'
import {
badRequestResponse,
internalErrorResponse,
listResponse,
singleResponse,
} from '@/app/api/v1/admin/responses'
import {
type AdminReferralCampaign,
createPaginationMeta,
parsePaginationParams,
toAdminReferralCampaign,
} from '@/app/api/v1/admin/types'
const logger = createLogger('AdminReferralCampaignsAPI')
export const GET = withAdminAuth(async (request) => {
const url = new URL(request.url)
const { limit, offset } = parsePaginationParams(url)
const activeFilter = url.searchParams.get('active')
try {
const conditions: SQL<unknown>[] = []
if (activeFilter === 'true') {
conditions.push(eq(referralCampaigns.isActive, true))
} else if (activeFilter === 'false') {
conditions.push(eq(referralCampaigns.isActive, false))
}
const whereClause = conditions.length > 0 ? conditions[0] : undefined
const baseUrl = getBaseUrl()
const [countResult, campaigns] = await Promise.all([
db.select({ total: count() }).from(referralCampaigns).where(whereClause),
db
.select()
.from(referralCampaigns)
.where(whereClause)
.orderBy(referralCampaigns.createdAt)
.limit(limit)
.offset(offset),
])
const total = countResult[0].total
const data: AdminReferralCampaign[] = campaigns.map((c) => toAdminReferralCampaign(c, baseUrl))
const pagination = createPaginationMeta(total, limit, offset)
logger.info(`Admin API: Listed ${data.length} referral campaigns (total: ${total})`)
return listResponse(data, pagination)
} catch (error) {
logger.error('Admin API: Failed to list referral campaigns', { error })
return internalErrorResponse('Failed to list referral campaigns')
}
})
export const POST = withAdminAuth(async (request) => {
try {
const body = await request.json()
const { name, code, utmSource, utmMedium, utmCampaign, utmContent, bonusCreditAmount } = body
if (!name || typeof name !== 'string') {
return badRequestResponse('name is required and must be a string')
}
if (
typeof bonusCreditAmount !== 'number' ||
!Number.isFinite(bonusCreditAmount) ||
bonusCreditAmount <= 0
) {
return badRequestResponse('bonusCreditAmount must be a positive number')
}
if (code !== undefined && code !== null) {
if (typeof code !== 'string') {
return badRequestResponse('code must be a string or null')
}
if (code.trim().length < 6) {
return badRequestResponse('code must be at least 6 characters')
}
}
const id = nanoid()
const [campaign] = await db
.insert(referralCampaigns)
.values({
id,
name,
code: code ? code.trim().toUpperCase() : null,
utmSource: utmSource || null,
utmMedium: utmMedium || null,
utmCampaign: utmCampaign || null,
utmContent: utmContent || null,
bonusCreditAmount: bonusCreditAmount.toString(),
})
.returning()
logger.info(`Admin API: Created referral campaign ${id}`, {
name,
code: campaign.code,
bonusCreditAmount,
})
return singleResponse(toAdminReferralCampaign(campaign, getBaseUrl()))
} catch (error) {
logger.error('Admin API: Failed to create referral campaign', { error })
return internalErrorResponse('Failed to create referral campaign')
}
})
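Similarly, a sketch of listing active campaigns through the removed collection endpoint, using its documented query parameters (auth again omitted):

// Hypothetical caller; parameter semantics come from the doc comment above.
async function listActiveCampaigns(baseUrl: string, limit = 50, offset = 0) {
  const query = new URLSearchParams({
    active: 'true', // 'true' | 'false' filters by isActive; omit for all campaigns
    limit: String(limit), // default 50, max 250
    offset: String(offset),
  })
  const response = await fetch(`${baseUrl}/api/v1/admin/referral-campaigns?${query}`)
  return response.json()
}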

View File

@@ -8,7 +8,6 @@
import type { import type {
member, member,
organization, organization,
referralCampaigns,
subscription, subscription,
user, user,
userStats, userStats,
@@ -32,7 +31,6 @@ export type DbOrganization = InferSelectModel<typeof organization>
export type DbSubscription = InferSelectModel<typeof subscription> export type DbSubscription = InferSelectModel<typeof subscription>
export type DbMember = InferSelectModel<typeof member> export type DbMember = InferSelectModel<typeof member>
export type DbUserStats = InferSelectModel<typeof userStats> export type DbUserStats = InferSelectModel<typeof userStats>
export type DbReferralCampaign = InferSelectModel<typeof referralCampaigns>
// ============================================================================= // =============================================================================
// Pagination // Pagination
@@ -648,49 +646,3 @@ export interface AdminDeployResult {
export interface AdminUndeployResult { export interface AdminUndeployResult {
isDeployed: boolean isDeployed: boolean
} }
// =============================================================================
// Referral Campaign Types
// =============================================================================
export interface AdminReferralCampaign {
id: string
name: string
code: string | null
utmSource: string | null
utmMedium: string | null
utmCampaign: string | null
utmContent: string | null
bonusCreditAmount: string
isActive: boolean
signupUrl: string | null
createdAt: string
updatedAt: string
}
export function toAdminReferralCampaign(
dbCampaign: DbReferralCampaign,
baseUrl: string
): AdminReferralCampaign {
const utmParams = new URLSearchParams()
if (dbCampaign.utmSource) utmParams.set('utm_source', dbCampaign.utmSource)
if (dbCampaign.utmMedium) utmParams.set('utm_medium', dbCampaign.utmMedium)
if (dbCampaign.utmCampaign) utmParams.set('utm_campaign', dbCampaign.utmCampaign)
if (dbCampaign.utmContent) utmParams.set('utm_content', dbCampaign.utmContent)
const query = utmParams.toString()
return {
id: dbCampaign.id,
name: dbCampaign.name,
code: dbCampaign.code,
utmSource: dbCampaign.utmSource,
utmMedium: dbCampaign.utmMedium,
utmCampaign: dbCampaign.utmCampaign,
utmContent: dbCampaign.utmContent,
bonusCreditAmount: dbCampaign.bonusCreditAmount,
isActive: dbCampaign.isActive,
signupUrl: query ? `${baseUrl}/signup?${query}` : null,
createdAt: dbCampaign.createdAt.toISOString(),
updatedAt: dbCampaign.updatedAt.toISOString(),
}
}
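The removed toAdminReferralCampaign helper derived signupUrl from the campaign's UTM fields. A worked example of that derivation, with a placeholder base URL:

// Placeholder base URL; the real value comes from getBaseUrl().
const baseUrl = 'https://sim.example.com'

const utmParams = new URLSearchParams()
utmParams.set('utm_source', 'newsletter')
utmParams.set('utm_campaign', 'spring-launch')

const query = utmParams.toString()
const signupUrl = query ? `${baseUrl}/signup?${query}` : null
// => "https://sim.example.com/signup?utm_source=newsletter&utm_campaign=spring-launch"
// A campaign with no UTM fields set yields signupUrl === null.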

View File

@@ -8,7 +8,7 @@ import { resolveWorkflowIdForUser } from '@/lib/workflows/utils'
import { authenticateV1Request } from '@/app/api/v1/auth' import { authenticateV1Request } from '@/app/api/v1/auth'
const logger = createLogger('CopilotHeadlessAPI') const logger = createLogger('CopilotHeadlessAPI')
const DEFAULT_COPILOT_MODEL = 'claude-opus-4-5' const DEFAULT_COPILOT_MODEL = 'claude-opus-4-6'
const RequestSchema = z.object({ const RequestSchema = z.object({
message: z.string().min(1, 'message is required'), message: z.string().min(1, 'message is required'),

View File

@@ -238,11 +238,6 @@ Use this context to calculate relative dates like "yesterday", "last week", "beg
finalSystemPrompt += currentTimeContext finalSystemPrompt += currentTimeContext
} }
if (generationType === 'cron-expression') {
finalSystemPrompt +=
'\n\nIMPORTANT: Return ONLY the raw cron expression (e.g., "0 9 * * 1-5"). Do NOT wrap it in markdown code blocks, backticks, or quotes. Do NOT include any explanation or text before or after the expression.'
}
if (generationType === 'json-object') { if (generationType === 'json-object') {
finalSystemPrompt += finalSystemPrompt +=
'\n\nIMPORTANT: Return ONLY the raw JSON object. Do NOT wrap it in markdown code blocks (no ```json or ```). Do NOT include any explanation or text before or after the JSON. The response must start with { and end with }.' '\n\nIMPORTANT: Return ONLY the raw JSON object. Do NOT wrap it in markdown code blocks (no ```json or ```). Do NOT include any explanation or text before or after the JSON. The response must start with { and end with }.'

View File

@@ -29,7 +29,7 @@ const patchBodySchema = z
description: z description: z
.string() .string()
.trim() .trim()
.max(2000, 'Description must be 2000 characters or less') .max(500, 'Description must be 500 characters or less')
.nullable() .nullable()
.optional(), .optional(),
isActive: z.literal(true).optional(), // Set to true to activate this version isActive: z.literal(true).optional(), // Set to true to activate this version

View File

@@ -12,7 +12,7 @@ import {
import { generateRequestId } from '@/lib/core/utils/request' import { generateRequestId } from '@/lib/core/utils/request'
import { SSE_HEADERS } from '@/lib/core/utils/sse' import { SSE_HEADERS } from '@/lib/core/utils/sse'
import { getBaseUrl } from '@/lib/core/utils/urls' import { getBaseUrl } from '@/lib/core/utils/urls'
import { createExecutionEventWriter, setExecutionMeta } from '@/lib/execution/event-buffer' import { markExecutionCancelled } from '@/lib/execution/cancellation'
import { processInputFileFields } from '@/lib/execution/files' import { processInputFileFields } from '@/lib/execution/files'
import { preprocessExecution } from '@/lib/execution/preprocessing' import { preprocessExecution } from '@/lib/execution/preprocessing'
import { LoggingSession } from '@/lib/logs/execution/logging-session' import { LoggingSession } from '@/lib/logs/execution/logging-session'
@@ -700,27 +700,15 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
const timeoutController = createTimeoutAbortController(preprocessResult.executionTimeout?.sync) const timeoutController = createTimeoutAbortController(preprocessResult.executionTimeout?.sync)
let isStreamClosed = false let isStreamClosed = false
const eventWriter = createExecutionEventWriter(executionId)
setExecutionMeta(executionId, {
status: 'active',
userId: actorUserId,
workflowId,
}).catch(() => {})
const stream = new ReadableStream<Uint8Array>({ const stream = new ReadableStream<Uint8Array>({
async start(controller) { async start(controller) {
let finalMetaStatus: 'complete' | 'error' | 'cancelled' | null = null
const sendEvent = (event: ExecutionEvent) => { const sendEvent = (event: ExecutionEvent) => {
if (!isStreamClosed) { if (isStreamClosed) return
try {
controller.enqueue(encodeSSEEvent(event)) try {
} catch { controller.enqueue(encodeSSEEvent(event))
isStreamClosed = true } catch {
} isStreamClosed = true
}
if (event.type !== 'stream:chunk' && event.type !== 'stream:done') {
eventWriter.write(event).catch(() => {})
} }
} }
@@ -841,12 +829,14 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
const reader = streamingExec.stream.getReader() const reader = streamingExec.stream.getReader()
const decoder = new TextDecoder() const decoder = new TextDecoder()
let chunkCount = 0
try { try {
while (true) { while (true) {
const { done, value } = await reader.read() const { done, value } = await reader.read()
if (done) break if (done) break
chunkCount++
const chunk = decoder.decode(value, { stream: true }) const chunk = decoder.decode(value, { stream: true })
sendEvent({ sendEvent({
type: 'stream:chunk', type: 'stream:chunk',
@@ -961,7 +951,6 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
duration: result.metadata?.duration || 0, duration: result.metadata?.duration || 0,
}, },
}) })
finalMetaStatus = 'error'
} else { } else {
logger.info(`[${requestId}] Workflow execution was cancelled`) logger.info(`[${requestId}] Workflow execution was cancelled`)
@@ -974,7 +963,6 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
duration: result.metadata?.duration || 0, duration: result.metadata?.duration || 0,
}, },
}) })
finalMetaStatus = 'cancelled'
} }
return return
} }
@@ -998,7 +986,6 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
endTime: result.metadata?.endTime || new Date().toISOString(), endTime: result.metadata?.endTime || new Date().toISOString(),
}, },
}) })
finalMetaStatus = 'complete'
} catch (error: unknown) { } catch (error: unknown) {
const isTimeout = isTimeoutError(error) || timeoutController.isTimedOut() const isTimeout = isTimeoutError(error) || timeoutController.isTimedOut()
const errorMessage = isTimeout const errorMessage = isTimeout
@@ -1030,18 +1017,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
duration: executionResult?.metadata?.duration || 0, duration: executionResult?.metadata?.duration || 0,
}, },
}) })
finalMetaStatus = 'error'
} finally { } finally {
try {
await eventWriter.close()
} catch (closeError) {
logger.warn(`[${requestId}] Failed to close event writer`, {
error: closeError instanceof Error ? closeError.message : String(closeError),
})
}
if (finalMetaStatus) {
setExecutionMeta(executionId, { status: finalMetaStatus }).catch(() => {})
}
timeoutController.cleanup() timeoutController.cleanup()
if (executionId) { if (executionId) {
await cleanupExecutionBase64Cache(executionId) await cleanupExecutionBase64Cache(executionId)
@@ -1056,7 +1032,10 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
}, },
cancel() { cancel() {
isStreamClosed = true isStreamClosed = true
logger.info(`[${requestId}] Client disconnected from SSE stream`) timeoutController.cleanup()
logger.info(`[${requestId}] Client aborted SSE stream, signalling cancellation`)
timeoutController.abort()
markExecutionCancelled(executionId).catch(() => {})
}, },
}) })
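With this change, the cancel() handler propagates a client disconnect into execution cancellation instead of only logging it. A hedged client-side sketch of what that enables, assuming a JSON body and simplified SSE parsing:

// Aborting the signal tears down the SSE stream; the route's cancel() handler
// then calls markExecutionCancelled for this execution. Body shape is assumed.
async function runWorkflow(workflowId: string, input: unknown, signal: AbortSignal) {
  const response = await fetch(`/api/workflows/${workflowId}/execute`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(input),
    signal,
  })
  const reader = response.body!.getReader()
  const decoder = new TextDecoder()
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    console.log(decoder.decode(value, { stream: true }))
  }
}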

View File

@@ -1,170 +0,0 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
import {
type ExecutionStreamStatus,
getExecutionMeta,
readExecutionEvents,
} from '@/lib/execution/event-buffer'
import { formatSSEEvent } from '@/lib/workflows/executor/execution-events'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
const logger = createLogger('ExecutionStreamReconnectAPI')
const POLL_INTERVAL_MS = 500
const MAX_POLL_DURATION_MS = 10 * 60 * 1000 // 10 minutes
function isTerminalStatus(status: ExecutionStreamStatus): boolean {
return status === 'complete' || status === 'error' || status === 'cancelled'
}
export const runtime = 'nodejs'
export const dynamic = 'force-dynamic'
export async function GET(
req: NextRequest,
{ params }: { params: Promise<{ id: string; executionId: string }> }
) {
const { id: workflowId, executionId } = await params
try {
const auth = await checkHybridAuth(req, { requireWorkflowId: false })
if (!auth.success || !auth.userId) {
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
}
const workflowAuthorization = await authorizeWorkflowByWorkspacePermission({
workflowId,
userId: auth.userId,
action: 'read',
})
if (!workflowAuthorization.allowed) {
return NextResponse.json(
{ error: workflowAuthorization.message || 'Access denied' },
{ status: workflowAuthorization.status }
)
}
const meta = await getExecutionMeta(executionId)
if (!meta) {
return NextResponse.json({ error: 'Execution buffer not found or expired' }, { status: 404 })
}
if (meta.workflowId && meta.workflowId !== workflowId) {
return NextResponse.json(
{ error: 'Execution does not belong to this workflow' },
{ status: 403 }
)
}
const fromParam = req.nextUrl.searchParams.get('from')
const parsed = fromParam ? Number.parseInt(fromParam, 10) : 0
const fromEventId = Number.isFinite(parsed) && parsed >= 0 ? parsed : 0
logger.info('Reconnection stream requested', {
workflowId,
executionId,
fromEventId,
metaStatus: meta.status,
})
const encoder = new TextEncoder()
let closed = false
const stream = new ReadableStream<Uint8Array>({
async start(controller) {
let lastEventId = fromEventId
const pollDeadline = Date.now() + MAX_POLL_DURATION_MS
const enqueue = (text: string) => {
if (closed) return
try {
controller.enqueue(encoder.encode(text))
} catch {
closed = true
}
}
try {
const events = await readExecutionEvents(executionId, lastEventId)
for (const entry of events) {
if (closed) return
enqueue(formatSSEEvent(entry.event))
lastEventId = entry.eventId
}
const currentMeta = await getExecutionMeta(executionId)
if (!currentMeta || isTerminalStatus(currentMeta.status)) {
enqueue('data: [DONE]\n\n')
if (!closed) controller.close()
return
}
while (!closed && Date.now() < pollDeadline) {
await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS))
if (closed) return
const newEvents = await readExecutionEvents(executionId, lastEventId)
for (const entry of newEvents) {
if (closed) return
enqueue(formatSSEEvent(entry.event))
lastEventId = entry.eventId
}
const polledMeta = await getExecutionMeta(executionId)
if (!polledMeta || isTerminalStatus(polledMeta.status)) {
const finalEvents = await readExecutionEvents(executionId, lastEventId)
for (const entry of finalEvents) {
if (closed) return
enqueue(formatSSEEvent(entry.event))
lastEventId = entry.eventId
}
enqueue('data: [DONE]\n\n')
if (!closed) controller.close()
return
}
}
if (!closed) {
logger.warn('Reconnection stream poll deadline reached', { executionId })
enqueue('data: [DONE]\n\n')
controller.close()
}
} catch (error) {
logger.error('Error in reconnection stream', {
executionId,
error: error instanceof Error ? error.message : String(error),
})
if (!closed) {
try {
controller.close()
} catch {}
}
}
},
cancel() {
closed = true
logger.info('Client disconnected from reconnection stream', { executionId })
},
})
return new NextResponse(stream, {
headers: {
...SSE_HEADERS,
'X-Execution-Id': executionId,
},
})
} catch (error: any) {
logger.error('Failed to start reconnection stream', {
workflowId,
executionId,
error: error.message,
})
return NextResponse.json(
{ error: error.message || 'Failed to start reconnection stream' },
{ status: 500 }
)
}
}
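The deleted reconnection route polled the event buffer, replayed events from a `from` cursor until a terminal status, then emitted `data: [DONE]`. A sketch of how a client would have consumed it; the exact path segments between the workflow id and the execution id are an assumption:

// Hypothetical URL shape; only the route params ({ id, executionId }) and the
// `from` query parameter are taken from the deleted code.
async function resumeExecutionStream(workflowId: string, executionId: string, fromEventId: number) {
  const url = `/api/workflows/${workflowId}/executions/${executionId}/stream?from=${fromEventId}`
  const response = await fetch(url)
  const reader = response.body!.getReader()
  const decoder = new TextDecoder()
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    const text = decoder.decode(value, { stream: true })
    console.log(text)
    if (text.includes('data: [DONE]')) break
  }
}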

View File

@@ -5,7 +5,7 @@
* @vitest-environment node * @vitest-environment node
*/ */
import { loggerMock, setupGlobalFetchMock } from '@sim/testing' import { loggerMock } from '@sim/testing'
import { NextRequest } from 'next/server' import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
@@ -284,7 +284,9 @@ describe('Workflow By ID API Route', () => {
where: vi.fn().mockResolvedValue([{ id: 'workflow-123' }]), where: vi.fn().mockResolvedValue([{ id: 'workflow-123' }]),
}) })
setupGlobalFetchMock({ ok: true }) global.fetch = vi.fn().mockResolvedValue({
ok: true,
})
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123', { const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123', {
method: 'DELETE', method: 'DELETE',
@@ -329,7 +331,9 @@ describe('Workflow By ID API Route', () => {
where: vi.fn().mockResolvedValue([{ id: 'workflow-123' }]), where: vi.fn().mockResolvedValue([{ id: 'workflow-123' }]),
}) })
setupGlobalFetchMock({ ok: true }) global.fetch = vi.fn().mockResolvedValue({
ok: true,
})
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123', { const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123', {
method: 'DELETE', method: 'DELETE',

View File

@@ -14,6 +14,14 @@ const logger = createLogger('DiffControls')
const NOTIFICATION_WIDTH = 240 const NOTIFICATION_WIDTH = 240
const NOTIFICATION_GAP = 16 const NOTIFICATION_GAP = 16
function isWorkflowEditToolCall(name?: string, params?: Record<string, unknown>): boolean {
if (name !== 'workflow_change') return false
const mode = typeof params?.mode === 'string' ? params.mode.toLowerCase() : ''
if (mode === 'apply') return true
return typeof params?.proposalId === 'string' && params.proposalId.length > 0
}
export const DiffControls = memo(function DiffControls() { export const DiffControls = memo(function DiffControls() {
const isTerminalResizing = useTerminalStore((state) => state.isResizing) const isTerminalResizing = useTerminalStore((state) => state.isResizing)
const isPanelResizing = usePanelStore((state) => state.isResizing) const isPanelResizing = usePanelStore((state) => state.isResizing)
@@ -64,7 +72,7 @@ export const DiffControls = memo(function DiffControls() {
const b = blocks[bi] const b = blocks[bi]
if (b?.type === 'tool_call') { if (b?.type === 'tool_call') {
const tn = b.toolCall?.name const tn = b.toolCall?.name
if (tn === 'edit_workflow') { if (isWorkflowEditToolCall(tn, b.toolCall?.params)) {
id = b.toolCall?.id id = b.toolCall?.id
break outer break outer
} }
@@ -72,7 +80,9 @@ export const DiffControls = memo(function DiffControls() {
} }
} }
if (!id) { if (!id) {
const candidates = Object.values(toolCallsById).filter((t) => t.name === 'edit_workflow') const candidates = Object.values(toolCallsById).filter((t) =>
isWorkflowEditToolCall(t.name, t.params)
)
id = candidates.length ? candidates[candidates.length - 1].id : undefined id = candidates.length ? candidates[candidates.length - 1].id : undefined
} }
if (id) updatePreviewToolCallState('accepted', id) if (id) updatePreviewToolCallState('accepted', id)
@@ -102,7 +112,7 @@ export const DiffControls = memo(function DiffControls() {
const b = blocks[bi] const b = blocks[bi]
if (b?.type === 'tool_call') { if (b?.type === 'tool_call') {
const tn = b.toolCall?.name const tn = b.toolCall?.name
if (tn === 'edit_workflow') { if (isWorkflowEditToolCall(tn, b.toolCall?.params)) {
id = b.toolCall?.id id = b.toolCall?.id
break outer break outer
} }
@@ -110,7 +120,9 @@ export const DiffControls = memo(function DiffControls() {
} }
} }
if (!id) { if (!id) {
const candidates = Object.values(toolCallsById).filter((t) => t.name === 'edit_workflow') const candidates = Object.values(toolCallsById).filter((t) =>
isWorkflowEditToolCall(t.name, t.params)
)
id = candidates.length ? candidates[candidates.length - 1].id : undefined id = candidates.length ? candidates[candidates.length - 1].id : undefined
} }
if (id) updatePreviewToolCallState('rejected', id) if (id) updatePreviewToolCallState('rejected', id)
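To make the new predicate concrete, a few illustrative calls (the first two return true, the last two false, since the old edit_workflow name no longer matches):

isWorkflowEditToolCall('workflow_change', { mode: 'apply' }) // true
isWorkflowEditToolCall('workflow_change', { proposalId: 'abc123' }) // true
isWorkflowEditToolCall('workflow_change', { mode: 'propose' }) // false
isWorkflowEditToolCall('edit_workflow', { mode: 'apply' }) // false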

View File

@@ -47,6 +47,27 @@ interface ParsedTags {
cleanContent: string cleanContent: string
} }
function getToolCallParams(toolCall?: CopilotToolCall): Record<string, unknown> {
const candidate = ((toolCall as any)?.parameters ||
(toolCall as any)?.input ||
(toolCall as any)?.params ||
{}) as Record<string, unknown>
return candidate && typeof candidate === 'object' ? candidate : {}
}
function isWorkflowChangeApplyMode(toolCall?: CopilotToolCall): boolean {
if (!toolCall || toolCall.name !== 'workflow_change') return false
const params = getToolCallParams(toolCall)
const mode = typeof params.mode === 'string' ? params.mode.toLowerCase() : ''
if (mode === 'apply') return true
return typeof params.proposalId === 'string' && params.proposalId.length > 0
}
function isWorkflowEditSummaryTool(toolCall?: CopilotToolCall): boolean {
if (!toolCall) return false
return isWorkflowChangeApplyMode(toolCall)
}
/** /**
* Extracts plan steps from plan_respond tool calls in subagent blocks. * Extracts plan steps from plan_respond tool calls in subagent blocks.
* @param blocks - The subagent content blocks to search * @param blocks - The subagent content blocks to search
@@ -871,7 +892,10 @@ const SubagentContentRenderer = memo(function SubagentContentRenderer({
) )
} }
if (segment.type === 'tool' && segment.block.toolCall) { if (segment.type === 'tool' && segment.block.toolCall) {
if (toolCall.name === 'edit' && segment.block.toolCall.name === 'edit_workflow') { if (
(toolCall.name === 'edit' || toolCall.name === 'build') &&
isWorkflowEditSummaryTool(segment.block.toolCall)
) {
return ( return (
<div key={`tool-${segment.block.toolCall.id || index}`}> <div key={`tool-${segment.block.toolCall.id || index}`}>
<WorkflowEditSummary toolCall={segment.block.toolCall} /> <WorkflowEditSummary toolCall={segment.block.toolCall} />
@@ -968,12 +992,11 @@ const WorkflowEditSummary = memo(function WorkflowEditSummary({
} }
}, [blocks]) }, [blocks])
if (toolCall.name !== 'edit_workflow') { if (!isWorkflowEditSummaryTool(toolCall)) {
return null return null
} }
const params = const params = getToolCallParams(toolCall)
(toolCall as any).parameters || (toolCall as any).input || (toolCall as any).params || {}
let operations = Array.isArray(params.operations) ? params.operations : [] let operations = Array.isArray(params.operations) ? params.operations : []
if (operations.length === 0 && Array.isArray((toolCall as any).operations)) { if (operations.length === 0 && Array.isArray((toolCall as any).operations)) {
@@ -1219,11 +1242,6 @@ const WorkflowEditSummary = memo(function WorkflowEditSummary({
) )
}) })
/** Checks if a tool is server-side executed (not a client tool) */
function isIntegrationTool(toolName: string): boolean {
return !TOOL_DISPLAY_REGISTRY[toolName]
}
function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean { function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean {
if (!toolCall.name || toolCall.name === 'unknown_tool') { if (!toolCall.name || toolCall.name === 'unknown_tool') {
return false return false
@@ -1233,59 +1251,96 @@ function shouldShowRunSkipButtons(toolCall: CopilotToolCall): boolean {
return false return false
} }
// Never show buttons for tools the user has marked as always-allowed if (toolCall.ui?.showInterrupt !== true) {
if (useCopilotStore.getState().isToolAutoAllowed(toolCall.name)) {
return false return false
} }
const hasInterrupt = !!TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.interrupt return true
if (hasInterrupt) {
return true
}
// Integration tools (user-installed) always require approval
if (isIntegrationTool(toolCall.name)) {
return true
}
return false
} }
const toolCallLogger = createLogger('CopilotToolCall') const toolCallLogger = createLogger('CopilotToolCall')
async function sendToolDecision( async function sendToolDecision(
toolCallId: string, toolCallId: string,
status: 'accepted' | 'rejected' | 'background' status: 'accepted' | 'rejected' | 'background',
options?: {
toolName?: string
remember?: boolean
}
) { ) {
try { try {
await fetch('/api/copilot/confirm', { await fetch('/api/copilot/confirm', {
method: 'POST', method: 'POST',
headers: { 'Content-Type': 'application/json' }, headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ toolCallId, status }), body: JSON.stringify({
toolCallId,
status,
...(options?.toolName ? { toolName: options.toolName } : {}),
...(options?.remember ? { remember: true } : {}),
}),
}) })
} catch (error) { } catch (error) {
toolCallLogger.warn('Failed to send tool decision', { toolCallLogger.warn('Failed to send tool decision', {
toolCallId, toolCallId,
status, status,
remember: options?.remember === true,
toolName: options?.toolName,
error: error instanceof Error ? error.message : String(error), error: error instanceof Error ? error.message : String(error),
}) })
} }
} }
async function removeAutoAllowedToolPreference(toolName: string): Promise<boolean> {
try {
const response = await fetch(`/api/copilot/auto-allowed-tools?toolId=${encodeURIComponent(toolName)}`, {
method: 'DELETE',
})
return response.ok
} catch (error) {
toolCallLogger.warn('Failed to remove auto-allowed tool preference', {
toolName,
error: error instanceof Error ? error.message : String(error),
})
return false
}
}
type ToolUiAction = NonNullable<NonNullable<CopilotToolCall['ui']>['actions']>[number]
function actionDecision(action: ToolUiAction): 'accepted' | 'rejected' | 'background' {
const id = action.id.toLowerCase()
if (id.includes('background')) return 'background'
if (action.kind === 'reject') return 'rejected'
return 'accepted'
}
function isClientRunCapability(toolCall: CopilotToolCall): boolean {
if (toolCall.execution?.target === 'sim_client_capability') {
return toolCall.execution.capabilityId === 'workflow.run' || !toolCall.execution.capabilityId
}
return CLIENT_EXECUTABLE_RUN_TOOLS.has(toolCall.name)
}
async function handleRun( async function handleRun(
toolCall: CopilotToolCall, toolCall: CopilotToolCall,
setToolCallState: any, setToolCallState: any,
onStateChange?: any, onStateChange?: any,
editedParams?: any editedParams?: any,
options?: {
remember?: boolean
}
) { ) {
setToolCallState(toolCall, 'executing', editedParams ? { params: editedParams } : undefined) setToolCallState(toolCall, 'executing', editedParams ? { params: editedParams } : undefined)
onStateChange?.('executing') onStateChange?.('executing')
await sendToolDecision(toolCall.id, 'accepted') await sendToolDecision(toolCall.id, 'accepted', {
toolName: toolCall.name,
remember: options?.remember === true,
})
// Client-executable run tools: execute on the client for real-time feedback // Client-executable run tools: execute on the client for real-time feedback
// (block pulsing, console logs, stop button). The server defers execution // (block pulsing, console logs, stop button). The server defers execution
// for these tools; the client reports back via mark-complete. // for these tools; the client reports back via mark-complete.
if (CLIENT_EXECUTABLE_RUN_TOOLS.has(toolCall.name)) { if (isClientRunCapability(toolCall)) {
const params = editedParams || toolCall.params || {} const params = editedParams || toolCall.params || {}
executeRunToolOnClient(toolCall.id, toolCall.name, params) executeRunToolOnClient(toolCall.id, toolCall.name, params)
} }
@@ -1298,6 +1353,9 @@ async function handleSkip(toolCall: CopilotToolCall, setToolCallState: any, onSt
} }
function getDisplayName(toolCall: CopilotToolCall): string { function getDisplayName(toolCall: CopilotToolCall): string {
if (toolCall.ui?.phaseLabel) return toolCall.ui.phaseLabel
if (toolCall.ui?.title) return `${getStateVerb(toolCall.state)} ${toolCall.ui.title}`
const fromStore = (toolCall as any).display?.text const fromStore = (toolCall as any).display?.text
if (fromStore) return fromStore if (fromStore) return fromStore
const registryEntry = TOOL_DISPLAY_REGISTRY[toolCall.name] const registryEntry = TOOL_DISPLAY_REGISTRY[toolCall.name]
@@ -1342,53 +1400,37 @@ function RunSkipButtons({
toolCall, toolCall,
onStateChange, onStateChange,
editedParams, editedParams,
actions,
}: { }: {
toolCall: CopilotToolCall toolCall: CopilotToolCall
onStateChange?: (state: any) => void onStateChange?: (state: any) => void
editedParams?: any editedParams?: any
actions: ToolUiAction[]
}) { }) {
const [isProcessing, setIsProcessing] = useState(false) const [isProcessing, setIsProcessing] = useState(false)
const [buttonsHidden, setButtonsHidden] = useState(false) const [buttonsHidden, setButtonsHidden] = useState(false)
const actionInProgressRef = useRef(false) const actionInProgressRef = useRef(false)
const { setToolCallState, addAutoAllowedTool } = useCopilotStore() const { setToolCallState } = useCopilotStore()
const onRun = async () => { const onAction = async (action: ToolUiAction) => {
// Prevent race condition - check ref synchronously // Prevent race condition - check ref synchronously
if (actionInProgressRef.current) return if (actionInProgressRef.current) return
actionInProgressRef.current = true actionInProgressRef.current = true
setIsProcessing(true) setIsProcessing(true)
setButtonsHidden(true) setButtonsHidden(true)
try { try {
await handleRun(toolCall, setToolCallState, onStateChange, editedParams) const decision = actionDecision(action)
} finally { if (decision === 'accepted') {
setIsProcessing(false) await handleRun(toolCall, setToolCallState, onStateChange, editedParams, {
actionInProgressRef.current = false remember: action.remember === true,
} })
} } else if (decision === 'rejected') {
await handleSkip(toolCall, setToolCallState, onStateChange)
const onAlwaysAllow = async () => { } else {
// Prevent race condition - check ref synchronously setToolCallState(toolCall, ClientToolCallState.background)
if (actionInProgressRef.current) return onStateChange?.('background')
actionInProgressRef.current = true await sendToolDecision(toolCall.id, 'background')
setIsProcessing(true) }
setButtonsHidden(true)
try {
await addAutoAllowedTool(toolCall.name)
await handleRun(toolCall, setToolCallState, onStateChange, editedParams)
} finally {
setIsProcessing(false)
actionInProgressRef.current = false
}
}
const onSkip = async () => {
// Prevent race condition - check ref synchronously
if (actionInProgressRef.current) return
actionInProgressRef.current = true
setIsProcessing(true)
setButtonsHidden(true)
try {
await handleSkip(toolCall, setToolCallState, onStateChange)
} finally { } finally {
setIsProcessing(false) setIsProcessing(false)
actionInProgressRef.current = false actionInProgressRef.current = false
@@ -1397,23 +1439,22 @@ function RunSkipButtons({
if (buttonsHidden) return null if (buttonsHidden) return null
// Show "Always Allow" for all tools that require confirmation
const showAlwaysAllow = true
// Standardized buttons for all interrupt tools: Allow, Always Allow, Skip
return ( return (
<div className='mt-[10px] flex gap-[6px]'> <div className='mt-[10px] flex gap-[6px]'>
<Button onClick={onRun} disabled={isProcessing} variant='tertiary'> {actions.map((action, index) => {
{isProcessing ? 'Allowing...' : 'Allow'} const variant =
</Button> action.kind === 'reject' ? 'default' : action.remember ? 'default' : 'tertiary'
{showAlwaysAllow && ( return (
<Button onClick={onAlwaysAllow} disabled={isProcessing} variant='default'> <Button
{isProcessing ? 'Allowing...' : 'Always Allow'} key={action.id}
</Button> onClick={() => onAction(action)}
)} disabled={isProcessing}
<Button onClick={onSkip} disabled={isProcessing} variant='default'> variant={variant}
Skip >
</Button> {isProcessing && index === 0 ? 'Working...' : action.label}
</Button>
)
})}
</div> </div>
) )
} }
@@ -1430,10 +1471,16 @@ export function ToolCall({
const liveToolCall = useCopilotStore((s) => const liveToolCall = useCopilotStore((s) =>
effectiveId ? s.toolCallsById[effectiveId] : undefined effectiveId ? s.toolCallsById[effectiveId] : undefined
) )
const toolCall = liveToolCall || toolCallProp const rawToolCall = liveToolCall || toolCallProp
const hasRealToolCall = !!rawToolCall
// Guard: nothing to render without a toolCall const toolCall: CopilotToolCall =
if (!toolCall) return null rawToolCall ||
({
id: effectiveId || '',
name: '',
state: ClientToolCallState.generating,
params: {},
} as CopilotToolCall)
const isExpandablePending = const isExpandablePending =
toolCall?.state === 'pending' && toolCall?.state === 'pending' &&
@@ -1441,17 +1488,15 @@ export function ToolCall({
const [expanded, setExpanded] = useState(isExpandablePending) const [expanded, setExpanded] = useState(isExpandablePending)
const [showRemoveAutoAllow, setShowRemoveAutoAllow] = useState(false) const [showRemoveAutoAllow, setShowRemoveAutoAllow] = useState(false)
const [autoAllowRemovedForCall, setAutoAllowRemovedForCall] = useState(false)
// State for editable parameters // State for editable parameters
const params = (toolCall as any).parameters || (toolCall as any).input || toolCall.params || {} const params = (toolCall as any).parameters || (toolCall as any).input || toolCall.params || {}
const [editedParams, setEditedParams] = useState(params) const [editedParams, setEditedParams] = useState(params)
const paramsRef = useRef(params) const paramsRef = useRef(params)
// Check if this integration tool is auto-allowed const { setToolCallState } = useCopilotStore()
const { removeAutoAllowedTool, setToolCallState } = useCopilotStore() const isAutoAllowed = toolCall.ui?.autoAllowed === true && !autoAllowRemovedForCall
const isAutoAllowed = useCopilotStore(
(s) => isIntegrationTool(toolCall.name) && s.isToolAutoAllowed(toolCall.name)
)
// Update edited params when toolCall params change (deep comparison to avoid resetting user edits on ref change) // Update edited params when toolCall params change (deep comparison to avoid resetting user edits on ref change)
useEffect(() => { useEffect(() => {
@@ -1461,6 +1506,14 @@ export function ToolCall({
} }
}, [params]) }, [params])
useEffect(() => {
setAutoAllowRemovedForCall(false)
setShowRemoveAutoAllow(false)
}, [toolCall.id])
// Guard: nothing to render without a toolCall
if (!hasRealToolCall) return null
// Skip rendering some internal tools // Skip rendering some internal tools
if ( if (
toolCall.name === 'checkoff_todo' || toolCall.name === 'checkoff_todo' ||
@@ -1472,7 +1525,9 @@ export function ToolCall({
return null return null
// Special rendering for subagent tools - show as thinking text with tool calls at top level // Special rendering for subagent tools - show as thinking text with tool calls at top level
const isSubagentTool = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.subagent === true const isSubagentTool =
toolCall.execution?.target === 'go_subagent' ||
TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig?.subagent === true
// For ALL subagent tools, don't show anything until we have blocks with content // For ALL subagent tools, don't show anything until we have blocks with content
if (isSubagentTool) { if (isSubagentTool) {
@@ -1499,28 +1554,6 @@ export function ToolCall({
) )
} }
// Get current mode from store to determine if we should render integration tools
const mode = useCopilotStore.getState().mode
// Check if this is a completed/historical tool call (not pending/executing)
// Use string comparison to handle both enum values and string values from DB
const stateStr = String(toolCall.state)
const isCompletedToolCall =
stateStr === 'success' ||
stateStr === 'error' ||
stateStr === 'rejected' ||
stateStr === 'aborted'
// Allow rendering if:
// 1. Tool is in TOOL_DISPLAY_REGISTRY (client tools), OR
// 2. We're in build mode (integration tools are executed server-side), OR
// 3. Tool call is already completed (historical - should always render)
const isClientTool = !!TOOL_DISPLAY_REGISTRY[toolCall.name]
const isIntegrationToolInBuildMode = mode === 'build' && !isClientTool
if (!isClientTool && !isIntegrationToolInBuildMode && !isCompletedToolCall) {
return null
}
const toolUIConfig = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig const toolUIConfig = TOOL_DISPLAY_REGISTRY[toolCall.name]?.uiConfig
// Check if tool has params table config (meaning it's expandable) // Check if tool has params table config (meaning it's expandable)
const hasParamsTable = !!toolUIConfig?.paramsTable const hasParamsTable = !!toolUIConfig?.paramsTable
@@ -1530,6 +1563,14 @@ export function ToolCall({
toolCall.name === 'make_api_request' || toolCall.name === 'make_api_request' ||
toolCall.name === 'set_global_workflow_variables' toolCall.name === 'set_global_workflow_variables'
const interruptActions =
(toolCall.ui?.actions && toolCall.ui.actions.length > 0
? toolCall.ui.actions
: [
{ id: 'allow_once', label: 'Allow', kind: 'accept' as const },
{ id: 'allow_always', label: 'Always Allow', kind: 'accept' as const, remember: true },
{ id: 'reject', label: 'Skip', kind: 'reject' as const },
]) as ToolUiAction[]
const showButtons = isCurrentMessage && shouldShowRunSkipButtons(toolCall) const showButtons = isCurrentMessage && shouldShowRunSkipButtons(toolCall)
// Check UI config for secondary action - only show for current message tool calls // Check UI config for secondary action - only show for current message tool calls
@@ -1987,9 +2028,12 @@ export function ToolCall({
<div className='mt-[10px]'> <div className='mt-[10px]'>
<Button <Button
onClick={async () => { onClick={async () => {
await removeAutoAllowedTool(toolCall.name) const removed = await removeAutoAllowedToolPreference(toolCall.name)
setShowRemoveAutoAllow(false) if (removed) {
forceUpdate({}) setAutoAllowRemovedForCall(true)
setShowRemoveAutoAllow(false)
forceUpdate({})
}
}} }}
variant='default' variant='default'
className='text-xs' className='text-xs'
@@ -2003,6 +2047,7 @@ export function ToolCall({
toolCall={toolCall} toolCall={toolCall}
onStateChange={handleStateChange} onStateChange={handleStateChange}
editedParams={editedParams} editedParams={editedParams}
actions={interruptActions}
/> />
)} )}
{/* Render subagent content as thinking text */} {/* Render subagent content as thinking text */}
@@ -2048,9 +2093,12 @@ export function ToolCall({
<div className='mt-[10px]'> <div className='mt-[10px]'>
<Button <Button
onClick={async () => { onClick={async () => {
await removeAutoAllowedTool(toolCall.name) const removed = await removeAutoAllowedToolPreference(toolCall.name)
setShowRemoveAutoAllow(false) if (removed) {
forceUpdate({}) setAutoAllowRemovedForCall(true)
setShowRemoveAutoAllow(false)
forceUpdate({})
}
}} }}
variant='default' variant='default'
className='text-xs' className='text-xs'
@@ -2064,6 +2112,7 @@ export function ToolCall({
toolCall={toolCall} toolCall={toolCall}
onStateChange={handleStateChange} onStateChange={handleStateChange}
editedParams={editedParams} editedParams={editedParams}
actions={interruptActions}
/> />
)} )}
{/* Render subagent content as thinking text */} {/* Render subagent content as thinking text */}
@@ -2087,7 +2136,7 @@ export function ToolCall({
} }
} }
const isEditWorkflow = toolCall.name === 'edit_workflow' const isEditWorkflow = isWorkflowEditSummaryTool(toolCall)
const shouldShowDetails = isRunWorkflow || (isExpandableTool && expanded) const shouldShowDetails = isRunWorkflow || (isExpandableTool && expanded)
const hasOperations = Array.isArray(params.operations) && params.operations.length > 0 const hasOperations = Array.isArray(params.operations) && params.operations.length > 0
const hideTextForEditWorkflow = isEditWorkflow && hasOperations const hideTextForEditWorkflow = isEditWorkflow && hasOperations
@@ -2109,9 +2158,12 @@ export function ToolCall({
<div className='mt-[10px]'> <div className='mt-[10px]'>
<Button <Button
onClick={async () => { onClick={async () => {
await removeAutoAllowedTool(toolCall.name) const removed = await removeAutoAllowedToolPreference(toolCall.name)
setShowRemoveAutoAllow(false) if (removed) {
forceUpdate({}) setAutoAllowRemovedForCall(true)
setShowRemoveAutoAllow(false)
forceUpdate({})
}
}} }}
variant='default' variant='default'
className='text-xs' className='text-xs'
@@ -2125,6 +2177,7 @@ export function ToolCall({
toolCall={toolCall} toolCall={toolCall}
onStateChange={handleStateChange} onStateChange={handleStateChange}
editedParams={editedParams} editedParams={editedParams}
actions={interruptActions}
/> />
) : showMoveToBackground ? ( ) : showMoveToBackground ? (
<div className='mt-[10px]'> <div className='mt-[10px]'>
@@ -2155,7 +2208,7 @@ export function ToolCall({
</Button> </Button>
</div> </div>
) : null} ) : null}
{/* Workflow edit summary - shows block changes after edit_workflow completes */} {/* Workflow edit summary - shows block changes after workflow_change(apply) */}
<WorkflowEditSummary toolCall={toolCall} /> <WorkflowEditSummary toolCall={toolCall} />
{/* Render subagent content as thinking text */} {/* Render subagent content as thinking text */}
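In summary, the interrupt buttons are now driven by toolCall.ui.actions (falling back to the three defaults defined above), and each action maps to a decision posted to /api/copilot/confirm. A hedged summary of that mapping; the reject payload is inferred from handleSkip rather than shown in this hunk:

// allow_once    -> 'accepted'                    -> { toolCallId, status: 'accepted', toolName }
// allow_always  -> 'accepted' (remember: true)   -> { toolCallId, status: 'accepted', toolName, remember: true }
// reject        -> 'rejected' via handleSkip     -> presumably { toolCallId, status: 'rejected' }
// id containing 'background' -> 'background'     -> { toolCallId, status: 'background' }
const exampleAction = { id: 'allow_always', label: 'Always Allow', kind: 'accept' as const, remember: true }
actionDecision(exampleAction) // => 'accepted'; handleRun then forwards remember: true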

View File

@@ -113,7 +113,6 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
clearPlanArtifact, clearPlanArtifact,
savePlanArtifact, savePlanArtifact,
loadAvailableModels, loadAvailableModels,
loadAutoAllowedTools,
resumeActiveStream, resumeActiveStream,
} = useCopilotStore() } = useCopilotStore()
@@ -125,8 +124,6 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
setCopilotWorkflowId, setCopilotWorkflowId,
loadChats, loadChats,
loadAvailableModels, loadAvailableModels,
loadAutoAllowedTools,
currentChat,
isSendingMessage, isSendingMessage,
resumeActiveStream, resumeActiveStream,
}) })
@@ -154,6 +151,8 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
planTodos, planTodos,
}) })
const renderedChatTitle = currentChat?.title || 'New Chat'
/** Gets markdown content for design document section (available in all modes once created) */ /** Gets markdown content for design document section (available in all modes once created) */
const designDocumentContent = useMemo(() => { const designDocumentContent = useMemo(() => {
if (streamingPlanContent) { if (streamingPlanContent) {
@@ -166,6 +165,14 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
return '' return ''
}, [streamingPlanContent]) }, [streamingPlanContent])
useEffect(() => {
logger.info('[TitleRender] Copilot header title changed', {
currentChatId: currentChat?.id || null,
currentChatTitle: currentChat?.title || null,
renderedTitle: renderedChatTitle,
})
}, [currentChat?.id, currentChat?.title, renderedChatTitle])
/** Focuses the copilot input */ /** Focuses the copilot input */
const focusInput = useCallback(() => { const focusInput = useCallback(() => {
userInputRef.current?.focus() userInputRef.current?.focus()
@@ -348,7 +355,7 @@ export const Copilot = forwardRef<CopilotRef, CopilotProps>(({ panelWidth }, ref
{/* Header */} {/* Header */}
<div className='mx-[-1px] flex flex-shrink-0 items-center justify-between gap-[8px] rounded-[4px] border border-[var(--border)] bg-[var(--surface-4)] px-[12px] py-[6px]'> <div className='mx-[-1px] flex flex-shrink-0 items-center justify-between gap-[8px] rounded-[4px] border border-[var(--border)] bg-[var(--surface-4)] px-[12px] py-[6px]'>
<h2 className='min-w-0 flex-1 truncate font-medium text-[14px] text-[var(--text-primary)]'> <h2 className='min-w-0 flex-1 truncate font-medium text-[14px] text-[var(--text-primary)]'>
{currentChat?.title || 'New Chat'} {renderedChatTitle}
</h2> </h2>
<div className='flex items-center gap-[8px]'> <div className='flex items-center gap-[8px]'>
<Button variant='ghost' className='p-0' onClick={handleStartNewChat}> <Button variant='ghost' className='p-0' onClick={handleStartNewChat}>

View File

@@ -12,8 +12,6 @@ interface UseCopilotInitializationProps {
setCopilotWorkflowId: (workflowId: string | null) => Promise<void> setCopilotWorkflowId: (workflowId: string | null) => Promise<void>
loadChats: (forceRefresh?: boolean) => Promise<void> loadChats: (forceRefresh?: boolean) => Promise<void>
loadAvailableModels: () => Promise<void> loadAvailableModels: () => Promise<void>
loadAutoAllowedTools: () => Promise<void>
currentChat: any
isSendingMessage: boolean isSendingMessage: boolean
resumeActiveStream: () => Promise<boolean> resumeActiveStream: () => Promise<boolean>
} }
@@ -32,8 +30,6 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) {
setCopilotWorkflowId, setCopilotWorkflowId,
loadChats, loadChats,
loadAvailableModels, loadAvailableModels,
loadAutoAllowedTools,
currentChat,
isSendingMessage, isSendingMessage,
resumeActiveStream, resumeActiveStream,
} = props } = props
@@ -120,17 +116,6 @@ export function useCopilotInitialization(props: UseCopilotInitializationProps) {
}) })
}, [isSendingMessage, resumeActiveStream]) }, [isSendingMessage, resumeActiveStream])
/** Load auto-allowed tools once on mount - runs immediately, independent of workflow */
const hasLoadedAutoAllowedToolsRef = useRef(false)
useEffect(() => {
if (!hasLoadedAutoAllowedToolsRef.current) {
hasLoadedAutoAllowedToolsRef.current = true
loadAutoAllowedTools().catch((err) => {
logger.warn('[Copilot] Failed to load auto-allowed tools', err)
})
}
}, [loadAutoAllowedTools])
/** Load available models once on mount */ /** Load available models once on mount */
const hasLoadedModelsRef = useRef(false) const hasLoadedModelsRef = useRef(false)
useEffect(() => { useEffect(() => {

View File

@@ -113,7 +113,7 @@ export function VersionDescriptionModal({
className='min-h-[120px] resize-none' className='min-h-[120px] resize-none'
value={description} value={description}
onChange={(e) => setDescription(e.target.value)} onChange={(e) => setDescription(e.target.value)}
maxLength={2000} maxLength={500}
disabled={isGenerating} disabled={isGenerating}
/> />
<div className='flex items-center justify-between'> <div className='flex items-center justify-between'>
@@ -123,7 +123,7 @@ export function VersionDescriptionModal({
</p> </p>
)} )}
{!updateMutation.error && !generateMutation.error && <div />} {!updateMutation.error && !generateMutation.error && <div />}
<p className='text-[11px] text-[var(--text-tertiary)]'>{description.length}/2000</p> <p className='text-[11px] text-[var(--text-tertiary)]'>{description.length}/500</p>
</div> </div>
</ModalBody> </ModalBody>
<ModalFooter> <ModalFooter>

View File

@@ -57,21 +57,6 @@ export function useChangeDetection({
} }
} }
if (block.triggerMode) {
const triggerConfigValue = blockSubValues?.triggerConfig
if (
triggerConfigValue &&
typeof triggerConfigValue === 'object' &&
!subBlocks.triggerConfig
) {
subBlocks.triggerConfig = {
id: 'triggerConfig',
type: 'short-input',
value: triggerConfigValue,
}
}
}
blocksWithSubBlocks[blockId] = { blocksWithSubBlocks[blockId] = {
...block, ...block,
subBlocks, subBlocks,

View File

@@ -1,10 +1,7 @@
import { useCallback, useState } from 'react' import { useCallback, useState } from 'react'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { runPreDeployChecks } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/deploy/hooks/use-predeploy-checks'
import { useNotificationStore } from '@/stores/notifications' import { useNotificationStore } from '@/stores/notifications'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store' import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { mergeSubblockState } from '@/stores/workflows/utils'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
const logger = createLogger('useDeployment') const logger = createLogger('useDeployment')
@@ -38,24 +35,6 @@ export function useDeployment({
return { success: true, shouldOpenModal: true } return { success: true, shouldOpenModal: true }
} }
const { blocks, edges, loops, parallels } = useWorkflowStore.getState()
const liveBlocks = mergeSubblockState(blocks, workflowId)
const checkResult = runPreDeployChecks({
blocks: liveBlocks,
edges,
loops,
parallels,
workflowId,
})
if (!checkResult.passed) {
addNotification({
level: 'error',
message: checkResult.error || 'Pre-deploy validation failed',
workflowId,
})
return { success: false, shouldOpenModal: false }
}
setIsDeploying(true) setIsDeploying(true)
try { try {
const response = await fetch(`/api/workflows/${workflowId}/deploy`, { const response = await fetch(`/api/workflows/${workflowId}/deploy`, {

View File

@@ -239,12 +239,7 @@ export const ComboBox = memo(function ComboBox({
*/ */
const defaultOptionValue = useMemo(() => { const defaultOptionValue = useMemo(() => {
if (defaultValue !== undefined) { if (defaultValue !== undefined) {
// Validate that the default value exists in the available (filtered) options return defaultValue
const defaultInOptions = evaluatedOptions.find((opt) => getOptionValue(opt) === defaultValue)
if (defaultInOptions) {
return defaultValue
}
// Default not available (e.g. provider disabled) — fall through to other fallbacks
} }
// For model field, default to claude-sonnet-4-5 if available // For model field, default to claude-sonnet-4-5 if available
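The behavior removed here validated a configured default against the currently available options before using it, falling through to other fallbacks otherwise. A standalone sketch of that check; Option, getOptionValue, and resolveDefaultOption are illustrative names, not the component's actual API.

interface Option {
  label: string
  id: string
}

const getOptionValue = (opt: Option): string => opt.id

/**
 * Returns the configured default only when it is still present in the
 * filtered option list; otherwise returns undefined so callers can
 * apply their own fallback (e.g. the first available model).
 */
function resolveDefaultOption(
  evaluatedOptions: Option[],
  defaultValue?: string
): string | undefined {
  if (defaultValue === undefined) return undefined
  const exists = evaluatedOptions.some((opt) => getOptionValue(opt) === defaultValue)
  return exists ? defaultValue : undefined
}

// Example: the default refers to a model that is not in the filtered list, so it is skipped.
resolveDefaultOption([{ label: 'GPT-4o', id: 'gpt-4o' }], 'claude-sonnet-4-5') // undefined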

View File

@@ -4,7 +4,6 @@ import { Button, Combobox } from '@/components/emcn/components'
import { import {
getCanonicalScopesForProvider, getCanonicalScopesForProvider,
getProviderIdFromServiceId, getProviderIdFromServiceId,
getServiceConfigByProviderId,
OAUTH_PROVIDERS, OAUTH_PROVIDERS,
type OAuthProvider, type OAuthProvider,
type OAuthService, type OAuthService,
@@ -27,11 +26,6 @@ const getProviderIcon = (providerName: OAuthProvider) => {
} }
const getProviderName = (providerName: OAuthProvider) => { const getProviderName = (providerName: OAuthProvider) => {
const serviceConfig = getServiceConfigByProviderId(providerName)
if (serviceConfig) {
return serviceConfig.name
}
const { baseProvider } = parseProvider(providerName) const { baseProvider } = parseProvider(providerName)
const baseProviderConfig = OAUTH_PROVIDERS[baseProvider] const baseProviderConfig = OAUTH_PROVIDERS[baseProvider]
@@ -60,7 +54,7 @@ export function ToolCredentialSelector({
onChange, onChange,
provider, provider,
requiredScopes = [], requiredScopes = [],
label, label = 'Select account',
serviceId, serviceId,
disabled = false, disabled = false,
}: ToolCredentialSelectorProps) { }: ToolCredentialSelectorProps) {
@@ -70,7 +64,6 @@ export function ToolCredentialSelector({
const { activeWorkflowId } = useWorkflowRegistry() const { activeWorkflowId } = useWorkflowRegistry()
const selectedId = value || '' const selectedId = value || ''
const effectiveLabel = label || `Select ${getProviderName(provider)} account`
const effectiveProviderId = useMemo(() => getProviderIdFromServiceId(serviceId), [serviceId]) const effectiveProviderId = useMemo(() => getProviderIdFromServiceId(serviceId), [serviceId])
@@ -210,7 +203,7 @@ export function ToolCredentialSelector({
selectedValue={selectedId} selectedValue={selectedId}
onChange={handleComboboxChange} onChange={handleComboboxChange}
onOpenChange={handleOpenChange} onOpenChange={handleOpenChange}
placeholder={effectiveLabel} placeholder={label}
disabled={disabled} disabled={disabled}
editable={true} editable={true}
filterOptions={!isForeign} filterOptions={!isForeign}

View File

@@ -1,186 +0,0 @@
'use client'
import type React from 'react'
import { useRef, useState } from 'react'
import { ArrowLeftRight, ArrowUp } from 'lucide-react'
import { Button, Input, Label, Tooltip } from '@/components/emcn'
import { cn } from '@/lib/core/utils/cn'
import type { WandControlHandlers } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/sub-block'
/**
* Props for a generic parameter with label component
*/
export interface ParameterWithLabelProps {
paramId: string
title: string
isRequired: boolean
visibility: string
wandConfig?: {
enabled: boolean
prompt?: string
placeholder?: string
}
canonicalToggle?: {
mode: 'basic' | 'advanced'
disabled?: boolean
onToggle?: () => void
}
disabled: boolean
isPreview: boolean
children: (wandControlRef: React.MutableRefObject<WandControlHandlers | null>) => React.ReactNode
}
/**
* Generic wrapper component for parameters that manages wand state and renders label + input
*/
export function ParameterWithLabel({
paramId,
title,
isRequired,
visibility,
wandConfig,
canonicalToggle,
disabled,
isPreview,
children,
}: ParameterWithLabelProps) {
const [isSearchActive, setIsSearchActive] = useState(false)
const [searchQuery, setSearchQuery] = useState('')
const searchInputRef = useRef<HTMLInputElement>(null)
const wandControlRef = useRef<WandControlHandlers | null>(null)
const isWandEnabled = wandConfig?.enabled ?? false
const showWand = isWandEnabled && !isPreview && !disabled
const handleSearchClick = (): void => {
setIsSearchActive(true)
setTimeout(() => {
searchInputRef.current?.focus()
}, 0)
}
const handleSearchBlur = (): void => {
if (!searchQuery.trim() && !wandControlRef.current?.isWandStreaming) {
setIsSearchActive(false)
}
}
const handleSearchChange = (value: string): void => {
setSearchQuery(value)
}
const handleSearchSubmit = (): void => {
if (searchQuery.trim() && wandControlRef.current) {
wandControlRef.current.onWandTrigger(searchQuery)
setSearchQuery('')
setIsSearchActive(false)
}
}
const handleSearchCancel = (): void => {
setSearchQuery('')
setIsSearchActive(false)
}
const isStreaming = wandControlRef.current?.isWandStreaming ?? false
return (
<div key={paramId} className='relative min-w-0 space-y-[6px]'>
<div className='flex items-center justify-between gap-[6px] pl-[2px]'>
<Label className='flex items-baseline gap-[6px] whitespace-nowrap font-medium text-[13px] text-[var(--text-primary)]'>
{title}
{isRequired && visibility === 'user-only' && <span className='ml-0.5'>*</span>}
</Label>
<div className='flex min-w-0 flex-1 items-center justify-end gap-[6px]'>
{showWand &&
(!isSearchActive ? (
<Button
variant='active'
className='-my-1 h-5 px-2 py-0 text-[11px]'
onClick={handleSearchClick}
>
Generate
</Button>
) : (
<div className='-my-1 flex min-w-[120px] max-w-[280px] flex-1 items-center gap-[4px]'>
<Input
ref={searchInputRef}
value={isStreaming ? 'Generating...' : searchQuery}
onChange={(e: React.ChangeEvent<HTMLInputElement>) =>
handleSearchChange(e.target.value)
}
onBlur={(e: React.FocusEvent<HTMLInputElement>) => {
const relatedTarget = e.relatedTarget as HTMLElement | null
if (relatedTarget?.closest('button')) return
handleSearchBlur()
}}
onKeyDown={(e: React.KeyboardEvent<HTMLInputElement>) => {
if (e.key === 'Enter' && searchQuery.trim() && !isStreaming) {
handleSearchSubmit()
} else if (e.key === 'Escape') {
handleSearchCancel()
}
}}
disabled={isStreaming}
className={cn(
'h-5 min-w-[80px] flex-1 text-[11px]',
isStreaming && 'text-muted-foreground'
)}
placeholder='Generate with AI...'
/>
<Button
variant='tertiary'
disabled={!searchQuery.trim() || isStreaming}
onMouseDown={(e: React.MouseEvent) => {
e.preventDefault()
e.stopPropagation()
}}
onClick={(e: React.MouseEvent) => {
e.stopPropagation()
handleSearchSubmit()
}}
className='h-[20px] w-[20px] flex-shrink-0 p-0'
>
<ArrowUp className='h-[12px] w-[12px]' />
</Button>
</div>
))}
{canonicalToggle && !isPreview && (
<Tooltip.Root>
<Tooltip.Trigger asChild>
<button
type='button'
className='flex h-[12px] w-[12px] flex-shrink-0 items-center justify-center bg-transparent p-0 disabled:cursor-not-allowed disabled:opacity-50'
onClick={canonicalToggle.onToggle}
disabled={canonicalToggle.disabled || disabled}
aria-label={
canonicalToggle.mode === 'advanced'
? 'Switch to selector'
: 'Switch to manual ID'
}
>
<ArrowLeftRight
className={cn(
'!h-[12px] !w-[12px]',
canonicalToggle.mode === 'advanced'
? 'text-[var(--text-primary)]'
: 'text-[var(--text-secondary)]'
)}
/>
</button>
</Tooltip.Trigger>
<Tooltip.Content side='top'>
<p>
{canonicalToggle.mode === 'advanced'
? 'Switch to selector'
: 'Switch to manual ID'}
</p>
</Tooltip.Content>
</Tooltip.Root>
)}
</div>
</div>
<div className='relative w-full min-w-0'>{children(wandControlRef)}</div>
</div>
)
}
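For reference, the render-prop contract of this removed wrapper: children receives the wand control ref so the inner input can register its generate handlers with the label-level Generate button. A hypothetical caller might look like the following; MyParamInput is an illustrative component, not part of the codebase.

// Hypothetical usage of the render-prop API.
<ParameterWithLabel
  paramId='query'
  title='Query'
  isRequired={true}
  visibility='user-only'
  wandConfig={{ enabled: true, prompt: 'Write a search query' }}
  disabled={false}
  isPreview={false}
>
  {(wandControlRef) => (
    <MyParamInput
      // The input registers onWandTrigger/isWandStreaming on this ref
      // so the wrapper's Generate button can drive it.
      wandControlRef={wandControlRef}
    />
  )}
</ParameterWithLabel>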

View File

@@ -1,105 +0,0 @@
'use client'
import { useEffect, useRef } from 'react'
import { SubBlock } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/sub-block'
import type { SubBlockConfig as BlockSubBlockConfig } from '@/blocks/types'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
interface ToolSubBlockRendererProps {
blockId: string
subBlockId: string
toolIndex: number
subBlock: BlockSubBlockConfig
effectiveParamId: string
toolParams: Record<string, string> | undefined
onParamChange: (toolIndex: number, paramId: string, value: string) => void
disabled: boolean
canonicalToggle?: {
mode: 'basic' | 'advanced'
disabled?: boolean
onToggle?: () => void
}
}
/**
* SubBlock types whose store values are objects/arrays/non-strings.
* tool.params stores strings (via JSON.stringify), so when syncing
* back to the store we parse them to restore the native shape.
*/
const OBJECT_SUBBLOCK_TYPES = new Set(['file-upload', 'table', 'grouped-checkbox-list'])
/**
* Bridges the subblock store with StoredTool.params via a synthetic store key,
* then delegates all rendering to SubBlock for full parity.
*/
export function ToolSubBlockRenderer({
blockId,
subBlockId,
toolIndex,
subBlock,
effectiveParamId,
toolParams,
onParamChange,
disabled,
canonicalToggle,
}: ToolSubBlockRendererProps) {
const syntheticId = `${subBlockId}-tool-${toolIndex}-${effectiveParamId}`
const toolParamValue = toolParams?.[effectiveParamId] ?? ''
const isObjectType = OBJECT_SUBBLOCK_TYPES.has(subBlock.type)
const syncedRef = useRef<string | null>(null)
const onParamChangeRef = useRef(onParamChange)
onParamChangeRef.current = onParamChange
useEffect(() => {
const unsub = useSubBlockStore.subscribe((state, prevState) => {
const wfId = useWorkflowRegistry.getState().activeWorkflowId
if (!wfId) return
const newVal = state.workflowValues[wfId]?.[blockId]?.[syntheticId]
const oldVal = prevState.workflowValues[wfId]?.[blockId]?.[syntheticId]
if (newVal === oldVal) return
const stringified =
newVal == null ? '' : typeof newVal === 'string' ? newVal : JSON.stringify(newVal)
if (stringified === syncedRef.current) return
syncedRef.current = stringified
onParamChangeRef.current(toolIndex, effectiveParamId, stringified)
})
return unsub
}, [blockId, syntheticId, toolIndex, effectiveParamId])
useEffect(() => {
if (toolParamValue === syncedRef.current) return
syncedRef.current = toolParamValue
if (isObjectType && toolParamValue) {
try {
const parsed = JSON.parse(toolParamValue)
if (typeof parsed === 'object' && parsed !== null) {
useSubBlockStore.getState().setValue(blockId, syntheticId, parsed)
return
}
} catch {}
}
useSubBlockStore.getState().setValue(blockId, syntheticId, toolParamValue)
}, [toolParamValue, blockId, syntheticId, isObjectType])
const visibility = subBlock.paramVisibility ?? 'user-or-llm'
const isOptionalForUser = visibility !== 'user-only'
const config = {
...subBlock,
id: syntheticId,
...(isOptionalForUser && { required: false }),
}
return (
<SubBlock
blockId={blockId}
config={config}
isPreview={false}
disabled={disabled}
canonicalToggle={canonicalToggle}
dependencyContext={toolParams}
/>
)
}
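The removed bridge above relied on two conventions: a synthetic store key per tool parameter, and a string round-trip because tool.params only holds strings. A minimal sketch of both, with illustrative function names.

const OBJECT_SUBBLOCK_TYPES = new Set(['file-upload', 'table', 'grouped-checkbox-list'])

/** Key under which a tool parameter is mirrored into the subblock store. */
function syntheticKey(subBlockId: string, toolIndex: number, paramId: string): string {
  return `${subBlockId}-tool-${toolIndex}-${paramId}`
}

/** Store value -> tool.params: everything is persisted as a string. */
function toParamString(value: unknown): string {
  if (value == null) return ''
  return typeof value === 'string' ? value : JSON.stringify(value)
}

/** tool.params -> store value: object-shaped subblocks get their native shape back. */
function fromParamString(subBlockType: string, stored: string): unknown {
  if (OBJECT_SUBBLOCK_TYPES.has(subBlockType) && stored) {
    try {
      const parsed = JSON.parse(stored)
      if (typeof parsed === 'object' && parsed !== null) return parsed
    } catch {}
  }
  return stored
}

syntheticKey('tools', 0, 'channel') // 'tools-tool-0-channel'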

View File

@@ -2,12 +2,37 @@
* @vitest-environment node * @vitest-environment node
*/ */
import { describe, expect, it } from 'vitest' import { describe, expect, it } from 'vitest'
import type { StoredTool } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tool-input/types'
import { interface StoredTool {
isCustomToolAlreadySelected, type: string
isMcpToolAlreadySelected, title?: string
isWorkflowAlreadySelected, toolId?: string
} from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tool-input/utils' params?: Record<string, string>
customToolId?: string
schema?: any
code?: string
operation?: string
usageControl?: 'auto' | 'force' | 'none'
}
const isMcpToolAlreadySelected = (selectedTools: StoredTool[], mcpToolId: string): boolean => {
return selectedTools.some((tool) => tool.type === 'mcp' && tool.toolId === mcpToolId)
}
const isCustomToolAlreadySelected = (
selectedTools: StoredTool[],
customToolId: string
): boolean => {
return selectedTools.some(
(tool) => tool.type === 'custom-tool' && tool.customToolId === customToolId
)
}
const isWorkflowAlreadySelected = (selectedTools: StoredTool[], workflowId: string): boolean => {
return selectedTools.some(
(tool) => tool.type === 'workflow_input' && tool.params?.workflowId === workflowId
)
}
describe('isMcpToolAlreadySelected', () => { describe('isMcpToolAlreadySelected', () => {
describe('basic functionality', () => { describe('basic functionality', () => {

View File

@@ -1,31 +0,0 @@
/**
* Represents a tool selected and configured in the workflow
*
* @remarks
* For custom tools (new format), we only store: type, customToolId, usageControl, isExpanded.
* Everything else (title, schema, code) is loaded dynamically from the database.
* Legacy custom tools with inline schema/code are still supported for backwards compatibility.
*/
export interface StoredTool {
/** Block type identifier */
type: string
/** Display title for the tool (optional for new custom tool format) */
title?: string
/** Direct tool ID for execution (optional for new custom tool format) */
toolId?: string
/** Parameter values configured by the user (optional for new custom tool format) */
params?: Record<string, string>
/** Whether the tool details are expanded in UI */
isExpanded?: boolean
/** Database ID for custom tools (new format - reference only) */
customToolId?: string
/** Tool schema for custom tools (legacy format - inline JSON schema) */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
schema?: Record<string, any>
/** Implementation code for custom tools (legacy format - inline) */
code?: string
/** Selected operation for multi-operation tools */
operation?: string
/** Tool usage control mode for LLM */
usageControl?: 'auto' | 'force' | 'none'
}
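The remarks above distinguish the two shapes this interface covered. A pair of illustrative literals (IDs and values are made up): the new format stores only a reference, while the legacy format carries schema and code inline.

// New-format custom tool: reference only, details loaded from the database.
const referenceTool: StoredTool = {
  type: 'custom-tool',
  customToolId: 'ct_123',
  usageControl: 'auto',
  isExpanded: false,
}

// Legacy custom tool: schema and code stored inline for backwards compatibility.
const legacyTool: StoredTool = {
  type: 'custom-tool',
  title: 'Lookup order',
  schema: { function: { name: 'lookup_order', parameters: { type: 'object' } } },
  code: 'return { status: "ok" }',
  usageControl: 'auto',
}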

View File

@@ -1,32 +0,0 @@
import type { StoredTool } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tool-input/types'
/**
* Checks if an MCP tool is already selected.
*/
export function isMcpToolAlreadySelected(selectedTools: StoredTool[], mcpToolId: string): boolean {
return selectedTools.some((tool) => tool.type === 'mcp' && tool.toolId === mcpToolId)
}
/**
* Checks if a custom tool is already selected.
*/
export function isCustomToolAlreadySelected(
selectedTools: StoredTool[],
customToolId: string
): boolean {
return selectedTools.some(
(tool) => tool.type === 'custom-tool' && tool.customToolId === customToolId
)
}
/**
* Checks if a workflow is already selected.
*/
export function isWorkflowAlreadySelected(
selectedTools: StoredTool[],
workflowId: string
): boolean {
return selectedTools.some(
(tool) => tool.type === 'workflow_input' && tool.params?.workflowId === workflowId
)
}
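Illustrative values showing how the three predicates read StoredTool entries (the IDs here are made up):

const selectedTools: StoredTool[] = [
  { type: 'mcp', toolId: 'mcp_search' },
  { type: 'custom-tool', customToolId: 'ct_123' },
  { type: 'workflow_input', params: { workflowId: 'wf_42' } },
]

isMcpToolAlreadySelected(selectedTools, 'mcp_search') // true
isCustomToolAlreadySelected(selectedTools, 'ct_999') // false
isWorkflowAlreadySelected(selectedTools, 'wf_42') // true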

View File

@@ -3,6 +3,7 @@ import { isEqual } from 'lodash'
import { AlertTriangle, ArrowLeftRight, ArrowUp, Check, Clipboard } from 'lucide-react' import { AlertTriangle, ArrowLeftRight, ArrowUp, Check, Clipboard } from 'lucide-react'
import { Button, Input, Label, Tooltip } from '@/components/emcn/components' import { Button, Input, Label, Tooltip } from '@/components/emcn/components'
import { cn } from '@/lib/core/utils/cn' import { cn } from '@/lib/core/utils/cn'
import type { FieldDiffStatus } from '@/lib/workflows/diff/types'
import { import {
CheckboxList, CheckboxList,
Code, Code,
@@ -68,15 +69,13 @@ interface SubBlockProps {
isPreview?: boolean isPreview?: boolean
subBlockValues?: Record<string, any> subBlockValues?: Record<string, any>
disabled?: boolean disabled?: boolean
fieldDiffStatus?: FieldDiffStatus
allowExpandInPreview?: boolean allowExpandInPreview?: boolean
canonicalToggle?: { canonicalToggle?: {
mode: 'basic' | 'advanced' mode: 'basic' | 'advanced'
disabled?: boolean disabled?: boolean
onToggle?: () => void onToggle?: () => void
} }
labelSuffix?: React.ReactNode
/** Provides sibling values for dependency resolution in non-preview contexts (e.g. tool-input) */
dependencyContext?: Record<string, unknown>
} }
/** /**
@@ -163,14 +162,16 @@ const getPreviewValue = (
/** /**
* Renders the label with optional validation and description tooltips. * Renders the label with optional validation and description tooltips.
* *
* @remarks
* Handles JSON validation indicators for code blocks and required field markers.
* Includes inline AI generate button when wand is enabled.
*
* @param config - The sub-block configuration defining the label content * @param config - The sub-block configuration defining the label content
* @param isValidJson - Whether the JSON content is valid (for code blocks) * @param isValidJson - Whether the JSON content is valid (for code blocks)
* @param subBlockValues - Current values of all subblocks for evaluating conditional requirements * @param subBlockValues - Current values of all subblocks for evaluating conditional requirements
* @param wandState - State and handlers for the inline AI generate feature * @param wandState - Optional state and handlers for the AI wand feature
* @param canonicalToggle - Metadata and handlers for the basic/advanced mode toggle * @param canonicalToggle - Optional canonical toggle metadata and handlers
* @param canonicalToggleIsDisabled - Whether the canonical toggle is disabled (includes dependsOn gating) * @param canonicalToggleIsDisabled - Whether the canonical toggle is disabled
* @param copyState - State and handler for the copy-to-clipboard button
* @param labelSuffix - Additional content rendered after the label text
* @returns The label JSX element, or `null` for switch types or when no title is defined * @returns The label JSX element, or `null` for switch types or when no title is defined
*/ */
const renderLabel = ( const renderLabel = (
@@ -201,8 +202,7 @@ const renderLabel = (
showCopyButton: boolean showCopyButton: boolean
copied: boolean copied: boolean
onCopy: () => void onCopy: () => void
}, }
labelSuffix?: React.ReactNode
): JSX.Element | null => { ): JSX.Element | null => {
if (config.type === 'switch') return null if (config.type === 'switch') return null
if (!config.title) return null if (!config.title) return null
@@ -215,10 +215,9 @@ const renderLabel = (
return ( return (
<div className='flex items-center justify-between gap-[6px] pl-[2px]'> <div className='flex items-center justify-between gap-[6px] pl-[2px]'>
<Label className='flex items-baseline gap-[6px] whitespace-nowrap'> <Label className='flex items-center gap-[6px] whitespace-nowrap'>
{config.title} {config.title}
{required && <span className='ml-0.5'>*</span>} {required && <span className='ml-0.5'>*</span>}
{labelSuffix}
{config.type === 'code' && {config.type === 'code' &&
config.language === 'json' && config.language === 'json' &&
!isValidJson && !isValidJson &&
@@ -384,25 +383,28 @@ const arePropsEqual = (prevProps: SubBlockProps, nextProps: SubBlockProps): bool
prevProps.isPreview === nextProps.isPreview && prevProps.isPreview === nextProps.isPreview &&
valueEqual && valueEqual &&
prevProps.disabled === nextProps.disabled && prevProps.disabled === nextProps.disabled &&
prevProps.fieldDiffStatus === nextProps.fieldDiffStatus &&
prevProps.allowExpandInPreview === nextProps.allowExpandInPreview && prevProps.allowExpandInPreview === nextProps.allowExpandInPreview &&
canonicalToggleEqual && canonicalToggleEqual
prevProps.labelSuffix === nextProps.labelSuffix &&
prevProps.dependencyContext === nextProps.dependencyContext
) )
} }
/** /**
* Renders a single workflow sub-block input based on config.type. * Renders a single workflow sub-block input based on config.type.
* *
* @remarks
* Supports multiple input types including short-input, long-input, dropdown,
* combobox, slider, table, code, switch, tool-input, and many more.
* Handles preview mode, disabled states, and AI wand generation.
*
* @param blockId - The parent block identifier * @param blockId - The parent block identifier
* @param config - Configuration defining the input type and properties * @param config - Configuration defining the input type and properties
* @param isPreview - Whether to render in preview mode * @param isPreview - Whether to render in preview mode
* @param subBlockValues - Current values of all subblocks * @param subBlockValues - Current values of all subblocks
* @param disabled - Whether the input is disabled * @param disabled - Whether the input is disabled
* @param fieldDiffStatus - Optional diff status for visual indicators
* @param allowExpandInPreview - Whether to allow expanding in preview mode * @param allowExpandInPreview - Whether to allow expanding in preview mode
* @param canonicalToggle - Metadata and handlers for the basic/advanced mode toggle * @returns The rendered sub-block input component
* @param labelSuffix - Additional content rendered after the label text
* @param dependencyContext - Sibling values for dependency resolution in non-preview contexts (e.g. tool-input)
*/ */
function SubBlockComponent({ function SubBlockComponent({
blockId, blockId,
@@ -410,10 +412,9 @@ function SubBlockComponent({
isPreview = false, isPreview = false,
subBlockValues, subBlockValues,
disabled = false, disabled = false,
fieldDiffStatus,
allowExpandInPreview, allowExpandInPreview,
canonicalToggle, canonicalToggle,
labelSuffix,
dependencyContext,
}: SubBlockProps): JSX.Element { }: SubBlockProps): JSX.Element {
const [isValidJson, setIsValidJson] = useState(true) const [isValidJson, setIsValidJson] = useState(true)
const [isSearchActive, setIsSearchActive] = useState(false) const [isSearchActive, setIsSearchActive] = useState(false)
@@ -422,6 +423,7 @@ function SubBlockComponent({
const searchInputRef = useRef<HTMLInputElement>(null) const searchInputRef = useRef<HTMLInputElement>(null)
const wandControlRef = useRef<WandControlHandlers | null>(null) const wandControlRef = useRef<WandControlHandlers | null>(null)
// Use webhook management hook when config has useWebhookUrl enabled
const webhookManagement = useWebhookManagement({ const webhookManagement = useWebhookManagement({
blockId, blockId,
triggerId: undefined, triggerId: undefined,
@@ -508,12 +510,10 @@ function SubBlockComponent({
| null | null
| undefined | undefined
const contextValues = dependencyContext ?? (isPreview ? subBlockValues : undefined)
const { finalDisabled: gatedDisabled } = useDependsOnGate(blockId, config, { const { finalDisabled: gatedDisabled } = useDependsOnGate(blockId, config, {
disabled, disabled,
isPreview, isPreview,
previewContextValues: contextValues, previewContextValues: isPreview ? subBlockValues : undefined,
}) })
const isDisabled = gatedDisabled const isDisabled = gatedDisabled
@@ -797,7 +797,7 @@ function SubBlockComponent({
disabled={isDisabled} disabled={isDisabled}
isPreview={isPreview} isPreview={isPreview}
previewValue={previewValue} previewValue={previewValue}
previewContextValues={contextValues} previewContextValues={isPreview ? subBlockValues : undefined}
/> />
) )
@@ -809,7 +809,7 @@ function SubBlockComponent({
disabled={isDisabled} disabled={isDisabled}
isPreview={isPreview} isPreview={isPreview}
previewValue={previewValue} previewValue={previewValue}
previewContextValues={contextValues} previewContextValues={isPreview ? subBlockValues : undefined}
/> />
) )
@@ -821,7 +821,7 @@ function SubBlockComponent({
disabled={isDisabled} disabled={isDisabled}
isPreview={isPreview} isPreview={isPreview}
previewValue={previewValue} previewValue={previewValue}
previewContextValues={contextValues} previewContextValues={isPreview ? subBlockValues : undefined}
/> />
) )
@@ -833,7 +833,7 @@ function SubBlockComponent({
disabled={isDisabled} disabled={isDisabled}
isPreview={isPreview} isPreview={isPreview}
previewValue={previewValue} previewValue={previewValue}
previewContextValues={contextValues} previewContextValues={isPreview ? subBlockValues : undefined}
/> />
) )
@@ -845,7 +845,7 @@ function SubBlockComponent({
disabled={isDisabled} disabled={isDisabled}
isPreview={isPreview} isPreview={isPreview}
previewValue={previewValue} previewValue={previewValue}
previewContextValues={contextValues} previewContextValues={isPreview ? subBlockValues : undefined}
/> />
) )
@@ -868,7 +868,7 @@ function SubBlockComponent({
disabled={isDisabled} disabled={isDisabled}
isPreview={isPreview} isPreview={isPreview}
previewValue={previewValue as any} previewValue={previewValue as any}
previewContextValues={contextValues} previewContextValues={isPreview ? subBlockValues : undefined}
/> />
) )
@@ -880,7 +880,7 @@ function SubBlockComponent({
disabled={isDisabled} disabled={isDisabled}
isPreview={isPreview} isPreview={isPreview}
previewValue={previewValue as any} previewValue={previewValue as any}
previewContextValues={contextValues} previewContextValues={isPreview ? subBlockValues : undefined}
/> />
) )
@@ -892,7 +892,7 @@ function SubBlockComponent({
disabled={isDisabled} disabled={isDisabled}
isPreview={isPreview} isPreview={isPreview}
previewValue={previewValue as any} previewValue={previewValue as any}
previewContextValues={contextValues} previewContextValues={isPreview ? subBlockValues : undefined}
/> />
) )
@@ -917,7 +917,7 @@ function SubBlockComponent({
isPreview={isPreview} isPreview={isPreview}
previewValue={previewValue as any} previewValue={previewValue as any}
disabled={isDisabled} disabled={isDisabled}
previewContextValues={contextValues} previewContextValues={isPreview ? subBlockValues : undefined}
/> />
) )
@@ -953,7 +953,7 @@ function SubBlockComponent({
disabled={isDisabled} disabled={isDisabled}
isPreview={isPreview} isPreview={isPreview}
previewValue={previewValue} previewValue={previewValue}
previewContextValues={contextValues} previewContextValues={isPreview ? subBlockValues : undefined}
/> />
) )
@@ -987,7 +987,7 @@ function SubBlockComponent({
disabled={isDisabled} disabled={isDisabled}
isPreview={isPreview} isPreview={isPreview}
previewValue={previewValue as any} previewValue={previewValue as any}
previewContextValues={contextValues} previewContextValues={isPreview ? subBlockValues : undefined}
/> />
) )
@@ -999,7 +999,7 @@ function SubBlockComponent({
disabled={isDisabled} disabled={isDisabled}
isPreview={isPreview} isPreview={isPreview}
previewValue={previewValue} previewValue={previewValue}
previewContextValues={contextValues} previewContextValues={isPreview ? subBlockValues : undefined}
/> />
) )
@@ -1059,8 +1059,7 @@ function SubBlockComponent({
showCopyButton: Boolean(config.showCopyButton && config.useWebhookUrl), showCopyButton: Boolean(config.showCopyButton && config.useWebhookUrl),
copied, copied,
onCopy: handleCopy, onCopy: handleCopy,
}, }
labelSuffix
)} )}
{renderInput()} {renderInput()}
</div> </div>

View File

@@ -571,6 +571,7 @@ export function Editor() {
isPreview={false} isPreview={false}
subBlockValues={subBlockState} subBlockValues={subBlockState}
disabled={!canEditBlock} disabled={!canEditBlock}
fieldDiffStatus={undefined}
allowExpandInPreview={false} allowExpandInPreview={false}
canonicalToggle={ canonicalToggle={
isCanonicalSwap && canonicalMode && canonicalId isCanonicalSwap && canonicalMode && canonicalId
@@ -634,6 +635,7 @@ export function Editor() {
isPreview={false} isPreview={false}
subBlockValues={subBlockState} subBlockValues={subBlockState}
disabled={!canEditBlock} disabled={!canEditBlock}
fieldDiffStatus={undefined}
allowExpandInPreview={false} allowExpandInPreview={false}
/> />
{index < advancedOnlySubBlocks.length - 1 && ( {index < advancedOnlySubBlocks.length - 1 && (

View File

@@ -88,38 +88,21 @@ export function useTerminalFilters() {
let result = entries let result = entries
if (hasActiveFilters) { if (hasActiveFilters) {
// Determine which top-level entries pass the filters result = entries.filter((entry) => {
const visibleBlockIds = new Set<string>() // Block ID filter
for (const entry of entries) {
if (entry.parentWorkflowBlockId) continue
let passes = true
if (filters.blockIds.size > 0 && !filters.blockIds.has(entry.blockId)) { if (filters.blockIds.size > 0 && !filters.blockIds.has(entry.blockId)) {
passes = false return false
} }
if (passes && filters.statuses.size > 0) {
// Status filter
if (filters.statuses.size > 0) {
const isError = !!entry.error const isError = !!entry.error
const hasStatus = isError ? filters.statuses.has('error') : filters.statuses.has('info') const hasStatus = isError ? filters.statuses.has('error') : filters.statuses.has('info')
if (!hasStatus) passes = false if (!hasStatus) return false
} }
if (passes) {
visibleBlockIds.add(entry.blockId)
}
}
// Propagate visibility to child workflow entries (handles arbitrary nesting). return true
// Keep iterating until no new children are discovered. })
let prevSize = 0
while (visibleBlockIds.size !== prevSize) {
prevSize = visibleBlockIds.size
for (const entry of entries) {
if (entry.parentWorkflowBlockId && visibleBlockIds.has(entry.parentWorkflowBlockId)) {
visibleBlockIds.add(entry.blockId)
}
}
}
result = entries.filter((entry) => visibleBlockIds.has(entry.blockId))
} }
// Sort by executionOrder (monotonically increasing integer from server) // Sort by executionOrder (monotonically increasing integer from server)
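The removed branch above is a small fixpoint computation: filter the top-level entries, then keep adding nested child workflow entries whose parent is already visible until the set stops growing. Restated as a standalone sketch; Entry and propagateVisibility are illustrative names.

interface Entry {
  blockId: string
  parentWorkflowBlockId?: string
  error?: unknown
}

/**
 * Keeps child workflow entries visible whenever their parent passes the
 * filters, at any nesting depth, by iterating until no new IDs are added.
 */
function propagateVisibility(entries: Entry[], passesFilters: (e: Entry) => boolean): Entry[] {
  const visible = new Set<string>()
  for (const entry of entries) {
    if (!entry.parentWorkflowBlockId && passesFilters(entry)) visible.add(entry.blockId)
  }
  let prevSize = -1
  while (visible.size !== prevSize) {
    prevSize = visible.size
    for (const entry of entries) {
      if (entry.parentWorkflowBlockId && visible.has(entry.parentWorkflowBlockId)) {
        visible.add(entry.blockId)
      }
    }
  }
  return entries.filter((entry) => visible.has(entry.blockId))
}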

View File

@@ -339,8 +339,7 @@ const SubflowNodeRow = memo(function SubflowNodeRow({
}) })
/** /**
* Entry node component - dispatches to appropriate component based on node type. * Entry node component - dispatches to appropriate component based on node type
* Handles recursive rendering for workflow nodes with arbitrarily nested children.
*/ */
const EntryNodeRow = memo(function EntryNodeRow({ const EntryNodeRow = memo(function EntryNodeRow({
node, node,
@@ -381,98 +380,6 @@ const EntryNodeRow = memo(function EntryNodeRow({
) )
} }
if (nodeType === 'workflow') {
const { entry, children } = node
const BlockIcon = getBlockIcon(entry.blockType)
const hasError = Boolean(entry.error) || children.some((c) => c.entry.error)
const bgColor = getBlockColor(entry.blockType)
const nodeId = entry.id
const isExpanded = expandedNodes.has(nodeId)
const hasChildren = children.length > 0
const isSelected = selectedEntryId === entry.id
const isRunning = Boolean(entry.isRunning)
const isCanceled = Boolean(entry.isCanceled)
return (
<div className='flex min-w-0 flex-col'>
{/* Workflow Block Header */}
<div
data-entry-id={entry.id}
className={clsx(
ROW_STYLES.base,
'h-[26px]',
isSelected ? ROW_STYLES.selected : ROW_STYLES.hover
)}
onClick={(e) => {
e.stopPropagation()
if (hasChildren) {
onToggleNode(nodeId)
}
onSelectEntry(entry)
}}
>
<div className='flex min-w-0 flex-1 items-center gap-[8px]'>
<div
className='flex h-[14px] w-[14px] flex-shrink-0 items-center justify-center rounded-[4px]'
style={{ background: bgColor }}
>
{BlockIcon && <BlockIcon className='h-[9px] w-[9px] text-white' />}
</div>
<span
className={clsx(
'min-w-0 truncate font-medium text-[13px]',
hasError
? 'text-[var(--text-error)]'
: isSelected || isExpanded
? 'text-[var(--text-primary)]'
: 'text-[var(--text-tertiary)] group-hover:text-[var(--text-primary)]'
)}
>
{entry.blockName}
</span>
{hasChildren && (
<ChevronDown
className={clsx(
'h-[8px] w-[8px] flex-shrink-0 text-[var(--text-tertiary)] transition-transform duration-100 group-hover:text-[var(--text-primary)]',
!isExpanded && '-rotate-90'
)}
/>
)}
</div>
<span
className={clsx(
'flex-shrink-0 font-medium text-[13px]',
!isRunning &&
(isCanceled ? 'text-[var(--text-secondary)]' : 'text-[var(--text-tertiary)]')
)}
>
<StatusDisplay
isRunning={isRunning}
isCanceled={isCanceled}
formattedDuration={formatDuration(entry.durationMs, { precision: 2 }) ?? '-'}
/>
</span>
</div>
{/* Nested Child Workflow Blocks (recursive) */}
{isExpanded && hasChildren && (
<div className={ROW_STYLES.nested}>
{children.map((child) => (
<EntryNodeRow
key={child.entry.id}
node={child}
selectedEntryId={selectedEntryId}
onSelectEntry={onSelectEntry}
expandedNodes={expandedNodes}
onToggleNode={onToggleNode}
/>
))}
</div>
)}
</div>
)
}
// Regular block // Regular block
return ( return (
<BlockRow <BlockRow
@@ -648,8 +555,6 @@ export const Terminal = memo(function Terminal() {
const uniqueBlocks = useMemo(() => { const uniqueBlocks = useMemo(() => {
const blocksMap = new Map<string, { blockId: string; blockName: string; blockType: string }>() const blocksMap = new Map<string, { blockId: string; blockName: string; blockType: string }>()
allWorkflowEntries.forEach((entry) => { allWorkflowEntries.forEach((entry) => {
// Skip child workflow entries — they use synthetic IDs and shouldn't appear in filters
if (entry.parentWorkflowBlockId) return
if (!blocksMap.has(entry.blockId)) { if (!blocksMap.has(entry.blockId)) {
blocksMap.set(entry.blockId, { blocksMap.set(entry.blockId, {
blockId: entry.blockId, blockId: entry.blockId,
@@ -762,22 +667,19 @@ export const Terminal = memo(function Terminal() {
const newestExec = executionGroups[0] const newestExec = executionGroups[0]
// Collect all expandable node IDs recursively (subflows, iterations, and workflow nodes) // Collect all node IDs that should be expanded (subflows and their iterations)
const nodeIdsToExpand: string[] = [] const nodeIdsToExpand: string[] = []
const collectExpandableNodes = (nodes: EntryNode[]) => { for (const node of newestExec.entryTree) {
for (const node of nodes) { if (node.nodeType === 'subflow' && node.children.length > 0) {
if (node.children.length === 0) continue nodeIdsToExpand.push(node.entry.id)
if ( // Also expand all iteration children
node.nodeType === 'subflow' || for (const iterNode of node.children) {
node.nodeType === 'iteration' || if (iterNode.nodeType === 'iteration') {
node.nodeType === 'workflow' nodeIdsToExpand.push(iterNode.entry.id)
) { }
nodeIdsToExpand.push(node.entry.id)
collectExpandableNodes(node.children)
} }
} }
} }
collectExpandableNodes(newestExec.entryTree)
if (nodeIdsToExpand.length > 0) { if (nodeIdsToExpand.length > 0) {
setExpandedNodes((prev) => { setExpandedNodes((prev) => {

View File

@@ -120,10 +120,10 @@ export function isSubflowBlockType(blockType: string): boolean {
/** /**
* Node type for the tree structure * Node type for the tree structure
*/ */
export type EntryNodeType = 'block' | 'subflow' | 'iteration' | 'workflow' export type EntryNodeType = 'block' | 'subflow' | 'iteration'
/** /**
* Entry node for tree structure - represents a block, subflow, iteration, or workflow * Entry node for tree structure - represents a block, subflow, or iteration
*/ */
export interface EntryNode { export interface EntryNode {
/** The console entry (for blocks) or synthetic entry (for subflows/iterations) */ /** The console entry (for blocks) or synthetic entry (for subflows/iterations) */
@@ -175,17 +175,12 @@ interface IterationGroup {
* Sorts by start time to ensure chronological order. * Sorts by start time to ensure chronological order.
*/ */
function buildEntryTree(entries: ConsoleEntry[]): EntryNode[] { function buildEntryTree(entries: ConsoleEntry[]): EntryNode[] {
// Separate regular blocks from iteration entries and child workflow entries // Separate regular blocks from iteration entries
const regularBlocks: ConsoleEntry[] = [] const regularBlocks: ConsoleEntry[] = []
const iterationEntries: ConsoleEntry[] = [] const iterationEntries: ConsoleEntry[] = []
const childWorkflowEntries = new Map<string, ConsoleEntry[]>()
for (const entry of entries) { for (const entry of entries) {
if (entry.parentWorkflowBlockId) { if (entry.iterationType && entry.iterationCurrent !== undefined) {
const existing = childWorkflowEntries.get(entry.parentWorkflowBlockId) || []
existing.push(entry)
childWorkflowEntries.set(entry.parentWorkflowBlockId, existing)
} else if (entry.iterationType && entry.iterationCurrent !== undefined) {
iterationEntries.push(entry) iterationEntries.push(entry)
} else { } else {
regularBlocks.push(entry) regularBlocks.push(entry)
@@ -343,53 +338,12 @@ function buildEntryTree(entries: ConsoleEntry[]): EntryNode[] {
}) })
} }
/** // Build nodes for regular blocks
* Recursively builds child nodes for workflow blocks. const regularNodes: EntryNode[] = regularBlocks.map((entry) => ({
* Handles multi-level nesting where a child workflow block itself has children. entry,
*/ children: [],
const buildWorkflowChildNodes = (parentBlockId: string): EntryNode[] => { nodeType: 'block' as const,
const childEntries = childWorkflowEntries.get(parentBlockId) }))
if (!childEntries || childEntries.length === 0) return []
childEntries.sort((a, b) => {
const aTime = new Date(a.startedAt || a.timestamp).getTime()
const bTime = new Date(b.startedAt || b.timestamp).getTime()
return aTime - bTime
})
return childEntries.map((child) => {
const nestedChildren = buildWorkflowChildNodes(child.blockId)
if (nestedChildren.length > 0) {
return {
entry: child,
children: nestedChildren,
nodeType: 'workflow' as const,
}
}
return {
entry: child,
children: [],
nodeType: 'block' as const,
}
})
}
// Build nodes for regular blocks, promoting workflow blocks with children to 'workflow' nodes
const regularNodes: EntryNode[] = regularBlocks.map((entry) => {
const childNodes = buildWorkflowChildNodes(entry.blockId)
if (childNodes.length > 0) {
return {
entry,
children: childNodes,
nodeType: 'workflow' as const,
}
}
return {
entry,
children: [],
nodeType: 'block' as const,
}
})
// Combine all nodes and sort by executionOrder ascending (oldest first, top-down) // Combine all nodes and sort by executionOrder ascending (oldest first, top-down)
const allNodes = [...subflowNodes, ...regularNodes] const allNodes = [...subflowNodes, ...regularNodes]
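The removed recursion above groups console entries under their parent workflow block and promotes any block with children to a 'workflow' node. A self-contained sketch of that grouping under simplified types; ConsoleEntryLike, TreeNode, groupByParent, and buildChildren are illustrative names.

interface ConsoleEntryLike {
  blockId: string
  parentWorkflowBlockId?: string
  startedAt?: string
  timestamp: string
}

interface TreeNode {
  entry: ConsoleEntryLike
  children: TreeNode[]
  nodeType: 'block' | 'workflow'
}

/** Indexes child entries by the workflow block that produced them. */
function groupByParent(entries: ConsoleEntryLike[]): Map<string, ConsoleEntryLike[]> {
  const byParent = new Map<string, ConsoleEntryLike[]>()
  for (const entry of entries) {
    if (!entry.parentWorkflowBlockId) continue
    const list = byParent.get(entry.parentWorkflowBlockId) ?? []
    list.push(entry)
    byParent.set(entry.parentWorkflowBlockId, list)
  }
  return byParent
}

/** Builds child nodes recursively, oldest first, handling multi-level nesting. */
function buildChildren(
  parentBlockId: string,
  byParent: Map<string, ConsoleEntryLike[]>
): TreeNode[] {
  const children = [...(byParent.get(parentBlockId) ?? [])].sort(
    (a, b) =>
      new Date(a.startedAt || a.timestamp).getTime() -
      new Date(b.startedAt || b.timestamp).getTime()
  )
  return children.map((entry): TreeNode => {
    const nested = buildChildren(entry.blockId, byParent)
    return {
      entry,
      children: nested,
      nodeType: nested.length > 0 ? 'workflow' : 'block',
    }
  })
}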

View File

@@ -106,6 +106,31 @@ const isPlainObject = (value: unknown): value is Record<string, unknown> => {
return typeof value === 'object' && value !== null && !Array.isArray(value) return typeof value === 'object' && value !== null && !Array.isArray(value)
} }
/**
* Parse subblock values that may arrive as JSON strings or already-materialized arrays.
*/
const parseStructuredArrayValue = (value: unknown): Array<Record<string, unknown>> | null => {
if (Array.isArray(value)) {
return value.filter(
(item): item is Record<string, unknown> => typeof item === 'object' && item !== null
)
}
if (typeof value !== 'string') {
return null
}
try {
const parsed = JSON.parse(value)
if (!Array.isArray(parsed)) {
return null
}
return parsed.filter(
(item): item is Record<string, unknown> => typeof item === 'object' && item !== null
)
} catch {
return null
}
}
/** /**
* Type guard for variable assignments array * Type guard for variable assignments array
*/ */
@@ -961,25 +986,21 @@ export const WorkflowBlock = memo(function WorkflowBlock({
if (type !== 'condition') return [] as { id: string; title: string; value: string }[] if (type !== 'condition') return [] as { id: string; title: string; value: string }[]
const conditionsValue = subBlockState.conditions?.value const conditionsValue = subBlockState.conditions?.value
const raw = typeof conditionsValue === 'string' ? conditionsValue : undefined const parsed = parseStructuredArrayValue(conditionsValue)
if (parsed && parsed.length > 0) {
try { return parsed.map((item, index) => {
if (raw) { const conditionId = typeof item.id === 'string' ? item.id : `${id}-cond-${index}`
const parsed = JSON.parse(raw) as unknown const title = index === 0 ? 'if' : index === parsed.length - 1 ? 'else' : 'else if'
if (Array.isArray(parsed)) { return {
return parsed.map((item: unknown, index: number) => { id: conditionId,
const conditionItem = item as { id?: string; value?: unknown } title,
const title = index === 0 ? 'if' : index === parsed.length - 1 ? 'else' : 'else if' value: typeof item.value === 'string' ? item.value : '',
return {
id: conditionItem?.id ?? `${id}-cond-${index}`,
title,
value: typeof conditionItem?.value === 'string' ? conditionItem.value : '',
}
})
} }
} })
} catch (error) { }
logger.warn('Failed to parse condition subblock value', { error, blockId: id })
if (typeof conditionsValue === 'string' && conditionsValue.trim()) {
logger.warn('Failed to parse condition subblock value', { blockId: id })
} }
return [ return [
@@ -997,24 +1018,16 @@ export const WorkflowBlock = memo(function WorkflowBlock({
if (type !== 'router_v2') return [] as { id: string; value: string }[] if (type !== 'router_v2') return [] as { id: string; value: string }[]
const routesValue = subBlockState.routes?.value const routesValue = subBlockState.routes?.value
const raw = typeof routesValue === 'string' ? routesValue : undefined const parsed = parseStructuredArrayValue(routesValue)
if (parsed && parsed.length > 0) {
return parsed.map((item, index) => ({
id: typeof item.id === 'string' ? item.id : `${id}-route${index + 1}`,
value: typeof item.value === 'string' ? item.value : '',
}))
}
try { if (typeof routesValue === 'string' && routesValue.trim()) {
if (raw) { logger.warn('Failed to parse router routes value', { blockId: id })
const parsed = JSON.parse(raw) as unknown
if (Array.isArray(parsed)) {
return parsed.map((item: unknown, index: number) => {
const routeItem = item as { id?: string; value?: string }
return {
// Use stable ID format that matches ConditionInput's generateStableId
id: routeItem?.id ?? `${id}-route${index + 1}`,
value: routeItem?.value ?? '',
}
})
}
}
} catch (error) {
logger.warn('Failed to parse router routes value', { error, blockId: id })
} }
// Fallback must match ConditionInput's default: generateStableId(blockId, 'route1') = `${blockId}-route1` // Fallback must match ConditionInput's default: generateStableId(blockId, 'route1') = `${blockId}-route1`
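For reference, parseStructuredArrayValue accepts either shape and drops non-object items; a few example calls (inputs are made up):

parseStructuredArrayValue([{ id: 'a' }, 'noise', null]) // [{ id: 'a' }]
parseStructuredArrayValue('[{"id":"b","value":"<start> 1"}]') // [{ id: 'b', value: '<start> 1' }]
parseStructuredArrayValue('not json') // null
parseStructuredArrayValue(42) // null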

View File

@@ -1,4 +1,4 @@
import { useCallback, useEffect, useRef, useState } from 'react' import { useCallback, useRef, useState } from 'react'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { useQueryClient } from '@tanstack/react-query' import { useQueryClient } from '@tanstack/react-query'
import { v4 as uuidv4 } from 'uuid' import { v4 as uuidv4 } from 'uuid'
@@ -38,11 +38,7 @@ import { useCurrentWorkflowExecution, useExecutionStore } from '@/stores/executi
import { useNotificationStore } from '@/stores/notifications' import { useNotificationStore } from '@/stores/notifications'
import { useVariablesStore } from '@/stores/panel' import { useVariablesStore } from '@/stores/panel'
import { useEnvironmentStore } from '@/stores/settings/environment' import { useEnvironmentStore } from '@/stores/settings/environment'
import { import { useTerminalConsoleStore } from '@/stores/terminal'
extractChildWorkflowEntries,
hasChildTraceSpans,
useTerminalConsoleStore,
} from '@/stores/terminal'
import { useWorkflowDiffStore } from '@/stores/workflow-diff' import { useWorkflowDiffStore } from '@/stores/workflow-diff'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store' import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { mergeSubblockState } from '@/stores/workflows/utils' import { mergeSubblockState } from '@/stores/workflows/utils'
@@ -50,13 +46,7 @@ import { useWorkflowStore } from '@/stores/workflows/workflow/store'
const logger = createLogger('useWorkflowExecution') const logger = createLogger('useWorkflowExecution')
/** // Debug state validation result
* Module-level Set tracking which workflows have an active reconnection effect.
* Prevents multiple hook instances (from different components) from starting
* concurrent reconnection streams for the same workflow during the same mount cycle.
*/
const activeReconnections = new Set<string>()
interface DebugValidationResult { interface DebugValidationResult {
isValid: boolean isValid: boolean
error?: string error?: string
@@ -64,10 +54,9 @@ interface DebugValidationResult {
interface BlockEventHandlerConfig { interface BlockEventHandlerConfig {
workflowId?: string workflowId?: string
executionIdRef: { current: string } executionId?: string
workflowEdges: Array<{ id: string; target: string; sourceHandle?: string | null }> workflowEdges: Array<{ id: string; target: string; sourceHandle?: string | null }>
activeBlocksSet: Set<string> activeBlocksSet: Set<string>
activeBlockRefCounts: Map<string, number>
accumulatedBlockLogs: BlockLog[] accumulatedBlockLogs: BlockLog[]
accumulatedBlockStates: Map<string, BlockState> accumulatedBlockStates: Map<string, BlockState>
executedBlockIds: Set<string> executedBlockIds: Set<string>
@@ -119,15 +108,12 @@ export function useWorkflowExecution() {
const queryClient = useQueryClient() const queryClient = useQueryClient()
const currentWorkflow = useCurrentWorkflow() const currentWorkflow = useCurrentWorkflow()
const { activeWorkflowId, workflows } = useWorkflowRegistry() const { activeWorkflowId, workflows } = useWorkflowRegistry()
const { toggleConsole, addConsole, updateConsole, cancelRunningEntries, clearExecutionEntries } = const { toggleConsole, addConsole, updateConsole, cancelRunningEntries } =
useTerminalConsoleStore() useTerminalConsoleStore()
const hasHydrated = useTerminalConsoleStore((s) => s._hasHydrated)
const { getAllVariables } = useEnvironmentStore() const { getAllVariables } = useEnvironmentStore()
const { getVariablesByWorkflowId, variables } = useVariablesStore() const { getVariablesByWorkflowId, variables } = useVariablesStore()
const { isExecuting, isDebugging, pendingBlocks, executor, debugContext } = const { isExecuting, isDebugging, pendingBlocks, executor, debugContext } =
useCurrentWorkflowExecution() useCurrentWorkflowExecution()
const setCurrentExecutionId = useExecutionStore((s) => s.setCurrentExecutionId)
const getCurrentExecutionId = useExecutionStore((s) => s.getCurrentExecutionId)
const setIsExecuting = useExecutionStore((s) => s.setIsExecuting) const setIsExecuting = useExecutionStore((s) => s.setIsExecuting)
const setIsDebugging = useExecutionStore((s) => s.setIsDebugging) const setIsDebugging = useExecutionStore((s) => s.setIsDebugging)
const setPendingBlocks = useExecutionStore((s) => s.setPendingBlocks) const setPendingBlocks = useExecutionStore((s) => s.setPendingBlocks)
@@ -311,10 +297,9 @@ export function useWorkflowExecution() {
(config: BlockEventHandlerConfig) => { (config: BlockEventHandlerConfig) => {
const { const {
workflowId, workflowId,
executionIdRef, executionId,
workflowEdges, workflowEdges,
activeBlocksSet, activeBlocksSet,
activeBlockRefCounts,
accumulatedBlockLogs, accumulatedBlockLogs,
accumulatedBlockStates, accumulatedBlockStates,
executedBlockIds, executedBlockIds,
@@ -323,29 +308,12 @@ export function useWorkflowExecution() {
onBlockCompleteCallback, onBlockCompleteCallback,
} = config } = config
/** Returns true if this execution was cancelled or superseded by another run. */
const isStaleExecution = () =>
!!(
workflowId &&
executionIdRef.current &&
useExecutionStore.getState().getCurrentExecutionId(workflowId) !== executionIdRef.current
)
const updateActiveBlocks = (blockId: string, isActive: boolean) => { const updateActiveBlocks = (blockId: string, isActive: boolean) => {
if (!workflowId) return if (!workflowId) return
if (isActive) { if (isActive) {
const count = activeBlockRefCounts.get(blockId) ?? 0
activeBlockRefCounts.set(blockId, count + 1)
activeBlocksSet.add(blockId) activeBlocksSet.add(blockId)
} else { } else {
const count = activeBlockRefCounts.get(blockId) ?? 1 activeBlocksSet.delete(blockId)
const next = count - 1
if (next <= 0) {
activeBlockRefCounts.delete(blockId)
activeBlocksSet.delete(blockId)
} else {
activeBlockRefCounts.set(blockId, next)
}
} }
setActiveBlocks(workflowId, new Set(activeBlocksSet)) setActiveBlocks(workflowId, new Set(activeBlocksSet))
} }
@@ -392,7 +360,7 @@ export function useWorkflowExecution() {
endedAt: data.endedAt, endedAt: data.endedAt,
workflowId, workflowId,
blockId: data.blockId, blockId: data.blockId,
executionId: executionIdRef.current, executionId,
blockName: data.blockName || 'Unknown Block', blockName: data.blockName || 'Unknown Block',
blockType: data.blockType || 'unknown', blockType: data.blockType || 'unknown',
iterationCurrent: data.iterationCurrent, iterationCurrent: data.iterationCurrent,
@@ -415,7 +383,7 @@ export function useWorkflowExecution() {
endedAt: data.endedAt, endedAt: data.endedAt,
workflowId, workflowId,
blockId: data.blockId, blockId: data.blockId,
executionId: executionIdRef.current, executionId,
blockName: data.blockName || 'Unknown Block', blockName: data.blockName || 'Unknown Block',
blockType: data.blockType || 'unknown', blockType: data.blockType || 'unknown',
iterationCurrent: data.iterationCurrent, iterationCurrent: data.iterationCurrent,
@@ -442,7 +410,7 @@ export function useWorkflowExecution() {
iterationType: data.iterationType, iterationType: data.iterationType,
iterationContainerId: data.iterationContainerId, iterationContainerId: data.iterationContainerId,
}, },
executionIdRef.current executionId
) )
} }
@@ -464,12 +432,11 @@ export function useWorkflowExecution() {
iterationType: data.iterationType, iterationType: data.iterationType,
iterationContainerId: data.iterationContainerId, iterationContainerId: data.iterationContainerId,
}, },
executionIdRef.current executionId
) )
} }
const onBlockStarted = (data: BlockStartedData) => { const onBlockStarted = (data: BlockStartedData) => {
if (isStaleExecution()) return
updateActiveBlocks(data.blockId, true) updateActiveBlocks(data.blockId, true)
markIncomingEdges(data.blockId) markIncomingEdges(data.blockId)
@@ -486,7 +453,7 @@ export function useWorkflowExecution() {
endedAt: undefined, endedAt: undefined,
workflowId, workflowId,
blockId: data.blockId, blockId: data.blockId,
executionId: executionIdRef.current, executionId,
blockName: data.blockName || 'Unknown Block', blockName: data.blockName || 'Unknown Block',
blockType: data.blockType || 'unknown', blockType: data.blockType || 'unknown',
isRunning: true, isRunning: true,
@@ -498,7 +465,6 @@ export function useWorkflowExecution() {
} }
const onBlockCompleted = (data: BlockCompletedData) => { const onBlockCompleted = (data: BlockCompletedData) => {
if (isStaleExecution()) return
updateActiveBlocks(data.blockId, false) updateActiveBlocks(data.blockId, false)
if (workflowId) setBlockRunStatus(workflowId, data.blockId, 'success') if (workflowId) setBlockRunStatus(workflowId, data.blockId, 'success')
@@ -521,20 +487,6 @@ export function useWorkflowExecution() {
addConsoleEntry(data, data.output as NormalizedBlockOutput) addConsoleEntry(data, data.output as NormalizedBlockOutput)
} }
// Extract child workflow trace spans into separate console entries
if (data.blockType === 'workflow' && hasChildTraceSpans(data.output)) {
const childEntries = extractChildWorkflowEntries({
parentBlockId: data.blockId,
executionId: executionIdRef.current,
executionOrder: data.executionOrder,
workflowId: workflowId!,
childTraceSpans: data.output.childTraceSpans,
})
for (const entry of childEntries) {
addConsole(entry)
}
}
if (onBlockCompleteCallback) { if (onBlockCompleteCallback) {
onBlockCompleteCallback(data.blockId, data.output).catch((error) => { onBlockCompleteCallback(data.blockId, data.output).catch((error) => {
logger.error('Error in onBlockComplete callback:', error) logger.error('Error in onBlockComplete callback:', error)
@@ -543,7 +495,6 @@ export function useWorkflowExecution() {
} }
const onBlockError = (data: BlockErrorData) => { const onBlockError = (data: BlockErrorData) => {
if (isStaleExecution()) return
updateActiveBlocks(data.blockId, false) updateActiveBlocks(data.blockId, false)
if (workflowId) setBlockRunStatus(workflowId, data.blockId, 'error') if (workflowId) setBlockRunStatus(workflowId, data.blockId, 'error')
@@ -951,6 +902,10 @@ export function useWorkflowExecution() {
// Update block logs with actual stream completion times // Update block logs with actual stream completion times
if (result.logs && streamCompletionTimes.size > 0) { if (result.logs && streamCompletionTimes.size > 0) {
const streamCompletionEndTime = new Date(
Math.max(...Array.from(streamCompletionTimes.values()))
).toISOString()
result.logs.forEach((log: BlockLog) => { result.logs.forEach((log: BlockLog) => {
if (streamCompletionTimes.has(log.blockId)) { if (streamCompletionTimes.has(log.blockId)) {
const completionTime = streamCompletionTimes.get(log.blockId)! const completionTime = streamCompletionTimes.get(log.blockId)!
@@ -1032,6 +987,7 @@ export function useWorkflowExecution() {
return { success: true, stream } return { success: true, stream }
} }
// For manual (non-chat) execution
const manualExecutionId = uuidv4() const manualExecutionId = uuidv4()
try { try {
const result = await executeWorkflow( const result = await executeWorkflow(
@@ -1046,10 +1002,29 @@ export function useWorkflowExecution() {
if (result.metadata.pendingBlocks) { if (result.metadata.pendingBlocks) {
setPendingBlocks(activeWorkflowId, result.metadata.pendingBlocks) setPendingBlocks(activeWorkflowId, result.metadata.pendingBlocks)
} }
} else if (result && 'success' in result) {
setExecutionResult(result)
// Reset execution state after successful non-debug execution
setIsExecuting(activeWorkflowId, false)
setIsDebugging(activeWorkflowId, false)
setActiveBlocks(activeWorkflowId, new Set())
if (isChatExecution) {
if (!result.metadata) {
result.metadata = { duration: 0, startTime: new Date().toISOString() }
}
;(result.metadata as any).source = 'chat'
}
// Invalidate subscription queries to update usage
setTimeout(() => {
queryClient.invalidateQueries({ queryKey: subscriptionKeys.all })
}, 1000)
} }
return result return result
} catch (error: any) { } catch (error: any) {
const errorResult = handleExecutionError(error, { executionId: manualExecutionId }) const errorResult = handleExecutionError(error, { executionId: manualExecutionId })
// Note: Error logs are already persisted server-side via execution-core.ts
return errorResult return errorResult
} }
}, },
@@ -1300,7 +1275,7 @@ export function useWorkflowExecution() {
if (activeWorkflowId) { if (activeWorkflowId) {
logger.info('Using server-side executor') logger.info('Using server-side executor')
const executionIdRef = { current: '' } const executionId = uuidv4()
let executionResult: ExecutionResult = { let executionResult: ExecutionResult = {
success: false, success: false,
@@ -1309,7 +1284,6 @@ export function useWorkflowExecution() {
} }
const activeBlocksSet = new Set<string>() const activeBlocksSet = new Set<string>()
const activeBlockRefCounts = new Map<string, number>()
const streamedContent = new Map<string, string>() const streamedContent = new Map<string, string>()
const accumulatedBlockLogs: BlockLog[] = [] const accumulatedBlockLogs: BlockLog[] = []
const accumulatedBlockStates = new Map<string, BlockState>() const accumulatedBlockStates = new Map<string, BlockState>()
@@ -1319,10 +1293,9 @@ export function useWorkflowExecution() {
try { try {
const blockHandlers = buildBlockEventHandlers({ const blockHandlers = buildBlockEventHandlers({
workflowId: activeWorkflowId, workflowId: activeWorkflowId,
executionIdRef, executionId,
workflowEdges, workflowEdges,
activeBlocksSet, activeBlocksSet,
activeBlockRefCounts,
accumulatedBlockLogs, accumulatedBlockLogs,
accumulatedBlockStates, accumulatedBlockStates,
executedBlockIds, executedBlockIds,
@@ -1353,10 +1326,6 @@ export function useWorkflowExecution() {
loops: clientWorkflowState.loops, loops: clientWorkflowState.loops,
parallels: clientWorkflowState.parallels, parallels: clientWorkflowState.parallels,
}, },
onExecutionId: (id) => {
executionIdRef.current = id
setCurrentExecutionId(activeWorkflowId, id)
},
callbacks: { callbacks: {
onExecutionStarted: (data) => { onExecutionStarted: (data) => {
logger.info('Server execution started:', data) logger.info('Server execution started:', data)
@@ -1399,18 +1368,6 @@ export function useWorkflowExecution() {
}, },
onExecutionCompleted: (data) => { onExecutionCompleted: (data) => {
if (
activeWorkflowId &&
executionIdRef.current &&
useExecutionStore.getState().getCurrentExecutionId(activeWorkflowId) !==
executionIdRef.current
)
return
if (activeWorkflowId) {
setCurrentExecutionId(activeWorkflowId, null)
}
executionResult = { executionResult = {
success: data.success, success: data.success,
output: data.output, output: data.output,
@@ -1468,33 +1425,9 @@ export function useWorkflowExecution() {
}) })
} }
} }
const workflowExecState = activeWorkflowId
? useExecutionStore.getState().getWorkflowExecution(activeWorkflowId)
: null
if (activeWorkflowId && !workflowExecState?.isDebugging) {
setExecutionResult(executionResult)
setIsExecuting(activeWorkflowId, false)
setActiveBlocks(activeWorkflowId, new Set())
setTimeout(() => {
queryClient.invalidateQueries({ queryKey: subscriptionKeys.all })
}, 1000)
}
}, },
onExecutionError: (data) => { onExecutionError: (data) => {
if (
activeWorkflowId &&
executionIdRef.current &&
useExecutionStore.getState().getCurrentExecutionId(activeWorkflowId) !==
executionIdRef.current
)
return
if (activeWorkflowId) {
setCurrentExecutionId(activeWorkflowId, null)
}
executionResult = { executionResult = {
success: false, success: false,
output: {}, output: {},
@@ -1508,53 +1441,43 @@ export function useWorkflowExecution() {
const isPreExecutionError = accumulatedBlockLogs.length === 0 const isPreExecutionError = accumulatedBlockLogs.length === 0
handleExecutionErrorConsole({ handleExecutionErrorConsole({
workflowId: activeWorkflowId, workflowId: activeWorkflowId,
executionId: executionIdRef.current, executionId,
error: data.error, error: data.error,
durationMs: data.duration, durationMs: data.duration,
blockLogs: accumulatedBlockLogs, blockLogs: accumulatedBlockLogs,
isPreExecutionError, isPreExecutionError,
}) })
if (activeWorkflowId) {
setIsExecuting(activeWorkflowId, false)
setIsDebugging(activeWorkflowId, false)
setActiveBlocks(activeWorkflowId, new Set())
}
}, },
onExecutionCancelled: (data) => { onExecutionCancelled: (data) => {
if (
activeWorkflowId &&
executionIdRef.current &&
useExecutionStore.getState().getCurrentExecutionId(activeWorkflowId) !==
executionIdRef.current
)
return
if (activeWorkflowId) {
setCurrentExecutionId(activeWorkflowId, null)
}
handleExecutionCancelledConsole({ handleExecutionCancelledConsole({
workflowId: activeWorkflowId, workflowId: activeWorkflowId,
executionId: executionIdRef.current, executionId,
durationMs: data?.duration, durationMs: data?.duration,
}) })
if (activeWorkflowId) {
setIsExecuting(activeWorkflowId, false)
setIsDebugging(activeWorkflowId, false)
setActiveBlocks(activeWorkflowId, new Set())
}
}, },
}, },
}) })
return executionResult return executionResult
} catch (error: any) { } catch (error: any) {
// Don't log abort errors - they're intentional user actions
if (error.name === 'AbortError' || error.message?.includes('aborted')) { if (error.name === 'AbortError' || error.message?.includes('aborted')) {
logger.info('Execution aborted by user') logger.info('Execution aborted by user')
return executionResult
// Reset execution state
if (activeWorkflowId) {
setIsExecuting(activeWorkflowId, false)
setActiveBlocks(activeWorkflowId, new Set())
}
// Return gracefully without error
return {
success: false,
output: {},
metadata: { duration: 0 },
logs: [],
}
} }
logger.error('Server-side execution failed:', error) logger.error('Server-side execution failed:', error)
@@ -1562,6 +1485,7 @@ export function useWorkflowExecution() {
} }
} }
// Fallback: should never reach here
throw new Error('Server-side execution is required') throw new Error('Server-side execution is required')
} }
@@ -1793,28 +1717,25 @@ export function useWorkflowExecution() {
* Handles cancelling the current workflow execution * Handles cancelling the current workflow execution
*/ */
const handleCancelExecution = useCallback(() => { const handleCancelExecution = useCallback(() => {
if (!activeWorkflowId) return
logger.info('Workflow execution cancellation requested') logger.info('Workflow execution cancellation requested')
const storedExecutionId = getCurrentExecutionId(activeWorkflowId) // Cancel the execution stream for this workflow (server-side)
executionStream.cancel(activeWorkflowId ?? undefined)
if (storedExecutionId) { // Mark current chat execution as superseded so its cleanup won't affect new executions
setCurrentExecutionId(activeWorkflowId, null) currentChatExecutionIdRef.current = null
fetch(`/api/workflows/${activeWorkflowId}/executions/${storedExecutionId}/cancel`, {
method: 'POST', // Mark all running entries as canceled in the terminal
}).catch(() => {}) if (activeWorkflowId) {
handleExecutionCancelledConsole({ cancelRunningEntries(activeWorkflowId)
workflowId: activeWorkflowId,
executionId: storedExecutionId, // Reset execution state - this triggers chat stream cleanup via useEffect in chat.tsx
}) setIsExecuting(activeWorkflowId, false)
setIsDebugging(activeWorkflowId, false)
setActiveBlocks(activeWorkflowId, new Set())
} }
executionStream.cancel(activeWorkflowId) // If in debug mode, also reset debug state
currentChatExecutionIdRef.current = null
setIsExecuting(activeWorkflowId, false)
setIsDebugging(activeWorkflowId, false)
setActiveBlocks(activeWorkflowId, new Set())
if (isDebugging) { if (isDebugging) {
resetDebugState() resetDebugState()
} }
@@ -1826,9 +1747,7 @@ export function useWorkflowExecution() {
setIsDebugging, setIsDebugging,
setActiveBlocks, setActiveBlocks,
activeWorkflowId, activeWorkflowId,
getCurrentExecutionId, cancelRunningEntries,
setCurrentExecutionId,
handleExecutionCancelledConsole,
]) ])
/** /**
@@ -1928,20 +1847,18 @@ export function useWorkflowExecution() {
} }
setIsExecuting(workflowId, true) setIsExecuting(workflowId, true)
const executionIdRef = { current: '' } const executionId = uuidv4()
const accumulatedBlockLogs: BlockLog[] = [] const accumulatedBlockLogs: BlockLog[] = []
const accumulatedBlockStates = new Map<string, BlockState>() const accumulatedBlockStates = new Map<string, BlockState>()
const executedBlockIds = new Set<string>() const executedBlockIds = new Set<string>()
const activeBlocksSet = new Set<string>() const activeBlocksSet = new Set<string>()
const activeBlockRefCounts = new Map<string, number>()
try { try {
const blockHandlers = buildBlockEventHandlers({ const blockHandlers = buildBlockEventHandlers({
workflowId, workflowId,
executionIdRef, executionId,
workflowEdges, workflowEdges,
activeBlocksSet, activeBlocksSet,
activeBlockRefCounts,
accumulatedBlockLogs, accumulatedBlockLogs,
accumulatedBlockStates, accumulatedBlockStates,
executedBlockIds, executedBlockIds,
@@ -1954,10 +1871,6 @@ export function useWorkflowExecution() {
startBlockId: blockId, startBlockId: blockId,
sourceSnapshot: effectiveSnapshot, sourceSnapshot: effectiveSnapshot,
input: workflowInput, input: workflowInput,
onExecutionId: (id) => {
executionIdRef.current = id
setCurrentExecutionId(workflowId, id)
},
callbacks: { callbacks: {
onBlockStarted: blockHandlers.onBlockStarted, onBlockStarted: blockHandlers.onBlockStarted,
onBlockCompleted: blockHandlers.onBlockCompleted, onBlockCompleted: blockHandlers.onBlockCompleted,
@@ -1965,6 +1878,7 @@ export function useWorkflowExecution() {
onExecutionCompleted: (data) => { onExecutionCompleted: (data) => {
if (data.success) { if (data.success) {
// Add the start block (trigger) to executed blocks
executedBlockIds.add(blockId) executedBlockIds.add(blockId)
const mergedBlockStates: Record<string, BlockState> = { const mergedBlockStates: Record<string, BlockState> = {
@@ -1988,10 +1902,6 @@ export function useWorkflowExecution() {
} }
setLastExecutionSnapshot(workflowId, updatedSnapshot) setLastExecutionSnapshot(workflowId, updatedSnapshot)
} }
setCurrentExecutionId(workflowId, null)
setIsExecuting(workflowId, false)
setActiveBlocks(workflowId, new Set())
}, },
onExecutionError: (data) => { onExecutionError: (data) => {
@@ -2011,27 +1921,19 @@ export function useWorkflowExecution() {
handleExecutionErrorConsole({ handleExecutionErrorConsole({
workflowId, workflowId,
executionId: executionIdRef.current, executionId,
error: data.error, error: data.error,
durationMs: data.duration, durationMs: data.duration,
blockLogs: accumulatedBlockLogs, blockLogs: accumulatedBlockLogs,
}) })
setCurrentExecutionId(workflowId, null)
setIsExecuting(workflowId, false)
setActiveBlocks(workflowId, new Set())
}, },
onExecutionCancelled: (data) => { onExecutionCancelled: (data) => {
handleExecutionCancelledConsole({ handleExecutionCancelledConsole({
workflowId, workflowId,
executionId: executionIdRef.current, executionId,
durationMs: data?.duration, durationMs: data?.duration,
}) })
setCurrentExecutionId(workflowId, null)
setIsExecuting(workflowId, false)
setActiveBlocks(workflowId, new Set())
}, },
}, },
}) })
@@ -2040,20 +1942,14 @@ export function useWorkflowExecution() {
logger.error('Run-from-block failed:', error) logger.error('Run-from-block failed:', error)
} }
} finally { } finally {
const currentId = getCurrentExecutionId(workflowId) setIsExecuting(workflowId, false)
if (currentId === null || currentId === executionIdRef.current) { setActiveBlocks(workflowId, new Set())
setCurrentExecutionId(workflowId, null)
setIsExecuting(workflowId, false)
setActiveBlocks(workflowId, new Set())
}
} }
}, },
[ [
getLastExecutionSnapshot, getLastExecutionSnapshot,
setLastExecutionSnapshot, setLastExecutionSnapshot,
clearLastExecutionSnapshot, clearLastExecutionSnapshot,
getCurrentExecutionId,
setCurrentExecutionId,
setIsExecuting, setIsExecuting,
setActiveBlocks, setActiveBlocks,
setBlockRunStatus, setBlockRunStatus,
@@ -2083,215 +1979,29 @@ export function useWorkflowExecution() {
const executionId = uuidv4() const executionId = uuidv4()
try { try {
await executeWorkflow(undefined, undefined, executionId, undefined, 'manual', blockId) const result = await executeWorkflow(
undefined,
undefined,
executionId,
undefined,
'manual',
blockId
)
if (result && 'success' in result) {
setExecutionResult(result)
}
} catch (error) { } catch (error) {
const errorResult = handleExecutionError(error, { executionId }) const errorResult = handleExecutionError(error, { executionId })
return errorResult return errorResult
} finally { } finally {
setCurrentExecutionId(workflowId, null)
setIsExecuting(workflowId, false) setIsExecuting(workflowId, false)
setIsDebugging(workflowId, false) setIsDebugging(workflowId, false)
setActiveBlocks(workflowId, new Set()) setActiveBlocks(workflowId, new Set())
} }
}, },
[ [activeWorkflowId, setExecutionResult, setIsExecuting, setIsDebugging, setActiveBlocks]
activeWorkflowId,
setCurrentExecutionId,
setExecutionResult,
setIsExecuting,
setIsDebugging,
setActiveBlocks,
]
) )
useEffect(() => {
if (!activeWorkflowId || !hasHydrated) return
const entries = useTerminalConsoleStore.getState().entries
const runningEntries = entries.filter(
(e) => e.isRunning && e.workflowId === activeWorkflowId && e.executionId
)
if (runningEntries.length === 0) return
if (activeReconnections.has(activeWorkflowId)) return
activeReconnections.add(activeWorkflowId)
executionStream.cancel(activeWorkflowId)
const sorted = [...runningEntries].sort((a, b) => {
const aTime = a.startedAt ? new Date(a.startedAt).getTime() : 0
const bTime = b.startedAt ? new Date(b.startedAt).getTime() : 0
return bTime - aTime
})
const executionId = sorted[0].executionId!
const otherExecutionIds = new Set(
sorted.filter((e) => e.executionId !== executionId).map((e) => e.executionId!)
)
if (otherExecutionIds.size > 0) {
cancelRunningEntries(activeWorkflowId)
}
setCurrentExecutionId(activeWorkflowId, executionId)
setIsExecuting(activeWorkflowId, true)
const workflowEdges = useWorkflowStore.getState().edges
const activeBlocksSet = new Set<string>()
const activeBlockRefCounts = new Map<string, number>()
const accumulatedBlockLogs: BlockLog[] = []
const accumulatedBlockStates = new Map<string, BlockState>()
const executedBlockIds = new Set<string>()
const executionIdRef = { current: executionId }
const handlers = buildBlockEventHandlers({
workflowId: activeWorkflowId,
executionIdRef,
workflowEdges,
activeBlocksSet,
activeBlockRefCounts,
accumulatedBlockLogs,
accumulatedBlockStates,
executedBlockIds,
consoleMode: 'update',
includeStartConsoleEntry: true,
})
const originalEntries = entries
.filter((e) => e.executionId === executionId)
.map((e) => ({ ...e }))
let cleared = false
let reconnectionComplete = false
let cleanupRan = false
const clearOnce = () => {
if (!cleared) {
cleared = true
clearExecutionEntries(executionId)
}
}
const reconnectWorkflowId = activeWorkflowId
executionStream
.reconnect({
workflowId: reconnectWorkflowId,
executionId,
callbacks: {
onBlockStarted: (data) => {
clearOnce()
handlers.onBlockStarted(data)
},
onBlockCompleted: (data) => {
clearOnce()
handlers.onBlockCompleted(data)
},
onBlockError: (data) => {
clearOnce()
handlers.onBlockError(data)
},
onExecutionCompleted: () => {
const currentId = useExecutionStore
.getState()
.getCurrentExecutionId(reconnectWorkflowId)
if (currentId !== executionId) {
reconnectionComplete = true
activeReconnections.delete(reconnectWorkflowId)
return
}
clearOnce()
reconnectionComplete = true
activeReconnections.delete(reconnectWorkflowId)
setCurrentExecutionId(reconnectWorkflowId, null)
setIsExecuting(reconnectWorkflowId, false)
setActiveBlocks(reconnectWorkflowId, new Set())
},
onExecutionError: (data) => {
const currentId = useExecutionStore
.getState()
.getCurrentExecutionId(reconnectWorkflowId)
if (currentId !== executionId) {
reconnectionComplete = true
activeReconnections.delete(reconnectWorkflowId)
return
}
clearOnce()
reconnectionComplete = true
activeReconnections.delete(reconnectWorkflowId)
setCurrentExecutionId(reconnectWorkflowId, null)
setIsExecuting(reconnectWorkflowId, false)
setActiveBlocks(reconnectWorkflowId, new Set())
handleExecutionErrorConsole({
workflowId: reconnectWorkflowId,
executionId,
error: data.error,
blockLogs: accumulatedBlockLogs,
})
},
onExecutionCancelled: () => {
const currentId = useExecutionStore
.getState()
.getCurrentExecutionId(reconnectWorkflowId)
if (currentId !== executionId) {
reconnectionComplete = true
activeReconnections.delete(reconnectWorkflowId)
return
}
clearOnce()
reconnectionComplete = true
activeReconnections.delete(reconnectWorkflowId)
setCurrentExecutionId(reconnectWorkflowId, null)
setIsExecuting(reconnectWorkflowId, false)
setActiveBlocks(reconnectWorkflowId, new Set())
handleExecutionCancelledConsole({
workflowId: reconnectWorkflowId,
executionId,
})
},
},
})
.catch((error) => {
logger.warn('Execution reconnection failed', { executionId, error })
})
.finally(() => {
if (reconnectionComplete || cleanupRan) return
const currentId = useExecutionStore.getState().getCurrentExecutionId(reconnectWorkflowId)
if (currentId !== executionId) return
reconnectionComplete = true
activeReconnections.delete(reconnectWorkflowId)
clearExecutionEntries(executionId)
for (const entry of originalEntries) {
addConsole({
workflowId: entry.workflowId,
blockId: entry.blockId,
blockName: entry.blockName,
blockType: entry.blockType,
executionId: entry.executionId,
executionOrder: entry.executionOrder,
isRunning: false,
warning: 'Execution result unavailable — check the logs page',
})
}
setCurrentExecutionId(reconnectWorkflowId, null)
setIsExecuting(reconnectWorkflowId, false)
setActiveBlocks(reconnectWorkflowId, new Set())
})
return () => {
cleanupRan = true
executionStream.cancel(reconnectWorkflowId)
activeReconnections.delete(reconnectWorkflowId)
if (cleared && !reconnectionComplete) {
clearExecutionEntries(executionId)
for (const entry of originalEntries) {
addConsole(entry)
}
}
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [activeWorkflowId, hasHydrated])
return { return {
isExecuting, isExecuting,
isDebugging, isDebugging,

View File

@@ -1,11 +1,7 @@
import { v4 as uuidv4 } from 'uuid' import { v4 as uuidv4 } from 'uuid'
import type { ExecutionResult, StreamingExecution } from '@/executor/types' import type { ExecutionResult, StreamingExecution } from '@/executor/types'
import { useExecutionStore } from '@/stores/execution' import { useExecutionStore } from '@/stores/execution'
import { import { useTerminalConsoleStore } from '@/stores/terminal'
extractChildWorkflowEntries,
hasChildTraceSpans,
useTerminalConsoleStore,
} from '@/stores/terminal'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store' import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { useWorkflowStore } from '@/stores/workflows/workflow/store' import { useWorkflowStore } from '@/stores/workflows/workflow/store'
@@ -43,7 +39,6 @@ export async function executeWorkflowWithFullLogging(
const workflowEdges = useWorkflowStore.getState().edges const workflowEdges = useWorkflowStore.getState().edges
const activeBlocksSet = new Set<string>() const activeBlocksSet = new Set<string>()
const activeBlockRefCounts = new Map<string, number>()
const payload: any = { const payload: any = {
input: options.workflowInput, input: options.workflowInput,
@@ -108,8 +103,6 @@ export async function executeWorkflowWithFullLogging(
switch (event.type) { switch (event.type) {
case 'block:started': { case 'block:started': {
const startCount = activeBlockRefCounts.get(event.data.blockId) ?? 0
activeBlockRefCounts.set(event.data.blockId, startCount + 1)
activeBlocksSet.add(event.data.blockId) activeBlocksSet.add(event.data.blockId)
setActiveBlocks(wfId, new Set(activeBlocksSet)) setActiveBlocks(wfId, new Set(activeBlocksSet))
@@ -122,14 +115,8 @@ export async function executeWorkflowWithFullLogging(
break break
} }
case 'block:completed': { case 'block:completed':
const completeCount = activeBlockRefCounts.get(event.data.blockId) ?? 1 activeBlocksSet.delete(event.data.blockId)
if (completeCount <= 1) {
activeBlockRefCounts.delete(event.data.blockId)
activeBlocksSet.delete(event.data.blockId)
} else {
activeBlockRefCounts.set(event.data.blockId, completeCount - 1)
}
setActiveBlocks(wfId, new Set(activeBlocksSet)) setActiveBlocks(wfId, new Set(activeBlocksSet))
setBlockRunStatus(wfId, event.data.blockId, 'success') setBlockRunStatus(wfId, event.data.blockId, 'success')
@@ -153,34 +140,13 @@ export async function executeWorkflowWithFullLogging(
iterationContainerId: event.data.iterationContainerId, iterationContainerId: event.data.iterationContainerId,
}) })
// Extract child workflow trace spans into separate console entries
if (event.data.blockType === 'workflow' && hasChildTraceSpans(event.data.output)) {
const childEntries = extractChildWorkflowEntries({
parentBlockId: event.data.blockId,
executionId,
executionOrder: event.data.executionOrder,
workflowId: activeWorkflowId,
childTraceSpans: event.data.output.childTraceSpans,
})
for (const entry of childEntries) {
addConsole(entry)
}
}
if (options.onBlockComplete) { if (options.onBlockComplete) {
options.onBlockComplete(event.data.blockId, event.data.output).catch(() => {}) options.onBlockComplete(event.data.blockId, event.data.output).catch(() => {})
} }
break break
}
case 'block:error': { case 'block:error':
const errorCount = activeBlockRefCounts.get(event.data.blockId) ?? 1 activeBlocksSet.delete(event.data.blockId)
if (errorCount <= 1) {
activeBlockRefCounts.delete(event.data.blockId)
activeBlocksSet.delete(event.data.blockId)
} else {
activeBlockRefCounts.set(event.data.blockId, errorCount - 1)
}
setActiveBlocks(wfId, new Set(activeBlocksSet)) setActiveBlocks(wfId, new Set(activeBlocksSet))
setBlockRunStatus(wfId, event.data.blockId, 'error') setBlockRunStatus(wfId, event.data.blockId, 'error')
@@ -205,7 +171,6 @@ export async function executeWorkflowWithFullLogging(
iterationContainerId: event.data.iterationContainerId, iterationContainerId: event.data.iterationContainerId,
}) })
break break
}
case 'execution:completed': case 'execution:completed':
executionResult = { executionResult = {

View File

@@ -72,6 +72,31 @@ function extractValue(entry: SubBlockValueEntry | unknown): unknown {
return entry return entry
} }
/**
* Parse subblock values that may be JSON strings or already-materialized arrays.
*/
function parseStructuredArrayValue(value: unknown): Array<Record<string, unknown>> | null {
if (Array.isArray(value)) {
return value.filter(
(item): item is Record<string, unknown> => typeof item === 'object' && item !== null
)
}
if (typeof value !== 'string') {
return null
}
try {
const parsed = JSON.parse(value)
if (!Array.isArray(parsed)) {
return null
}
return parsed.filter(
(item): item is Record<string, unknown> => typeof item === 'object' && item !== null
)
} catch {
return null
}
}
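For context, a minimal sketch of how a helper with this shape behaves for the inputs it accepts (the sample values are illustrative, not taken from this changeset):

// Already-materialized array: non-object items are filtered out
parseStructuredArrayValue([{ id: 'a', value: 'x' }, 'stray']) // -> [{ id: 'a', value: 'x' }]
// JSON string: parsed first, then filtered the same way
parseStructuredArrayValue('[{"id":"a","value":"x"}, 42]') // -> [{ id: 'a', value: 'x' }]
// Non-string, non-array input, malformed JSON, or non-array JSON -> null
parseStructuredArrayValue('not json') // -> null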
interface SubBlockRowProps { interface SubBlockRowProps {
title: string title: string
value?: string value?: string
@@ -347,25 +372,16 @@ function WorkflowPreviewBlockInner({ data }: NodeProps<WorkflowPreviewBlockData>
if (lightweight) return defaultRows if (lightweight) return defaultRows
const conditionsValue = rawValues.conditions const conditionsValue = rawValues.conditions
const raw = typeof conditionsValue === 'string' ? conditionsValue : undefined const parsed = parseStructuredArrayValue(conditionsValue)
if (parsed && parsed.length > 0) {
try { return parsed.map((item, index) => {
if (raw) { const title = index === 0 ? 'if' : index === parsed.length - 1 ? 'else' : 'else if'
const parsed = JSON.parse(raw) as unknown return {
if (Array.isArray(parsed)) { id: typeof item.id === 'string' ? item.id : `cond-${index}`,
return parsed.map((item: unknown, index: number) => { title,
const conditionItem = item as { id?: string; value?: unknown } value: typeof item.value === 'string' ? item.value : '',
const title = index === 0 ? 'if' : index === parsed.length - 1 ? 'else' : 'else if'
return {
id: conditionItem?.id ?? `cond-${index}`,
title,
value: typeof conditionItem?.value === 'string' ? conditionItem.value : '',
}
})
} }
} })
} catch {
/* empty */
} }
return defaultRows return defaultRows
@@ -384,23 +400,12 @@ function WorkflowPreviewBlockInner({ data }: NodeProps<WorkflowPreviewBlockData>
if (lightweight) return defaultRows if (lightweight) return defaultRows
const routesValue = rawValues.routes const routesValue = rawValues.routes
const raw = typeof routesValue === 'string' ? routesValue : undefined const parsed = parseStructuredArrayValue(routesValue)
if (parsed && parsed.length > 0) {
try { return parsed.map((item, index) => ({
if (raw) { id: typeof item.id === 'string' ? item.id : `route${index + 1}`,
const parsed = JSON.parse(raw) as unknown value: typeof item.value === 'string' ? item.value : '',
if (Array.isArray(parsed)) { }))
return parsed.map((item: unknown, index: number) => {
const routeItem = item as { id?: string; value?: string }
return {
id: routeItem?.id ?? `route${index + 1}`,
value: routeItem?.value ?? '',
}
})
}
}
} catch {
/* empty */
} }
return defaultRows return defaultRows

View File

@@ -1,4 +1,3 @@
export { CancelSubscription } from './cancel-subscription' export { CancelSubscription } from './cancel-subscription'
export { CreditBalance } from './credit-balance' export { CreditBalance } from './credit-balance'
export { PlanCard, type PlanCardProps, type PlanFeature } from './plan-card' export { PlanCard, type PlanCardProps, type PlanFeature } from './plan-card'
export { ReferralCode } from './referral-code'

View File

@@ -1,103 +0,0 @@
'use client'
import { useState } from 'react'
import { createLogger } from '@sim/logger'
import { Button, Input, Label } from '@/components/emcn'
const logger = createLogger('ReferralCode')
interface ReferralCodeProps {
onRedeemComplete?: () => void
}
/**
* Inline referral/promo code entry field with redeem button.
* One-time use per account — shows success or "already redeemed" state.
*/
export function ReferralCode({ onRedeemComplete }: ReferralCodeProps) {
const [code, setCode] = useState('')
const [isRedeeming, setIsRedeeming] = useState(false)
const [error, setError] = useState<string | null>(null)
const [success, setSuccess] = useState<{ bonusAmount: number } | null>(null)
const handleRedeem = async () => {
const trimmed = code.trim()
if (!trimmed || isRedeeming) return
setIsRedeeming(true)
setError(null)
try {
const response = await fetch('/api/referral-code/redeem', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ code: trimmed }),
})
const data = await response.json()
if (!response.ok) {
throw new Error(data.error || 'Failed to redeem code')
}
if (data.redeemed) {
setSuccess({ bonusAmount: data.bonusAmount })
setCode('')
onRedeemComplete?.()
} else {
setError(data.error || 'Code could not be redeemed')
}
} catch (err) {
logger.error('Referral code redemption failed', { error: err })
setError(err instanceof Error ? err.message : 'Failed to redeem code')
} finally {
setIsRedeeming(false)
}
}
if (success) {
return (
<div className='flex items-center justify-between'>
<Label>Referral Code</Label>
<span className='text-[12px] text-[var(--text-secondary)]'>
+${success.bonusAmount} credits applied
</span>
</div>
)
}
return (
<div className='flex flex-col'>
<div className='flex items-center justify-between gap-[12px]'>
<Label className='shrink-0'>Referral Code</Label>
<div className='flex items-center gap-[8px]'>
<Input
type='text'
value={code}
onChange={(e) => {
setCode(e.target.value)
setError(null)
}}
onKeyDown={(e) => {
if (e.key === 'Enter') handleRedeem()
}}
placeholder='Enter code'
className='h-[32px] w-[140px] text-[12px]'
disabled={isRedeeming}
/>
<Button
variant='active'
className='h-[32px] shrink-0 rounded-[6px] text-[12px]'
onClick={handleRedeem}
disabled={isRedeeming || !code.trim()}
>
{isRedeeming ? 'Redeeming...' : 'Redeem'}
</Button>
</div>
</div>
<div className='mt-[4px] min-h-[18px] text-right'>
{error && <span className='text-[11px] text-[var(--text-error)]'>{error}</span>}
</div>
</div>
)
}

View File

@@ -17,7 +17,6 @@ import {
CancelSubscription, CancelSubscription,
CreditBalance, CreditBalance,
PlanCard, PlanCard,
ReferralCode,
} from '@/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/subscription/components' } from '@/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/subscription/components'
import { import {
ENTERPRISE_PLAN_FEATURES, ENTERPRISE_PLAN_FEATURES,
@@ -550,10 +549,6 @@ export function Subscription() {
/> />
)} )}
{!subscription.isEnterprise && (
<ReferralCode onRedeemComplete={() => refetchSubscription()} />
)}
{/* Next Billing Date - hidden from team members */} {/* Next Billing Date - hidden from team members */}
{subscription.isPaid && {subscription.isPaid &&
subscriptionData?.data?.periodEnd && subscriptionData?.data?.periodEnd &&

View File

@@ -4,14 +4,12 @@ import { useEffect } from 'react'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { useRouter } from 'next/navigation' import { useRouter } from 'next/navigation'
import { useSession } from '@/lib/auth/auth-client' import { useSession } from '@/lib/auth/auth-client'
import { useReferralAttribution } from '@/hooks/use-referral-attribution'
const logger = createLogger('WorkspacePage') const logger = createLogger('WorkspacePage')
export default function WorkspacePage() { export default function WorkspacePage() {
const router = useRouter() const router = useRouter()
const { data: session, isPending } = useSession() const { data: session, isPending } = useSession()
useReferralAttribution()
useEffect(() => { useEffect(() => {
const redirectToFirstWorkspace = async () => { const redirectToFirstWorkspace = async () => {

View File

@@ -2,10 +2,11 @@ import { createLogger } from '@sim/logger'
import { AgentIcon } from '@/components/icons' import { AgentIcon } from '@/components/icons'
import type { BlockConfig } from '@/blocks/types' import type { BlockConfig } from '@/blocks/types'
import { AuthMode } from '@/blocks/types' import { AuthMode } from '@/blocks/types'
import { getApiKeyCondition, getModelOptions } from '@/blocks/utils' import { getApiKeyCondition } from '@/blocks/utils'
import { import {
getBaseModelProviders, getBaseModelProviders,
getMaxTemperature, getMaxTemperature,
getProviderIcon,
getReasoningEffortValuesForModel, getReasoningEffortValuesForModel,
getThinkingLevelsForModel, getThinkingLevelsForModel,
getVerbosityValuesForModel, getVerbosityValuesForModel,
@@ -17,6 +18,7 @@ import {
providers, providers,
supportsTemperature, supportsTemperature,
} from '@/providers/utils' } from '@/providers/utils'
import { useProvidersStore } from '@/stores/providers'
import type { ToolResponse } from '@/tools/types' import type { ToolResponse } from '@/tools/types'
const logger = createLogger('AgentBlock') const logger = createLogger('AgentBlock')
@@ -119,7 +121,21 @@ Return ONLY the JSON array.`,
placeholder: 'Type or select a model...', placeholder: 'Type or select a model...',
required: true, required: true,
defaultValue: 'claude-sonnet-4-5', defaultValue: 'claude-sonnet-4-5',
options: getModelOptions, options: () => {
const providersState = useProvidersStore.getState()
const baseModels = providersState.providers.base.models
const ollamaModels = providersState.providers.ollama.models
const vllmModels = providersState.providers.vllm.models
const openrouterModels = providersState.providers.openrouter.models
const allModels = Array.from(
new Set([...baseModels, ...ollamaModels, ...vllmModels, ...openrouterModels])
)
return allModels.map((model) => {
const icon = getProviderIcon(model)
return { label: model, id: model, ...(icon && { icon }) }
})
},
}, },
{ {
id: 'vertexCredential', id: 'vertexCredential',

View File

@@ -1,13 +1,10 @@
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { ChartBarIcon } from '@/components/icons' import { ChartBarIcon } from '@/components/icons'
import type { BlockConfig, ParamType } from '@/blocks/types' import type { BlockConfig, ParamType } from '@/blocks/types'
import { import { getProviderCredentialSubBlocks, PROVIDER_CREDENTIAL_INPUTS } from '@/blocks/utils'
getModelOptions,
getProviderCredentialSubBlocks,
PROVIDER_CREDENTIAL_INPUTS,
} from '@/blocks/utils'
import type { ProviderId } from '@/providers/types' import type { ProviderId } from '@/providers/types'
import { getBaseModelProviders } from '@/providers/utils' import { getBaseModelProviders, getProviderIcon } from '@/providers/utils'
import { useProvidersStore } from '@/stores/providers/store'
import type { ToolResponse } from '@/tools/types' import type { ToolResponse } from '@/tools/types'
const logger = createLogger('EvaluatorBlock') const logger = createLogger('EvaluatorBlock')
@@ -178,7 +175,21 @@ export const EvaluatorBlock: BlockConfig<EvaluatorResponse> = {
placeholder: 'Type or select a model...', placeholder: 'Type or select a model...',
required: true, required: true,
defaultValue: 'claude-sonnet-4-5', defaultValue: 'claude-sonnet-4-5',
options: getModelOptions, options: () => {
const providersState = useProvidersStore.getState()
const baseModels = providersState.providers.base.models
const ollamaModels = providersState.providers.ollama.models
const vllmModels = providersState.providers.vllm.models
const openrouterModels = providersState.providers.openrouter.models
const allModels = Array.from(
new Set([...baseModels, ...ollamaModels, ...vllmModels, ...openrouterModels])
)
return allModels.map((model) => {
const icon = getProviderIcon(model)
return { label: model, id: model, ...(icon && { icon }) }
})
},
}, },
...getProviderCredentialSubBlocks(), ...getProviderCredentialSubBlocks(),
{ {

View File

@@ -1,201 +0,0 @@
import { GoogleBooksIcon } from '@/components/icons'
import type { BlockConfig } from '@/blocks/types'
import { AuthMode } from '@/blocks/types'
export const GoogleBooksBlock: BlockConfig = {
type: 'google_books',
name: 'Google Books',
description: 'Search and retrieve book information',
authMode: AuthMode.ApiKey,
longDescription:
'Search for books using the Google Books API. Find volumes by title, author, ISBN, or keywords, and retrieve detailed information about specific books including descriptions, ratings, and publication details.',
docsLink: 'https://docs.sim.ai/tools/google_books',
category: 'tools',
bgColor: '#E0E0E0',
icon: GoogleBooksIcon,
subBlocks: [
{
id: 'operation',
title: 'Operation',
type: 'dropdown',
options: [
{ label: 'Search Volumes', id: 'volume_search' },
{ label: 'Get Volume Details', id: 'volume_details' },
],
value: () => 'volume_search',
},
{
id: 'apiKey',
title: 'API Key',
type: 'short-input',
password: true,
placeholder: 'Enter your Google Books API key',
required: true,
},
{
id: 'query',
title: 'Search Query',
type: 'short-input',
placeholder: 'e.g., intitle:harry potter inauthor:rowling',
condition: { field: 'operation', value: 'volume_search' },
required: { field: 'operation', value: 'volume_search' },
},
{
id: 'filter',
title: 'Filter',
type: 'dropdown',
options: [
{ label: 'None', id: '' },
{ label: 'Partial Preview', id: 'partial' },
{ label: 'Full Preview', id: 'full' },
{ label: 'Free eBooks', id: 'free-ebooks' },
{ label: 'Paid eBooks', id: 'paid-ebooks' },
{ label: 'All eBooks', id: 'ebooks' },
],
condition: { field: 'operation', value: 'volume_search' },
mode: 'advanced',
},
{
id: 'printType',
title: 'Print Type',
type: 'dropdown',
options: [
{ label: 'All', id: 'all' },
{ label: 'Books', id: 'books' },
{ label: 'Magazines', id: 'magazines' },
],
value: () => 'all',
condition: { field: 'operation', value: 'volume_search' },
mode: 'advanced',
},
{
id: 'orderBy',
title: 'Order By',
type: 'dropdown',
options: [
{ label: 'Relevance', id: 'relevance' },
{ label: 'Newest', id: 'newest' },
],
value: () => 'relevance',
condition: { field: 'operation', value: 'volume_search' },
mode: 'advanced',
},
{
id: 'maxResults',
title: 'Max Results',
type: 'short-input',
placeholder: 'Number of results (1-40)',
condition: { field: 'operation', value: 'volume_search' },
mode: 'advanced',
},
{
id: 'startIndex',
title: 'Start Index',
type: 'short-input',
placeholder: 'Starting index for pagination',
condition: { field: 'operation', value: 'volume_search' },
mode: 'advanced',
},
{
id: 'langRestrict',
title: 'Language',
type: 'short-input',
placeholder: 'ISO 639-1 code (e.g., en, es, fr)',
condition: { field: 'operation', value: 'volume_search' },
mode: 'advanced',
},
{
id: 'volumeId',
title: 'Volume ID',
type: 'short-input',
placeholder: 'Google Books volume ID',
condition: { field: 'operation', value: 'volume_details' },
required: { field: 'operation', value: 'volume_details' },
},
{
id: 'projection',
title: 'Projection',
type: 'dropdown',
options: [
{ label: 'Full', id: 'full' },
{ label: 'Lite', id: 'lite' },
],
value: () => 'full',
condition: { field: 'operation', value: 'volume_details' },
mode: 'advanced',
},
],
tools: {
access: ['google_books_volume_search', 'google_books_volume_details'],
config: {
tool: (params) => `google_books_${params.operation}`,
params: (params) => {
const { operation, ...rest } = params
let maxResults: number | undefined
if (params.maxResults) {
maxResults = Number.parseInt(params.maxResults, 10)
if (Number.isNaN(maxResults)) {
maxResults = undefined
}
}
let startIndex: number | undefined
if (params.startIndex) {
startIndex = Number.parseInt(params.startIndex, 10)
if (Number.isNaN(startIndex)) {
startIndex = undefined
}
}
return {
...rest,
maxResults,
startIndex,
filter: params.filter || undefined,
printType: params.printType || undefined,
orderBy: params.orderBy || undefined,
projection: params.projection || undefined,
}
},
},
},
inputs: {
operation: { type: 'string', description: 'Operation to perform' },
apiKey: { type: 'string', description: 'Google Books API key' },
query: { type: 'string', description: 'Search query' },
filter: { type: 'string', description: 'Filter by availability' },
printType: { type: 'string', description: 'Print type filter' },
orderBy: { type: 'string', description: 'Sort order' },
maxResults: { type: 'string', description: 'Maximum number of results' },
startIndex: { type: 'string', description: 'Starting index for pagination' },
langRestrict: { type: 'string', description: 'Language restriction' },
volumeId: { type: 'string', description: 'Volume ID for details' },
projection: { type: 'string', description: 'Projection level' },
},
outputs: {
totalItems: { type: 'number', description: 'Total number of matching results' },
volumes: { type: 'json', description: 'List of matching volumes' },
id: { type: 'string', description: 'Volume ID' },
title: { type: 'string', description: 'Book title' },
subtitle: { type: 'string', description: 'Book subtitle' },
authors: { type: 'json', description: 'List of authors' },
publisher: { type: 'string', description: 'Publisher name' },
publishedDate: { type: 'string', description: 'Publication date' },
description: { type: 'string', description: 'Book description' },
pageCount: { type: 'number', description: 'Number of pages' },
categories: { type: 'json', description: 'Book categories' },
averageRating: { type: 'number', description: 'Average rating (1-5)' },
ratingsCount: { type: 'number', description: 'Number of ratings' },
language: { type: 'string', description: 'Language code' },
previewLink: { type: 'string', description: 'Link to preview on Google Books' },
infoLink: { type: 'string', description: 'Link to info page' },
thumbnailUrl: { type: 'string', description: 'Book cover thumbnail URL' },
isbn10: { type: 'string', description: 'ISBN-10 identifier' },
isbn13: { type: 'string', description: 'ISBN-13 identifier' },
},
}

View File

@@ -1,10 +1,8 @@
import { ShieldCheckIcon } from '@/components/icons' import { ShieldCheckIcon } from '@/components/icons'
import type { BlockConfig } from '@/blocks/types' import type { BlockConfig } from '@/blocks/types'
import { import { getProviderCredentialSubBlocks, PROVIDER_CREDENTIAL_INPUTS } from '@/blocks/utils'
getModelOptions, import { getProviderIcon } from '@/providers/utils'
getProviderCredentialSubBlocks, import { useProvidersStore } from '@/stores/providers/store'
PROVIDER_CREDENTIAL_INPUTS,
} from '@/blocks/utils'
import type { ToolResponse } from '@/tools/types' import type { ToolResponse } from '@/tools/types'
export interface GuardrailsResponse extends ToolResponse { export interface GuardrailsResponse extends ToolResponse {
@@ -113,7 +111,21 @@ Return ONLY the regex pattern - no explanations, no quotes, no forward slashes,
type: 'combobox', type: 'combobox',
placeholder: 'Type or select a model...', placeholder: 'Type or select a model...',
required: true, required: true,
options: getModelOptions, options: () => {
const providersState = useProvidersStore.getState()
const baseModels = providersState.providers.base.models
const ollamaModels = providersState.providers.ollama.models
const vllmModels = providersState.providers.vllm.models
const openrouterModels = providersState.providers.openrouter.models
const allModels = Array.from(
new Set([...baseModels, ...ollamaModels, ...vllmModels, ...openrouterModels])
)
return allModels.map((model) => {
const icon = getProviderIcon(model)
return { label: model, id: model, ...(icon && { icon }) }
})
},
condition: { condition: {
field: 'validationType', field: 'validationType',
value: ['hallucination'], value: ['hallucination'],

View File

@@ -1,12 +1,9 @@
import { ConnectIcon } from '@/components/icons' import { ConnectIcon } from '@/components/icons'
import { AuthMode, type BlockConfig } from '@/blocks/types' import { AuthMode, type BlockConfig } from '@/blocks/types'
import { import { getProviderCredentialSubBlocks, PROVIDER_CREDENTIAL_INPUTS } from '@/blocks/utils'
getModelOptions,
getProviderCredentialSubBlocks,
PROVIDER_CREDENTIAL_INPUTS,
} from '@/blocks/utils'
import type { ProviderId } from '@/providers/types' import type { ProviderId } from '@/providers/types'
import { getBaseModelProviders } from '@/providers/utils' import { getBaseModelProviders, getProviderIcon } from '@/providers/utils'
import { useProvidersStore } from '@/stores/providers'
import type { ToolResponse } from '@/tools/types' import type { ToolResponse } from '@/tools/types'
interface RouterResponse extends ToolResponse { interface RouterResponse extends ToolResponse {
@@ -137,6 +134,25 @@ Respond with a JSON object containing:
- reasoning: A brief explanation (1-2 sentences) of why you chose this route` - reasoning: A brief explanation (1-2 sentences) of why you chose this route`
} }
/**
* Helper to get model options for both router versions.
*/
const getModelOptions = () => {
const providersState = useProvidersStore.getState()
const baseModels = providersState.providers.base.models
const ollamaModels = providersState.providers.ollama.models
const vllmModels = providersState.providers.vllm.models
const openrouterModels = providersState.providers.openrouter.models
const allModels = Array.from(
new Set([...baseModels, ...ollamaModels, ...vllmModels, ...openrouterModels])
)
return allModels.map((model) => {
const icon = getProviderIcon(model)
return { label: model, id: model, ...(icon && { icon }) }
})
}
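A hypothetical sample of the options array this helper produces (model names and the icon component are illustrative only; entries whose model has no known provider icon simply omit the icon field):

// [
//   { label: 'claude-sonnet-4-5', id: 'claude-sonnet-4-5', icon: AnthropicIcon },
//   { label: 'llama3.1', id: 'llama3.1' },
// ]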
/** /**
* Legacy Router Block (block-based routing). * Legacy Router Block (block-based routing).
* Hidden from toolbar but still supported for existing workflows. * Hidden from toolbar but still supported for existing workflows.

View File

@@ -58,16 +58,6 @@ export const S3Block: BlockConfig<S3Response> = {
}, },
required: true, required: true,
}, },
{
id: 'getObjectRegion',
title: 'AWS Region',
type: 'short-input',
placeholder: 'Used when S3 URL does not include region',
condition: {
field: 'operation',
value: ['get_object'],
},
},
{ {
id: 'bucketName', id: 'bucketName',
title: 'Bucket Name', title: 'Bucket Name',
@@ -301,11 +291,34 @@ export const S3Block: BlockConfig<S3Response> = {
if (!params.s3Uri) { if (!params.s3Uri) {
throw new Error('S3 Object URL is required') throw new Error('S3 Object URL is required')
} }
return {
accessKeyId: params.accessKeyId, // Parse S3 URI for get_object
secretAccessKey: params.secretAccessKey, try {
region: params.getObjectRegion || params.region, const url = new URL(params.s3Uri)
s3Uri: params.s3Uri, const hostname = url.hostname
const bucketName = hostname.split('.')[0]
const regionMatch = hostname.match(/s3[.-]([^.]+)\.amazonaws\.com/)
const region = regionMatch ? regionMatch[1] : params.region
const objectKey = url.pathname.startsWith('/')
? url.pathname.substring(1)
: url.pathname
if (!bucketName || !objectKey) {
throw new Error('Could not parse S3 URL')
}
return {
accessKeyId: params.accessKeyId,
secretAccessKey: params.secretAccessKey,
region,
bucketName,
objectKey,
s3Uri: params.s3Uri,
}
} catch (_error) {
throw new Error(
'Invalid S3 Object URL format. Expected: https://bucket-name.s3.region.amazonaws.com/path/to/file'
)
} }
} }
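As a worked illustration of the URL parsing above (the bucket, region, and key are made up):

// Given s3Uri = 'https://my-bucket.s3.us-west-2.amazonaws.com/reports/2024/summary.pdf'
// hostname.split('.')[0]                   -> 'my-bucket'                 (bucketName)
// /s3[.-]([^.]+)\.amazonaws\.com/ capture  -> 'us-west-2'                 (region)
// pathname with the leading '/' stripped   -> 'reports/2024/summary.pdf'  (objectKey)
// This assumes the virtual-hosted-style format shown in the error message above;
// a path-style URL would place the bucket in the path rather than the hostname.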
@@ -388,7 +401,6 @@ export const S3Block: BlockConfig<S3Response> = {
acl: { type: 'string', description: 'Access control list' }, acl: { type: 'string', description: 'Access control list' },
// Download inputs // Download inputs
s3Uri: { type: 'string', description: 'S3 object URL' }, s3Uri: { type: 'string', description: 'S3 object URL' },
getObjectRegion: { type: 'string', description: 'Optional AWS region override for downloads' },
// List inputs // List inputs
prefix: { type: 'string', description: 'Prefix filter' }, prefix: { type: 'string', description: 'Prefix filter' },
maxKeys: { type: 'number', description: 'Maximum results' }, maxKeys: { type: 'number', description: 'Maximum results' },

View File

@@ -122,25 +122,6 @@ export const ScheduleBlock: BlockConfig = {
required: true, required: true,
mode: 'trigger', mode: 'trigger',
condition: { field: 'scheduleType', value: 'custom' }, condition: { field: 'scheduleType', value: 'custom' },
wandConfig: {
enabled: true,
prompt: `You are an expert at writing cron expressions. Generate a valid cron expression based on the user's description.
Cron format: minute hour day-of-month month day-of-week
- minute: 0-59
- hour: 0-23
- day-of-month: 1-31
- month: 1-12
- day-of-week: 0-7 (0 and 7 are Sunday)
Special characters: * (any), , (list), - (range), / (step)
{context}
Return ONLY the cron expression, nothing else. No explanation, no backticks, no quotes.`,
placeholder: 'Describe your schedule (e.g., "every weekday at 9am")',
generationType: 'cron-expression',
},
}, },
{ {

View File

@@ -604,7 +604,7 @@ Return ONLY the timestamp string - no explanations, no quotes, no extra text.`,
case 'send': { case 'send': {
baseParams.text = text baseParams.text = text
if (threadTs) { if (threadTs) {
baseParams.threadTs = threadTs baseParams.thread_ts = threadTs
} }
// files is the canonical param from attachmentFiles (basic) or files (advanced) // files is the canonical param from attachmentFiles (basic) or files (advanced)
const normalizedFiles = normalizeFileInput(files) const normalizedFiles = normalizeFileInput(files)
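For reference, Slack's chat.postMessage API expects the snake_case field thread_ts when posting a threaded reply; assuming baseParams is forwarded to that endpoint largely as-is, a hypothetical payload might look like the following (channel and timestamp values are made up):

// { channel: 'C0123456789', text: 'Reply in thread', thread_ts: '1712345678.000200' }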

View File

@@ -1,10 +1,8 @@
import { TranslateIcon } from '@/components/icons' import { TranslateIcon } from '@/components/icons'
import { AuthMode, type BlockConfig } from '@/blocks/types' import { AuthMode, type BlockConfig } from '@/blocks/types'
import { import { getProviderCredentialSubBlocks, PROVIDER_CREDENTIAL_INPUTS } from '@/blocks/utils'
getModelOptions, import { getProviderIcon } from '@/providers/utils'
getProviderCredentialSubBlocks, import { useProvidersStore } from '@/stores/providers/store'
PROVIDER_CREDENTIAL_INPUTS,
} from '@/blocks/utils'
const getTranslationPrompt = (targetLanguage: string) => const getTranslationPrompt = (targetLanguage: string) =>
`Translate the following text into ${targetLanguage || 'English'}. Output ONLY the translated text with no additional commentary, explanations, or notes.` `Translate the following text into ${targetLanguage || 'English'}. Output ONLY the translated text with no additional commentary, explanations, or notes.`
@@ -40,7 +38,18 @@ export const TranslateBlock: BlockConfig = {
type: 'combobox', type: 'combobox',
placeholder: 'Type or select a model...', placeholder: 'Type or select a model...',
required: true, required: true,
options: getModelOptions, options: () => {
const providersState = useProvidersStore.getState()
const baseModels = providersState.providers.base.models
const ollamaModels = providersState.providers.ollama.models
const openrouterModels = providersState.providers.openrouter.models
const allModels = Array.from(new Set([...baseModels, ...ollamaModels, ...openrouterModels]))
return allModels.map((model) => {
const icon = getProviderIcon(model)
return { label: model, id: model, ...(icon && { icon }) }
})
},
}, },
...getProviderCredentialSubBlocks(), ...getProviderCredentialSubBlocks(),
{ {

View File

@@ -39,7 +39,6 @@ import { GitHubBlock, GitHubV2Block } from '@/blocks/blocks/github'
import { GitLabBlock } from '@/blocks/blocks/gitlab' import { GitLabBlock } from '@/blocks/blocks/gitlab'
import { GmailBlock, GmailV2Block } from '@/blocks/blocks/gmail' import { GmailBlock, GmailV2Block } from '@/blocks/blocks/gmail'
import { GoogleSearchBlock } from '@/blocks/blocks/google' import { GoogleSearchBlock } from '@/blocks/blocks/google'
import { GoogleBooksBlock } from '@/blocks/blocks/google_books'
import { GoogleCalendarBlock, GoogleCalendarV2Block } from '@/blocks/blocks/google_calendar' import { GoogleCalendarBlock, GoogleCalendarV2Block } from '@/blocks/blocks/google_calendar'
import { GoogleDocsBlock } from '@/blocks/blocks/google_docs' import { GoogleDocsBlock } from '@/blocks/blocks/google_docs'
import { GoogleDriveBlock } from '@/blocks/blocks/google_drive' import { GoogleDriveBlock } from '@/blocks/blocks/google_drive'
@@ -215,7 +214,6 @@ export const registry: Record<string, BlockConfig> = {
gmail_v2: GmailV2Block, gmail_v2: GmailV2Block,
google_calendar: GoogleCalendarBlock, google_calendar: GoogleCalendarBlock,
google_calendar_v2: GoogleCalendarV2Block, google_calendar_v2: GoogleCalendarV2Block,
google_books: GoogleBooksBlock,
google_docs: GoogleDocsBlock, google_docs: GoogleDocsBlock,
google_drive: GoogleDriveBlock, google_drive: GoogleDriveBlock,
google_forms: GoogleFormsBlock, google_forms: GoogleFormsBlock,

View File

@@ -40,7 +40,6 @@ export type GenerationType =
| 'neo4j-parameters' | 'neo4j-parameters'
| 'timestamp' | 'timestamp'
| 'timezone' | 'timezone'
| 'cron-expression'
export type SubBlockType = export type SubBlockType =
| 'short-input' // Single line input | 'short-input' // Single line input
@@ -197,8 +196,6 @@ export interface SubBlockConfig {
type: SubBlockType type: SubBlockType
mode?: 'basic' | 'advanced' | 'both' | 'trigger' // Default is 'both' if not specified. 'trigger' means only shown in trigger mode mode?: 'basic' | 'advanced' | 'both' | 'trigger' // Default is 'both' if not specified. 'trigger' means only shown in trigger mode
canonicalParamId?: string canonicalParamId?: string
/** Controls parameter visibility in agent/tool-input context */
paramVisibility?: 'user-or-llm' | 'user-only' | 'llm-only' | 'hidden'
required?: required?:
| boolean | boolean
| { | {

View File

@@ -1,32 +1,8 @@
import { isHosted } from '@/lib/core/config/feature-flags' import { isHosted } from '@/lib/core/config/feature-flags'
import type { BlockOutput, OutputFieldDefinition, SubBlockConfig } from '@/blocks/types' import type { BlockOutput, OutputFieldDefinition, SubBlockConfig } from '@/blocks/types'
import { import { getHostedModels, getProviderFromModel, providers } from '@/providers/utils'
getHostedModels,
getProviderFromModel,
getProviderIcon,
providers,
} from '@/providers/utils'
import { useProvidersStore } from '@/stores/providers/store' import { useProvidersStore } from '@/stores/providers/store'
/**
* Returns model options for combobox subblocks, combining all provider sources.
*/
export function getModelOptions() {
const providersState = useProvidersStore.getState()
const baseModels = providersState.providers.base.models
const ollamaModels = providersState.providers.ollama.models
const vllmModels = providersState.providers.vllm.models
const openrouterModels = providersState.providers.openrouter.models
const allModels = Array.from(
new Set([...baseModels, ...ollamaModels, ...vllmModels, ...openrouterModels])
)
return allModels.map((model) => {
const icon = getProviderIcon(model)
return { label: model, id: model, ...(icon && { icon }) }
})
}
/** /**
* Checks if a field is included in the dependsOn config. * Checks if a field is included in the dependsOn config.
* Handles both simple array format and object format with all/any fields. * Handles both simple array format and object format with all/any fields.

View File

@@ -1157,21 +1157,6 @@ export function AirweaveIcon(props: SVGProps<SVGSVGElement>) {
) )
} }
export function GoogleBooksIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 478.633 540.068'>
<path
fill='#1C51A4'
d='M449.059,218.231L245.519,99.538l-0.061,193.23c0.031,1.504-0.368,2.977-1.166,4.204c-0.798,1.258-1.565,1.995-2.915,2.547c-1.35,0.552-2.792,0.706-4.204,0.399c-1.412-0.307-2.7-1.043-3.713-2.117l-69.166-70.609l-69.381,70.179c-1.013,0.982-2.301,1.657-3.652,1.903c-1.381,0.246-2.792,0.092-4.081-0.491c-1.289-0.583-1.626-0.522-2.394-1.749c-0.767-1.197-1.197-2.608-1.197-4.081L85.031,6.007l-2.915-1.289C43.973-11.638,0,16.409,0,59.891v420.306c0,46.029,49.312,74.782,88.775,51.767l360.285-210.138C488.491,298.782,488.491,241.246,449.059,218.231z'
/>
<path
fill='#80D7FB'
d='M88.805,8.124c-2.179-1.289-4.419-2.363-6.659-3.345l0.123,288.663c0,1.442,0.43,2.854,1.197,4.081c0.767,1.197,1.872,2.148,3.161,2.731c1.289,0.583,2.7,0.736,4.081,0.491c1.381-0.246,2.639-0.921,3.652-1.903l69.749-69.688l69.811,69.749c1.013,1.074,2.301,1.81,3.713,2.117c1.412,0.307,2.884,0.153,4.204-0.399c1.319-0.552,2.455-1.565,3.253-2.792c0.798-1.258,1.197-2.731,1.166-4.204V99.998L88.805,8.124z'
/>
</svg>
)
}
export function GoogleDocsIcon(props: SVGProps<SVGSVGElement>) { export function GoogleDocsIcon(props: SVGProps<SVGSVGElement>) {
return ( return (
<svg <svg

View File

@@ -428,7 +428,7 @@ export class BlockExecutor {
block: SerializedBlock, block: SerializedBlock,
executionOrder: number executionOrder: number
): void { ): void {
const blockId = node.metadata?.originalBlockId ?? node.id const blockId = node.id
const blockName = block.metadata?.name ?? blockId const blockName = block.metadata?.name ?? blockId
const blockType = block.metadata?.id ?? DEFAULTS.BLOCK_TYPE const blockType = block.metadata?.id ?? DEFAULTS.BLOCK_TYPE
@@ -456,7 +456,7 @@ export class BlockExecutor {
executionOrder: number, executionOrder: number,
endedAt: string endedAt: string
): void { ): void {
const blockId = node.metadata?.originalBlockId ?? node.id const blockId = node.id
const blockName = block.metadata?.name ?? blockId const blockName = block.metadata?.name ?? blockId
const blockType = block.metadata?.id ?? DEFAULTS.BLOCK_TYPE const blockType = block.metadata?.id ?? DEFAULTS.BLOCK_TYPE

View File

@@ -1,4 +1,3 @@
import { setupGlobalFetchMock } from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, type Mock, vi } from 'vitest' import { afterEach, beforeEach, describe, expect, it, type Mock, vi } from 'vitest'
import { getAllBlocks } from '@/blocks' import { getAllBlocks } from '@/blocks'
import { BlockType, isMcpTool } from '@/executor/constants' import { BlockType, isMcpTool } from '@/executor/constants'
@@ -62,30 +61,6 @@ vi.mock('@/providers', () => ({
}), }),
})) }))
vi.mock('@/executor/utils/http', () => ({
buildAuthHeaders: vi.fn().mockResolvedValue({ 'Content-Type': 'application/json' }),
buildAPIUrl: vi.fn((path: string, params?: Record<string, string>) => {
const url = new URL(path, 'http://localhost:3000')
if (params) {
for (const [key, value] of Object.entries(params)) {
if (value !== undefined && value !== null) {
url.searchParams.set(key, value)
}
}
}
return url
}),
extractAPIErrorMessage: vi.fn(async (response: Response) => {
const defaultMessage = `API request failed with status ${response.status}`
try {
const errorData = await response.json()
return errorData.error || defaultMessage
} catch {
return defaultMessage
}
}),
}))
vi.mock('@sim/db', () => ({ vi.mock('@sim/db', () => ({
db: { db: {
select: vi.fn().mockReturnValue({ select: vi.fn().mockReturnValue({
@@ -109,7 +84,7 @@ vi.mock('@sim/db/schema', () => ({
}, },
})) }))
setupGlobalFetchMock() global.fetch = Object.assign(vi.fn(), { preconnect: vi.fn() }) as typeof fetch
const mockGetAllBlocks = getAllBlocks as Mock const mockGetAllBlocks = getAllBlocks as Mock
const mockExecuteTool = executeTool as Mock const mockExecuteTool = executeTool as Mock
@@ -1926,301 +1901,5 @@ describe('AgentBlockHandler', () => {
expect(discoveryCalls[0].url).toContain('serverId=mcp-legacy-server') expect(discoveryCalls[0].url).toContain('serverId=mcp-legacy-server')
}) })
describe('customToolId resolution - DB as source of truth', () => {
const staleInlineSchema = {
function: {
name: 'formatReport',
description: 'Formats a report',
parameters: {
type: 'object',
properties: {
title: { type: 'string', description: 'Report title' },
content: { type: 'string', description: 'Report content' },
},
required: ['title', 'content'],
},
},
}
const dbSchema = {
function: {
name: 'formatReport',
description: 'Formats a report',
parameters: {
type: 'object',
properties: {
title: { type: 'string', description: 'Report title' },
content: { type: 'string', description: 'Report content' },
format: { type: 'string', description: 'Output format' },
},
required: ['title', 'content', 'format'],
},
},
}
const staleInlineCode = 'return { title, content };'
const dbCode = 'return { title, content, format };'
function mockFetchForCustomTool(toolId: string) {
mockFetch.mockImplementation((url: string) => {
if (typeof url === 'string' && url.includes('/api/tools/custom')) {
return Promise.resolve({
ok: true,
headers: { get: () => null },
json: () =>
Promise.resolve({
data: [
{
id: toolId,
title: 'formatReport',
schema: dbSchema,
code: dbCode,
},
],
}),
})
}
return Promise.resolve({
ok: true,
headers: { get: () => null },
json: () => Promise.resolve({}),
})
})
}
function mockFetchFailure() {
mockFetch.mockImplementation((url: string) => {
if (typeof url === 'string' && url.includes('/api/tools/custom')) {
return Promise.resolve({
ok: false,
status: 500,
headers: { get: () => null },
json: () => Promise.resolve({}),
})
}
return Promise.resolve({
ok: true,
headers: { get: () => null },
json: () => Promise.resolve({}),
})
})
}
beforeEach(() => {
Object.defineProperty(global, 'window', {
value: undefined,
writable: true,
configurable: true,
})
})
it('should always fetch latest schema from DB when customToolId is present', async () => {
const toolId = 'custom-tool-123'
mockFetchForCustomTool(toolId)
const inputs = {
model: 'gpt-4o',
userPrompt: 'Format a report',
apiKey: 'test-api-key',
tools: [
{
type: 'custom-tool',
customToolId: toolId,
title: 'formatReport',
schema: staleInlineSchema,
code: staleInlineCode,
usageControl: 'auto' as const,
},
],
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockContext, mockBlock, inputs)
expect(mockExecuteProviderRequest).toHaveBeenCalled()
const providerCall = mockExecuteProviderRequest.mock.calls[0]
const tools = providerCall[1].tools
expect(tools.length).toBe(1)
// DB schema wins over stale inline — includes format param
expect(tools[0].parameters.required).toContain('format')
expect(tools[0].parameters.properties).toHaveProperty('format')
})
it('should fetch from DB when customToolId has no inline schema', async () => {
const toolId = 'custom-tool-123'
mockFetchForCustomTool(toolId)
const inputs = {
model: 'gpt-4o',
userPrompt: 'Format a report',
apiKey: 'test-api-key',
tools: [
{
type: 'custom-tool',
customToolId: toolId,
usageControl: 'auto' as const,
},
],
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockContext, mockBlock, inputs)
expect(mockExecuteProviderRequest).toHaveBeenCalled()
const providerCall = mockExecuteProviderRequest.mock.calls[0]
const tools = providerCall[1].tools
expect(tools.length).toBe(1)
expect(tools[0].name).toBe('formatReport')
expect(tools[0].parameters.required).toContain('format')
})
it('should fall back to inline schema when DB fetch fails and inline exists', async () => {
mockFetchFailure()
const inputs = {
model: 'gpt-4o',
userPrompt: 'Format a report',
apiKey: 'test-api-key',
tools: [
{
type: 'custom-tool',
customToolId: 'custom-tool-123',
title: 'formatReport',
schema: staleInlineSchema,
code: staleInlineCode,
usageControl: 'auto' as const,
},
],
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockContext, mockBlock, inputs)
expect(mockExecuteProviderRequest).toHaveBeenCalled()
const providerCall = mockExecuteProviderRequest.mock.calls[0]
const tools = providerCall[1].tools
expect(tools.length).toBe(1)
expect(tools[0].name).toBe('formatReport')
expect(tools[0].parameters.required).not.toContain('format')
})
it('should return null when DB fetch fails and no inline schema exists', async () => {
mockFetchFailure()
const inputs = {
model: 'gpt-4o',
userPrompt: 'Format a report',
apiKey: 'test-api-key',
tools: [
{
type: 'custom-tool',
customToolId: 'custom-tool-123',
usageControl: 'auto' as const,
},
],
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockContext, mockBlock, inputs)
expect(mockExecuteProviderRequest).toHaveBeenCalled()
const providerCall = mockExecuteProviderRequest.mock.calls[0]
const tools = providerCall[1].tools
expect(tools.length).toBe(0)
})
it('should use DB code for executeFunction when customToolId resolves', async () => {
const toolId = 'custom-tool-123'
mockFetchForCustomTool(toolId)
let capturedTools: any[] = []
Promise.all = vi.fn().mockImplementation((promises: Promise<any>[]) => {
const result = originalPromiseAll.call(Promise, promises)
result.then((tools: any[]) => {
if (tools?.length) {
capturedTools = tools.filter((t) => t !== null)
}
})
return result
})
const inputs = {
model: 'gpt-4o',
userPrompt: 'Format a report',
apiKey: 'test-api-key',
tools: [
{
type: 'custom-tool',
customToolId: toolId,
title: 'formatReport',
schema: staleInlineSchema,
code: staleInlineCode,
usageControl: 'auto' as const,
},
],
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockContext, mockBlock, inputs)
expect(capturedTools.length).toBe(1)
expect(typeof capturedTools[0].executeFunction).toBe('function')
await capturedTools[0].executeFunction({ title: 'Q1', format: 'pdf' })
expect(mockExecuteTool).toHaveBeenCalledWith(
'function_execute',
expect.objectContaining({
code: dbCode,
}),
false,
expect.any(Object)
)
})
it('should not fetch from DB when no customToolId is present', async () => {
const inputs = {
model: 'gpt-4o',
userPrompt: 'Use the tool',
apiKey: 'test-api-key',
tools: [
{
type: 'custom-tool',
title: 'formatReport',
schema: staleInlineSchema,
code: staleInlineCode,
usageControl: 'auto' as const,
},
],
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockContext, mockBlock, inputs)
const customToolFetches = mockFetch.mock.calls.filter(
(call: any[]) => typeof call[0] === 'string' && call[0].includes('/api/tools/custom')
)
expect(customToolFetches.length).toBe(0)
expect(mockExecuteProviderRequest).toHaveBeenCalled()
const providerCall = mockExecuteProviderRequest.mock.calls[0]
const tools = providerCall[1].tools
expect(tools.length).toBe(1)
expect(tools[0].name).toBe('formatReport')
expect(tools[0].parameters.required).not.toContain('format')
})
})
}) })
}) })

View File

@@ -62,12 +62,9 @@ export class AgentBlockHandler implements BlockHandler {
await validateModelProvider(ctx.userId, model, ctx) await validateModelProvider(ctx.userId, model, ctx)
const providerId = getProviderFromModel(model) const providerId = getProviderFromModel(model)
const formattedTools = await this.formatTools( const formattedTools = await this.formatTools(ctx, filteredInputs.tools || [])
ctx,
filteredInputs.tools || [],
block.canonicalModes
)
// Resolve skill metadata for progressive disclosure
const skillInputs = filteredInputs.skills ?? [] const skillInputs = filteredInputs.skills ?? []
let skillMetadata: Array<{ name: string; description: string }> = [] let skillMetadata: Array<{ name: string; description: string }> = []
if (skillInputs.length > 0 && ctx.workspaceId) { if (skillInputs.length > 0 && ctx.workspaceId) {
@@ -224,11 +221,7 @@ export class AgentBlockHandler implements BlockHandler {
}) })
} }
private async formatTools( private async formatTools(ctx: ExecutionContext, inputTools: ToolInput[]): Promise<any[]> {
ctx: ExecutionContext,
inputTools: ToolInput[],
canonicalModes?: Record<string, 'basic' | 'advanced'>
): Promise<any[]> {
if (!Array.isArray(inputTools)) return [] if (!Array.isArray(inputTools)) return []
const filtered = inputTools.filter((tool) => { const filtered = inputTools.filter((tool) => {
@@ -256,7 +249,7 @@ export class AgentBlockHandler implements BlockHandler {
if (tool.type === 'custom-tool' && (tool.schema || tool.customToolId)) { if (tool.type === 'custom-tool' && (tool.schema || tool.customToolId)) {
return await this.createCustomTool(ctx, tool) return await this.createCustomTool(ctx, tool)
} }
return this.transformBlockTool(ctx, tool, canonicalModes) return this.transformBlockTool(ctx, tool)
} catch (error) { } catch (error) {
logger.error(`[AgentHandler] Error creating tool:`, { tool, error }) logger.error(`[AgentHandler] Error creating tool:`, { tool, error })
return null return null
@@ -279,16 +272,15 @@ export class AgentBlockHandler implements BlockHandler {
let code = tool.code let code = tool.code
let title = tool.title let title = tool.title
if (tool.customToolId) { if (tool.customToolId && !schema) {
const resolved = await this.fetchCustomToolById(ctx, tool.customToolId) const resolved = await this.fetchCustomToolById(ctx, tool.customToolId)
if (resolved) { if (!resolved) {
schema = resolved.schema
code = resolved.code
title = resolved.title
} else if (!schema) {
logger.error(`Custom tool not found: ${tool.customToolId}`) logger.error(`Custom tool not found: ${tool.customToolId}`)
return null return null
} }
schema = resolved.schema
code = resolved.code
title = resolved.title
} }
if (!schema?.function) { if (!schema?.function) {
@@ -727,17 +719,12 @@ export class AgentBlockHandler implements BlockHandler {
} }
} }
private async transformBlockTool( private async transformBlockTool(ctx: ExecutionContext, tool: ToolInput) {
ctx: ExecutionContext,
tool: ToolInput,
canonicalModes?: Record<string, 'basic' | 'advanced'>
) {
const transformedTool = await transformBlockTool(tool, { const transformedTool = await transformBlockTool(tool, {
selectedOperation: tool.operation, selectedOperation: tool.operation,
getAllBlocks, getAllBlocks,
getToolAsync: (toolId: string) => getToolAsync(toolId, ctx.workflowId), getToolAsync: (toolId: string) => getToolAsync(toolId, ctx.workflowId),
getTool, getTool,
canonicalModes,
}) })
if (transformedTool) { if (transformedTool) {

View File

@@ -2,7 +2,7 @@ import { db } from '@sim/db'
import { account } from '@sim/db/schema' import { account } from '@sim/db/schema'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm' import { eq } from 'drizzle-orm'
import { getInternalApiBaseUrl } from '@/lib/core/utils/urls' import { getBaseUrl } from '@/lib/core/utils/urls'
import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils' import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils'
import { generateRouterPrompt, generateRouterV2Prompt } from '@/blocks/blocks/router' import { generateRouterPrompt, generateRouterV2Prompt } from '@/blocks/blocks/router'
import type { BlockOutput } from '@/blocks/types' import type { BlockOutput } from '@/blocks/types'
@@ -79,7 +79,7 @@ export class RouterBlockHandler implements BlockHandler {
const providerId = getProviderFromModel(routerConfig.model) const providerId = getProviderFromModel(routerConfig.model)
try { try {
const url = new URL('/api/providers', getInternalApiBaseUrl()) const url = new URL('/api/providers', getBaseUrl())
if (ctx.userId) url.searchParams.set('userId', ctx.userId) if (ctx.userId) url.searchParams.set('userId', ctx.userId)
const messages = [{ role: 'user', content: routerConfig.prompt }] const messages = [{ role: 'user', content: routerConfig.prompt }]
@@ -209,7 +209,7 @@ export class RouterBlockHandler implements BlockHandler {
const providerId = getProviderFromModel(routerConfig.model) const providerId = getProviderFromModel(routerConfig.model)
try { try {
const url = new URL('/api/providers', getInternalApiBaseUrl()) const url = new URL('/api/providers', getBaseUrl())
if (ctx.userId) url.searchParams.set('userId', ctx.userId) if (ctx.userId) url.searchParams.set('userId', ctx.userId)
const messages = [{ role: 'user', content: routerConfig.context }] const messages = [{ role: 'user', content: routerConfig.context }]

View File

@@ -1,4 +1,3 @@
import { setupGlobalFetchMock } from '@sim/testing'
import { beforeEach, describe, expect, it, type Mock, vi } from 'vitest' import { beforeEach, describe, expect, it, type Mock, vi } from 'vitest'
import { BlockType } from '@/executor/constants' import { BlockType } from '@/executor/constants'
import { WorkflowBlockHandler } from '@/executor/handlers/workflow/workflow-handler' import { WorkflowBlockHandler } from '@/executor/handlers/workflow/workflow-handler'
@@ -10,7 +9,7 @@ vi.mock('@/lib/auth/internal', () => ({
})) }))
// Mock fetch globally // Mock fetch globally
setupGlobalFetchMock() global.fetch = vi.fn()
describe('WorkflowBlockHandler', () => { describe('WorkflowBlockHandler', () => {
let handler: WorkflowBlockHandler let handler: WorkflowBlockHandler

View File

@@ -184,6 +184,48 @@ describe('resolveBlockReference', () => {
const result = resolveBlockReference('start', ['input'], ctx) const result = resolveBlockReference('start', ['input'], ctx)
expect(result).toEqual({ value: undefined, blockId: 'block-1' }) expect(result).toEqual({ value: undefined, blockId: 'block-1' })
}) })
it('should allow nested access under json output schema fields', () => {
const ctx = createContext({
blockData: {},
blockOutputSchemas: {
'block-1': {
result: { type: 'json' },
},
},
})
const result = resolveBlockReference('start', ['result', 'emails', '0', 'subject'], ctx)
expect(result).toEqual({ value: undefined, blockId: 'block-1' })
})
it('should allow nested access under any output schema fields', () => {
const ctx = createContext({
blockData: {},
blockOutputSchemas: {
'block-1': {
payload: { type: 'any' },
},
},
})
const result = resolveBlockReference('start', ['payload', 'nested', 'key'], ctx)
expect(result).toEqual({ value: undefined, blockId: 'block-1' })
})
it('should allow nested access for arrays without explicit item schema', () => {
const ctx = createContext({
blockData: {},
blockOutputSchemas: {
'block-1': {
rows: { type: 'array' },
},
},
})
const result = resolveBlockReference('start', ['rows', '0', 'value'], ctx)
expect(result).toEqual({ value: undefined, blockId: 'block-1' })
})
}) })
describe('without schema (pass-through mode)', () => { describe('without schema (pass-through mode)', () => {

View File

@@ -53,6 +53,24 @@ function getProperties(schema: unknown): Record<string, unknown> | undefined {
: undefined : undefined
} }
function getSchemaType(schema: unknown): string | null {
if (typeof schema !== 'object' || schema === null) return null
const rawType = (schema as { type?: unknown }).type
return typeof rawType === 'string' ? rawType.toLowerCase() : null
}
function isDynamicSchemaNode(schema: unknown): boolean {
const schemaType = getSchemaType(schema)
if (!schemaType) return false
if (schemaType === 'any' || schemaType === 'json') {
return true
}
if (schemaType === 'object' && !getProperties(schema)) {
return true
}
return false
}
function lookupField(schema: unknown, fieldName: string): unknown | undefined { function lookupField(schema: unknown, fieldName: string): unknown | undefined {
if (typeof schema !== 'object' || schema === null) return undefined if (typeof schema !== 'object' || schema === null) return undefined
const typed = schema as Record<string, unknown> const typed = schema as Record<string, unknown>
@@ -83,6 +101,12 @@ function isPathInSchema(schema: OutputSchema | undefined, pathParts: string[]):
return false return false
} }
if (isDynamicSchemaNode(current)) {
// Dynamic schema node (json/any/object-without-properties):
// allow deeper traversal without strict field validation.
return true
}
if (/^\d+$/.test(part)) { if (/^\d+$/.test(part)) {
if (isFileType(current)) { if (isFileType(current)) {
const nextPart = pathParts[i + 1] const nextPart = pathParts[i + 1]
@@ -94,7 +118,12 @@ function isPathInSchema(schema: OutputSchema | undefined, pathParts: string[]):
) )
} }
if (isArrayType(current)) { if (isArrayType(current)) {
current = getArrayItems(current) const items = getArrayItems(current)
if (items === undefined) {
// Arrays without declared item schema are treated as dynamic.
return true
}
current = items
} }
continue continue
} }
@@ -116,6 +145,10 @@ function isPathInSchema(schema: OutputSchema | undefined, pathParts: string[]):
} }
current = isArrayType(fieldDef) ? getArrayItems(fieldDef) : fieldDef current = isArrayType(fieldDef) ? getArrayItems(fieldDef) : fieldDef
if (current === undefined) {
// Array/object without explicit shape after this segment.
return true
}
continue continue
} }
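
A minimal standalone sketch of the dynamic-schema rule added above: nodes typed `json` or `any` (or `object` with no declared properties) allow any deeper path, and arrays without a declared item schema behave the same way. The `SchemaNode` shape and helper names below are simplified illustrations, not the module's real types.

```ts
type SchemaNode = {
  type?: string
  properties?: Record<string, SchemaNode>
  items?: SchemaNode
}

// A node is "dynamic" when its shape is not declared: json/any, or object without properties.
function isDynamicNode(node: SchemaNode | undefined): boolean {
  const t = node?.type?.toLowerCase()
  if (t === 'json' || t === 'any') return true
  return t === 'object' && !node?.properties
}

function allowsPath(schema: Record<string, SchemaNode>, path: string[]): boolean {
  let current: SchemaNode | undefined = schema[path[0]]
  if (!current) return false
  for (const part of path.slice(1)) {
    if (isDynamicNode(current)) return true // anything under a dynamic node is accepted
    if (/^\d+$/.test(part)) {
      if (current.type?.toLowerCase() !== 'array') return false
      if (!current.items) return true // arrays without a declared item schema are dynamic too
      current = current.items
      continue
    }
    current = current.properties?.[part]
    if (!current) return false
  }
  return true
}

// Mirrors the new test cases:
allowsPath({ result: { type: 'json' } }, ['result', 'emails', '0', 'subject']) // true
allowsPath({ rows: { type: 'array' } }, ['rows', '0', 'value']) // true
allowsPath({ user: { type: 'object', properties: { id: { type: 'string' } } } }, ['user', 'name']) // false
```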

View File

@@ -1,5 +1,5 @@
import { generateInternalToken } from '@/lib/auth/internal' import { generateInternalToken } from '@/lib/auth/internal'
import { getBaseUrl, getInternalApiBaseUrl } from '@/lib/core/utils/urls' import { getBaseUrl } from '@/lib/core/utils/urls'
import { HTTP } from '@/executor/constants' import { HTTP } from '@/executor/constants'
export async function buildAuthHeaders(): Promise<Record<string, string>> { export async function buildAuthHeaders(): Promise<Record<string, string>> {
@@ -16,8 +16,7 @@ export async function buildAuthHeaders(): Promise<Record<string, string>> {
} }
export function buildAPIUrl(path: string, params?: Record<string, string>): URL { export function buildAPIUrl(path: string, params?: Record<string, string>): URL {
const baseUrl = path.startsWith('/api/') ? getInternalApiBaseUrl() : getBaseUrl() const url = new URL(path, getBaseUrl())
const url = new URL(path, baseUrl)
if (params) { if (params) {
for (const [key, value] of Object.entries(params)) { for (const [key, value] of Object.entries(params)) {
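
For illustration, a hedged usage sketch of the simplified `buildAPIUrl` above; the base URL value and the query-parameter name are assumptions, not taken from the diff.

```ts
import { buildAPIUrl } from '@/executor/utils/http' // module path as mocked elsewhere in the diff

// Relative paths resolve against getBaseUrl(); assuming that returns 'http://localhost:3000':
const url = buildAPIUrl('/api/tools/custom', { workflowId: 'wf_123' })
url.toString() // 'http://localhost:3000/api/tools/custom?workflowId=wf_123'
```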

View File

@@ -423,7 +423,7 @@ interface GenerateVersionDescriptionVariables {
const VERSION_DESCRIPTION_SYSTEM_PROMPT = `You are writing deployment version descriptions for a workflow automation platform. const VERSION_DESCRIPTION_SYSTEM_PROMPT = `You are writing deployment version descriptions for a workflow automation platform.
Write a brief, factual description (1-3 sentences, under 2000 characters) that states what changed between versions. Write a brief, factual description (1-3 sentences, under 400 characters) that states what changed between versions.
Guidelines: Guidelines:
- Use the specific values provided (credential names, channel names, model names) - Use the specific values provided (credential names, channel names, model names)

View File

@@ -642,10 +642,6 @@ export function useDeployChildWorkflow() {
queryClient.invalidateQueries({ queryClient.invalidateQueries({
queryKey: workflowKeys.deploymentStatus(variables.workflowId), queryKey: workflowKeys.deploymentStatus(variables.workflowId),
}) })
// Invalidate workflow state so tool input mappings refresh
queryClient.invalidateQueries({
queryKey: workflowKeys.state(variables.workflowId),
})
// Also invalidate deployment queries // Also invalidate deployment queries
queryClient.invalidateQueries({ queryClient.invalidateQueries({
queryKey: deploymentKeys.info(variables.workflowId), queryKey: deploymentKeys.info(variables.workflowId),

View File

@@ -1,4 +1,4 @@
import { useCallback } from 'react' import { useCallback, useRef } from 'react'
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import type { import type {
BlockCompletedData, BlockCompletedData,
@@ -16,18 +16,6 @@ import type { SerializableExecutionState } from '@/executor/execution/types'
const logger = createLogger('useExecutionStream') const logger = createLogger('useExecutionStream')
/**
* Detects errors caused by the browser killing a fetch (page refresh, navigation, tab close).
* These should be treated as clean disconnects, not execution errors.
*/
function isClientDisconnectError(error: any): boolean {
if (error.name === 'AbortError') return true
const msg = (error.message ?? '').toLowerCase()
return (
msg.includes('network error') || msg.includes('failed to fetch') || msg.includes('load failed')
)
}
/** /**
* Processes SSE events from a response body and invokes appropriate callbacks. * Processes SSE events from a response body and invokes appropriate callbacks.
*/ */
@@ -133,7 +121,6 @@ export interface ExecuteStreamOptions {
parallels?: Record<string, any> parallels?: Record<string, any>
} }
stopAfterBlockId?: string stopAfterBlockId?: string
onExecutionId?: (executionId: string) => void
callbacks?: ExecutionStreamCallbacks callbacks?: ExecutionStreamCallbacks
} }
@@ -142,40 +129,30 @@ export interface ExecuteFromBlockOptions {
startBlockId: string startBlockId: string
sourceSnapshot: SerializableExecutionState sourceSnapshot: SerializableExecutionState
input?: any input?: any
onExecutionId?: (executionId: string) => void
callbacks?: ExecutionStreamCallbacks callbacks?: ExecutionStreamCallbacks
} }
export interface ReconnectStreamOptions {
workflowId: string
executionId: string
fromEventId?: number
callbacks?: ExecutionStreamCallbacks
}
/**
* Module-level map shared across all hook instances.
* Ensures ANY instance can cancel streams started by ANY other instance,
* which is critical for SPA navigation where the original hook instance unmounts
* but the SSE stream must be cancellable from the new instance.
*/
const sharedAbortControllers = new Map<string, AbortController>()
/** /**
* Hook for executing workflows via server-side SSE streaming. * Hook for executing workflows via server-side SSE streaming.
* Supports concurrent executions via per-workflow AbortController maps. * Supports concurrent executions via per-workflow AbortController maps.
*/ */
export function useExecutionStream() { export function useExecutionStream() {
const execute = useCallback(async (options: ExecuteStreamOptions) => { const abortControllersRef = useRef<Map<string, AbortController>>(new Map())
const { workflowId, callbacks = {}, onExecutionId, ...payload } = options const currentExecutionsRef = useRef<Map<string, { workflowId: string; executionId: string }>>(
new Map()
)
const existing = sharedAbortControllers.get(workflowId) const execute = useCallback(async (options: ExecuteStreamOptions) => {
const { workflowId, callbacks = {}, ...payload } = options
const existing = abortControllersRef.current.get(workflowId)
if (existing) { if (existing) {
existing.abort() existing.abort()
} }
const abortController = new AbortController() const abortController = new AbortController()
sharedAbortControllers.set(workflowId, abortController) abortControllersRef.current.set(workflowId, abortController)
currentExecutionsRef.current.delete(workflowId)
try { try {
const response = await fetch(`/api/workflows/${workflowId}/execute`, { const response = await fetch(`/api/workflows/${workflowId}/execute`, {
@@ -200,48 +177,42 @@ export function useExecutionStream() {
throw new Error('No response body') throw new Error('No response body')
} }
const serverExecutionId = response.headers.get('X-Execution-Id') const executionId = response.headers.get('X-Execution-Id')
if (serverExecutionId) { if (executionId) {
onExecutionId?.(serverExecutionId) currentExecutionsRef.current.set(workflowId, { workflowId, executionId })
} }
const reader = response.body.getReader() const reader = response.body.getReader()
await processSSEStream(reader, callbacks, 'Execution') await processSSEStream(reader, callbacks, 'Execution')
} catch (error: any) { } catch (error: any) {
if (isClientDisconnectError(error)) { if (error.name === 'AbortError') {
logger.info('Execution stream disconnected (page unload or abort)') logger.info('Execution stream cancelled')
return callbacks.onExecutionCancelled?.({ duration: 0 })
} else {
logger.error('Execution stream error:', error)
callbacks.onExecutionError?.({
error: error.message || 'Unknown error',
duration: 0,
})
} }
logger.error('Execution stream error:', error)
callbacks.onExecutionError?.({
error: error.message || 'Unknown error',
duration: 0,
})
throw error throw error
} finally { } finally {
if (sharedAbortControllers.get(workflowId) === abortController) { abortControllersRef.current.delete(workflowId)
sharedAbortControllers.delete(workflowId) currentExecutionsRef.current.delete(workflowId)
}
} }
}, []) }, [])
const executeFromBlock = useCallback(async (options: ExecuteFromBlockOptions) => { const executeFromBlock = useCallback(async (options: ExecuteFromBlockOptions) => {
const { const { workflowId, startBlockId, sourceSnapshot, input, callbacks = {} } = options
workflowId,
startBlockId,
sourceSnapshot,
input,
onExecutionId,
callbacks = {},
} = options
const existing = sharedAbortControllers.get(workflowId) const existing = abortControllersRef.current.get(workflowId)
if (existing) { if (existing) {
existing.abort() existing.abort()
} }
const abortController = new AbortController() const abortController = new AbortController()
sharedAbortControllers.set(workflowId, abortController) abortControllersRef.current.set(workflowId, abortController)
currentExecutionsRef.current.delete(workflowId)
try { try {
const response = await fetch(`/api/workflows/${workflowId}/execute`, { const response = await fetch(`/api/workflows/${workflowId}/execute`, {
@@ -275,80 +246,64 @@ export function useExecutionStream() {
throw new Error('No response body') throw new Error('No response body')
} }
const serverExecutionId = response.headers.get('X-Execution-Id') const executionId = response.headers.get('X-Execution-Id')
if (serverExecutionId) { if (executionId) {
onExecutionId?.(serverExecutionId) currentExecutionsRef.current.set(workflowId, { workflowId, executionId })
} }
const reader = response.body.getReader() const reader = response.body.getReader()
await processSSEStream(reader, callbacks, 'Run-from-block') await processSSEStream(reader, callbacks, 'Run-from-block')
} catch (error: any) { } catch (error: any) {
if (isClientDisconnectError(error)) { if (error.name === 'AbortError') {
logger.info('Run-from-block stream disconnected (page unload or abort)') logger.info('Run-from-block execution cancelled')
return callbacks.onExecutionCancelled?.({ duration: 0 })
} else {
logger.error('Run-from-block execution error:', error)
callbacks.onExecutionError?.({
error: error.message || 'Unknown error',
duration: 0,
})
} }
logger.error('Run-from-block execution error:', error)
callbacks.onExecutionError?.({
error: error.message || 'Unknown error',
duration: 0,
})
throw error throw error
} finally { } finally {
if (sharedAbortControllers.get(workflowId) === abortController) { abortControllersRef.current.delete(workflowId)
sharedAbortControllers.delete(workflowId) currentExecutionsRef.current.delete(workflowId)
}
}
}, [])
const reconnect = useCallback(async (options: ReconnectStreamOptions) => {
const { workflowId, executionId, fromEventId = 0, callbacks = {} } = options
const existing = sharedAbortControllers.get(workflowId)
if (existing) {
existing.abort()
}
const abortController = new AbortController()
sharedAbortControllers.set(workflowId, abortController)
try {
const response = await fetch(
`/api/workflows/${workflowId}/executions/${executionId}/stream?from=${fromEventId}`,
{ signal: abortController.signal }
)
if (!response.ok) throw new Error(`Reconnect failed (${response.status})`)
if (!response.body) throw new Error('No response body')
await processSSEStream(response.body.getReader(), callbacks, 'Reconnect')
} catch (error: any) {
if (isClientDisconnectError(error)) return
logger.error('Reconnection stream error:', error)
throw error
} finally {
if (sharedAbortControllers.get(workflowId) === abortController) {
sharedAbortControllers.delete(workflowId)
}
} }
}, []) }, [])
const cancel = useCallback((workflowId?: string) => { const cancel = useCallback((workflowId?: string) => {
if (workflowId) { if (workflowId) {
const controller = sharedAbortControllers.get(workflowId) const execution = currentExecutionsRef.current.get(workflowId)
if (execution) {
fetch(`/api/workflows/${execution.workflowId}/executions/${execution.executionId}/cancel`, {
method: 'POST',
}).catch(() => {})
}
const controller = abortControllersRef.current.get(workflowId)
if (controller) { if (controller) {
controller.abort() controller.abort()
sharedAbortControllers.delete(workflowId) abortControllersRef.current.delete(workflowId)
} }
currentExecutionsRef.current.delete(workflowId)
} else { } else {
for (const [, controller] of sharedAbortControllers) { for (const [, execution] of currentExecutionsRef.current) {
fetch(`/api/workflows/${execution.workflowId}/executions/${execution.executionId}/cancel`, {
method: 'POST',
}).catch(() => {})
}
for (const [, controller] of abortControllersRef.current) {
controller.abort() controller.abort()
} }
sharedAbortControllers.clear() abortControllersRef.current.clear()
currentExecutionsRef.current.clear()
} }
}, []) }, [])
return { return {
execute, execute,
executeFromBlock, executeFromBlock,
reconnect,
cancel, cancel,
} }
} }
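
A hedged usage sketch (not part of the diff) of the hook as it stands after this change: `execute` streams SSE events through the supplied callbacks and rethrows on failure, while `cancel(workflowId)` aborts the local fetch and POSTs the server-side cancel endpoint. The component, import path, and button wiring below are illustrative assumptions; `onExecutionError` is the only callback confirmed by the hunk.

```tsx
import { useExecutionStream } from './use-execution-stream' // path assumed

function RunButton({ workflowId }: { workflowId: string }) {
  const { execute, cancel } = useExecutionStream()

  const handleRun = () =>
    execute({
      workflowId,
      callbacks: {
        onExecutionError: ({ error }) => console.error('Execution failed:', error),
      },
    }).catch(() => {
      // execute() rethrows after invoking callbacks; errors are already surfaced above
    })

  return (
    <div>
      <button onClick={handleRun}>Run</button>
      {/* aborts the in-flight SSE fetch and requests server-side cancellation */}
      <button onClick={() => cancel(workflowId)}>Stop</button>
    </div>
  )
}
```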

View File

@@ -1,46 +0,0 @@
'use client'
import { useEffect, useRef } from 'react'
import { createLogger } from '@sim/logger'
const logger = createLogger('ReferralAttribution')
const COOKIE_NAME = 'sim_utm'
const TERMINAL_REASONS = new Set([
'invalid_cookie',
'no_utm_cookie',
'no_matching_campaign',
'already_attributed',
])
/**
* Fires a one-shot `POST /api/attribution` when a `sim_utm` cookie is present.
* Retries on transient failures; stops on terminal outcomes.
*/
export function useReferralAttribution() {
const calledRef = useRef(false)
useEffect(() => {
if (calledRef.current) return
if (!document.cookie.includes(COOKIE_NAME)) return
calledRef.current = true
fetch('/api/attribution', { method: 'POST' })
.then((res) => res.json())
.then((data) => {
if (data.attributed) {
logger.info('Referral attribution successful', { bonusAmount: data.bonusAmount })
} else if (data.error || TERMINAL_REASONS.has(data.reason)) {
logger.info('Referral attribution skipped', { reason: data.reason || data.error })
} else {
calledRef.current = false
}
})
.catch((err) => {
logger.warn('Referral attribution failed, will retry', { error: err })
calledRef.current = false
})
}, [])
}

View File

@@ -1,64 +0,0 @@
import { db } from '@sim/db'
import { organization, userStats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq, sql } from 'drizzle-orm'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import type { DbOrTx } from '@/lib/db/types'
const logger = createLogger('BonusCredits')
/**
* Apply bonus credits to a user (e.g. referral bonuses, promotional codes).
*
* Detects the user's current plan and routes credits accordingly:
* - Free/Pro: adds to `userStats.creditBalance` and increments `currentUsageLimit`
* - Team/Enterprise: adds to `organization.creditBalance` and increments `orgUsageLimit`
*
* Uses direct increment (not recalculation) so it works correctly for free-tier
* users where `setUsageLimitForCredits` would compute planBase=0 and skip the update.
*
* @param tx - Optional Drizzle transaction context. When provided, all DB writes
* participate in the caller's transaction for atomicity.
*/
export async function applyBonusCredits(
userId: string,
amount: number,
tx?: DbOrTx
): Promise<void> {
const dbCtx = tx ?? db
const subscription = await getHighestPrioritySubscription(userId)
const isTeamOrEnterprise = subscription?.plan === 'team' || subscription?.plan === 'enterprise'
if (isTeamOrEnterprise && subscription?.referenceId) {
const orgId = subscription.referenceId
await dbCtx
.update(organization)
.set({
creditBalance: sql`${organization.creditBalance} + ${amount}`,
orgUsageLimit: sql`COALESCE(${organization.orgUsageLimit}, '0')::decimal + ${amount}`,
})
.where(eq(organization.id, orgId))
logger.info('Applied bonus credits to organization', {
userId,
organizationId: orgId,
plan: subscription.plan,
amount,
})
} else {
await dbCtx
.update(userStats)
.set({
creditBalance: sql`${userStats.creditBalance} + ${amount}`,
currentUsageLimit: sql`COALESCE(${userStats.currentUsageLimit}, '0')::decimal + ${amount}`,
})
.where(eq(userStats.userId, userId))
logger.info('Applied bonus credits to user', {
userId,
plan: subscription?.plan || 'free',
amount,
})
}
}

View File

@@ -1,22 +1,21 @@
import { createLogger } from '@sim/logger' import { createLogger } from '@sim/logger'
import { COPILOT_CONFIRM_API_PATH, STREAM_STORAGE_KEY } from '@/lib/copilot/constants' import { STREAM_STORAGE_KEY } from '@/lib/copilot/constants'
import { asRecord } from '@/lib/copilot/orchestrator/sse-utils' import { asRecord } from '@/lib/copilot/orchestrator/sse-utils'
import type { SSEEvent } from '@/lib/copilot/orchestrator/types' import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
import { import { isBackgroundState, isRejectedState, isReviewState } from '@/lib/copilot/store-utils'
isBackgroundState,
isRejectedState,
isReviewState,
resolveToolDisplay,
} from '@/lib/copilot/store-utils'
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry' import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
import type { CopilotStore, CopilotStreamInfo, CopilotToolCall } from '@/stores/panel/copilot/types' import type { CopilotStore, CopilotStreamInfo, CopilotToolCall } from '@/stores/panel/copilot/types'
import { useVariablesStore } from '@/stores/panel/variables/store'
import { useEnvironmentStore } from '@/stores/settings/environment/store'
import { useWorkflowDiffStore } from '@/stores/workflow-diff/store'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
import { appendTextBlock, beginThinkingBlock, finalizeThinkingBlock } from './content-blocks' import { appendTextBlock, beginThinkingBlock, finalizeThinkingBlock } from './content-blocks'
import { CLIENT_EXECUTABLE_RUN_TOOLS, executeRunToolOnClient } from './run-tool-execution' import { CLIENT_EXECUTABLE_RUN_TOOLS, executeRunToolOnClient } from './run-tool-execution'
import {
extractOperationListFromResultPayload,
extractToolExecutionMetadata,
extractToolUiMetadata,
isWorkflowChangeApplyCall,
mapServerStateToClientState,
resolveDisplayFromServerUi,
} from './tool-call-helpers'
import { applyToolEffects } from './tool-effects'
import type { ClientContentBlock, ClientStreamingContext } from './types' import type { ClientContentBlock, ClientStreamingContext } from './types'
const logger = createLogger('CopilotClientSseHandlers') const logger = createLogger('CopilotClientSseHandlers')
@@ -26,21 +25,11 @@ const MAX_BATCH_INTERVAL = 50
const MIN_BATCH_INTERVAL = 16 const MIN_BATCH_INTERVAL = 16
const MAX_QUEUE_SIZE = 5 const MAX_QUEUE_SIZE = 5
/** function isClientRunCapability(toolCall: CopilotToolCall): boolean {
* Send an auto-accept confirmation to the server for auto-allowed tools. if (toolCall.execution?.target === 'sim_client_capability') {
* The server-side orchestrator polls Redis for this decision. return toolCall.execution.capabilityId === 'workflow.run' || !toolCall.execution.capabilityId
*/ }
export function sendAutoAcceptConfirmation(toolCallId: string): void { return CLIENT_EXECUTABLE_RUN_TOOLS.has(toolCall.name)
fetch(COPILOT_CONFIRM_API_PATH, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ toolCallId, status: 'accepted' }),
}).catch((error) => {
logger.warn('Failed to send auto-accept confirmation', {
toolCallId,
error: error instanceof Error ? error.message : String(error),
})
})
} }
function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void { function writeActiveStreamToStorage(info: CopilotStreamInfo | null): void {
@@ -230,28 +219,86 @@ export const sseHandlers: Record<string, SSEHandler> = {
} }
}, },
title_updated: (_data, _context, get, set) => { title_updated: (_data, _context, get, set) => {
const title = _data.title const title = typeof _data.title === 'string' ? _data.title.trim() : ''
if (!title) return const eventChatId = typeof _data.chatId === 'string' ? _data.chatId : undefined
const { currentChat, chats } = get() const { currentChat, chats } = get()
if (currentChat) {
set({ logger.info('[Title] Received title_updated SSE event', {
currentChat: { ...currentChat, title }, eventTitle: title,
chats: chats.map((c) => (c.id === currentChat.id ? { ...c, title } : c)), eventChatId: eventChatId || null,
currentChatId: currentChat?.id || null,
currentChatTitle: currentChat?.title || null,
chatCount: chats.length,
})
if (!title) {
logger.warn('[Title] Ignoring title_updated event with empty title', {
payload: _data,
})
return
}
if (!currentChat) {
logger.warn('[Title] Received title_updated event without an active currentChat', {
eventChatId: eventChatId || null,
title,
})
return
}
const targetChatId = eventChatId || currentChat.id
if (eventChatId && eventChatId !== currentChat.id) {
logger.warn('[Title] title_updated event chatId does not match currentChat', {
eventChatId,
currentChatId: currentChat.id,
}) })
} }
set({
currentChat:
currentChat.id === targetChatId
? {
...currentChat,
title,
}
: currentChat,
chats: chats.map((c) => (c.id === targetChatId ? { ...c, title } : c)),
})
const updatedState = get()
logger.info('[Title] Applied title_updated event to copilot store', {
targetChatId,
renderedCurrentChatId: updatedState.currentChat?.id || null,
renderedCurrentChatTitle: updatedState.currentChat?.title || null,
chatListTitle: updatedState.chats.find((c) => c.id === targetChatId)?.title || null,
})
}, },
tool_result: (data, context, get, set) => { 'copilot.tool.result': (data, context, get, set) => {
try { try {
const eventData = asRecord(data?.data) const eventData = asRecord(data?.data)
const toolCallId: string | undefined = const toolCallId: string | undefined =
data?.toolCallId || (eventData.id as string | undefined) data?.toolCallId ||
(eventData.id as string | undefined) ||
(eventData.callId as string | undefined)
const success: boolean | undefined = data?.success const success: boolean | undefined = data?.success
const failedDependency: boolean = data?.failedDependency === true const failedDependency: boolean = data?.failedDependency === true
const resultObj = asRecord(data?.result) const resultObj = asRecord(data?.result)
const skipped: boolean = resultObj.skipped === true const skipped: boolean = resultObj.skipped === true
if (!toolCallId) return if (!toolCallId) return
const uiMetadata = extractToolUiMetadata(eventData)
const executionMetadata = extractToolExecutionMetadata(eventData)
const serverState = (eventData.state as string | undefined) || undefined
const targetState = serverState
? mapServerStateToClientState(serverState)
: success
? ClientToolCallState.success
: failedDependency || skipped
? ClientToolCallState.rejected
: ClientToolCallState.error
const resultPayload = asRecord(data?.result || eventData.result || eventData.data || data?.data)
const { toolCallsById } = get() const { toolCallsById } = get()
const current = toolCallsById[toolCallId] const current = toolCallsById[toolCallId]
let paramsForCurrentToolCall: Record<string, unknown> | undefined = current?.params
if (current) { if (current) {
if ( if (
isRejectedState(current.state) || isRejectedState(current.state) ||
@@ -260,16 +307,33 @@ export const sseHandlers: Record<string, SSEHandler> = {
) { ) {
return return
} }
const targetState = success if (
? ClientToolCallState.success targetState === ClientToolCallState.success &&
: failedDependency || skipped isWorkflowChangeApplyCall(current.name, paramsForCurrentToolCall)
? ClientToolCallState.rejected ) {
: ClientToolCallState.error const operations = extractOperationListFromResultPayload(resultPayload || {})
if (operations && operations.length > 0) {
paramsForCurrentToolCall = {
...(current.params || {}),
operations,
}
}
}
const updatedMap = { ...toolCallsById } const updatedMap = { ...toolCallsById }
updatedMap[toolCallId] = { updatedMap[toolCallId] = {
...current, ...current,
ui: uiMetadata || current.ui,
execution: executionMetadata || current.execution,
params: paramsForCurrentToolCall,
state: targetState, state: targetState,
display: resolveToolDisplay(current.name, targetState, current.id, current.params), display: resolveDisplayFromServerUi(
current.name,
targetState,
current.id,
paramsForCurrentToolCall,
uiMetadata || current.ui
),
} }
set({ toolCallsById: updatedMap }) set({ toolCallsById: updatedMap })
@@ -312,138 +376,11 @@ export const sseHandlers: Record<string, SSEHandler> = {
} }
} }
if (current.name === 'edit_workflow') { applyToolEffects({
try { effectsRaw: eventData.effects,
const resultPayload = asRecord( toolCall: updatedMap[toolCallId],
data?.result || eventData.result || eventData.data || data?.data resultPayload,
) })
const workflowState = asRecord(resultPayload?.workflowState)
const hasWorkflowState = !!resultPayload?.workflowState
logger.info('[SSE] edit_workflow result received', {
hasWorkflowState,
blockCount: hasWorkflowState ? Object.keys(workflowState.blocks ?? {}).length : 0,
edgeCount: Array.isArray(workflowState.edges) ? workflowState.edges.length : 0,
})
if (hasWorkflowState) {
const diffStore = useWorkflowDiffStore.getState()
diffStore
.setProposedChanges(resultPayload.workflowState as WorkflowState)
.catch((err) => {
logger.error('[SSE] Failed to apply edit_workflow diff', {
error: err instanceof Error ? err.message : String(err),
})
})
}
} catch (err) {
logger.error('[SSE] edit_workflow result handling failed', {
error: err instanceof Error ? err.message : String(err),
})
}
}
// Deploy tools: update deployment status in workflow registry
if (
targetState === ClientToolCallState.success &&
(current.name === 'deploy_api' ||
current.name === 'deploy_chat' ||
current.name === 'deploy_mcp' ||
current.name === 'redeploy')
) {
try {
const resultPayload = asRecord(
data?.result || eventData.result || eventData.data || data?.data
)
const input = asRecord(current.params)
const workflowId =
(resultPayload?.workflowId as string) ||
(input?.workflowId as string) ||
useWorkflowRegistry.getState().activeWorkflowId
const isDeployed = resultPayload?.isDeployed !== false
if (workflowId) {
useWorkflowRegistry
.getState()
.setDeploymentStatus(workflowId, isDeployed, isDeployed ? new Date() : undefined)
logger.info('[SSE] Updated deployment status from tool result', {
toolName: current.name,
workflowId,
isDeployed,
})
}
} catch (err) {
logger.warn('[SSE] Failed to hydrate deployment status', {
error: err instanceof Error ? err.message : String(err),
})
}
}
// Environment variables: reload store after successful set
if (
targetState === ClientToolCallState.success &&
current.name === 'set_environment_variables'
) {
try {
useEnvironmentStore.getState().loadEnvironmentVariables()
logger.info('[SSE] Triggered environment variables reload')
} catch (err) {
logger.warn('[SSE] Failed to reload environment variables', {
error: err instanceof Error ? err.message : String(err),
})
}
}
// Workflow variables: reload store after successful set
if (
targetState === ClientToolCallState.success &&
current.name === 'set_global_workflow_variables'
) {
try {
const input = asRecord(current.params)
const workflowId =
(input?.workflowId as string) || useWorkflowRegistry.getState().activeWorkflowId
if (workflowId) {
useVariablesStore.getState().loadForWorkflow(workflowId)
logger.info('[SSE] Triggered workflow variables reload', { workflowId })
}
} catch (err) {
logger.warn('[SSE] Failed to reload workflow variables', {
error: err instanceof Error ? err.message : String(err),
})
}
}
// Generate API key: update deployment status with the new key
if (targetState === ClientToolCallState.success && current.name === 'generate_api_key') {
try {
const resultPayload = asRecord(
data?.result || eventData.result || eventData.data || data?.data
)
const input = asRecord(current.params)
const workflowId =
(input?.workflowId as string) || useWorkflowRegistry.getState().activeWorkflowId
const apiKey = (resultPayload?.apiKey || resultPayload?.key) as string | undefined
if (workflowId) {
const existingStatus = useWorkflowRegistry
.getState()
.getWorkflowDeploymentStatus(workflowId)
useWorkflowRegistry
.getState()
.setDeploymentStatus(
workflowId,
existingStatus?.isDeployed ?? false,
existingStatus?.deployedAt,
apiKey
)
logger.info('[SSE] Updated deployment status with API key', {
workflowId,
hasKey: !!apiKey,
})
}
} catch (err) {
logger.warn('[SSE] Failed to hydrate API key status', {
error: err instanceof Error ? err.message : String(err),
})
}
}
} }
for (let i = 0; i < context.contentBlocks.length; i++) { for (let i = 0; i < context.contentBlocks.length; i++) {
@@ -460,16 +397,24 @@ export const sseHandlers: Record<string, SSEHandler> = {
: failedDependency || skipped : failedDependency || skipped
? ClientToolCallState.rejected ? ClientToolCallState.rejected
: ClientToolCallState.error : ClientToolCallState.error
const paramsForBlock =
b.toolCall?.id === toolCallId
? paramsForCurrentToolCall || b.toolCall?.params
: b.toolCall?.params
context.contentBlocks[i] = { context.contentBlocks[i] = {
...b, ...b,
toolCall: { toolCall: {
...b.toolCall, ...b.toolCall,
params: paramsForBlock,
ui: uiMetadata || b.toolCall?.ui,
execution: executionMetadata || b.toolCall?.execution,
state: targetState, state: targetState,
display: resolveToolDisplay( display: resolveDisplayFromServerUi(
b.toolCall?.name, b.toolCall?.name,
targetState, targetState,
toolCallId, toolCallId,
b.toolCall?.params paramsForBlock,
uiMetadata || b.toolCall?.ui
), ),
}, },
} }
@@ -483,106 +428,29 @@ export const sseHandlers: Record<string, SSEHandler> = {
}) })
} }
}, },
tool_error: (data, context, get, set) => { 'copilot.tool.call': (data, context, get, set) => {
try {
const errorData = asRecord(data?.data)
const toolCallId: string | undefined =
data?.toolCallId || (errorData.id as string | undefined)
const failedDependency: boolean = data?.failedDependency === true
if (!toolCallId) return
const { toolCallsById } = get()
const current = toolCallsById[toolCallId]
if (current) {
if (
isRejectedState(current.state) ||
isReviewState(current.state) ||
isBackgroundState(current.state)
) {
return
}
const targetState = failedDependency
? ClientToolCallState.rejected
: ClientToolCallState.error
const updatedMap = { ...toolCallsById }
updatedMap[toolCallId] = {
...current,
state: targetState,
display: resolveToolDisplay(current.name, targetState, current.id, current.params),
}
set({ toolCallsById: updatedMap })
}
for (let i = 0; i < context.contentBlocks.length; i++) {
const b = context.contentBlocks[i]
if (b?.type === 'tool_call' && b?.toolCall?.id === toolCallId) {
if (
isRejectedState(b.toolCall?.state) ||
isReviewState(b.toolCall?.state) ||
isBackgroundState(b.toolCall?.state)
)
break
const targetState = failedDependency
? ClientToolCallState.rejected
: ClientToolCallState.error
context.contentBlocks[i] = {
...b,
toolCall: {
...b.toolCall,
state: targetState,
display: resolveToolDisplay(
b.toolCall?.name,
targetState,
toolCallId,
b.toolCall?.params
),
},
}
break
}
}
updateStreamingMessage(set, context)
} catch (error) {
logger.warn('Failed to process tool_error SSE event', {
error: error instanceof Error ? error.message : String(error),
})
}
},
tool_generating: (data, context, get, set) => {
const { toolCallId, toolName } = data
if (!toolCallId || !toolName) return
const { toolCallsById } = get()
if (!toolCallsById[toolCallId]) {
const isAutoAllowed = get().isToolAutoAllowed(toolName)
const initialState = isAutoAllowed
? ClientToolCallState.executing
: ClientToolCallState.pending
const tc: CopilotToolCall = {
id: toolCallId,
name: toolName,
state: initialState,
display: resolveToolDisplay(toolName, initialState, toolCallId),
}
const updated = { ...toolCallsById, [toolCallId]: tc }
set({ toolCallsById: updated })
logger.info('[toolCallsById] map updated', updated)
upsertToolCallBlock(context, tc)
updateStreamingMessage(set, context)
}
},
tool_call: (data, context, get, set) => {
const toolData = asRecord(data?.data) const toolData = asRecord(data?.data)
const id: string | undefined = (toolData.id as string | undefined) || data?.toolCallId const id: string | undefined =
const name: string | undefined = (toolData.name as string | undefined) || data?.toolName (toolData.id as string | undefined) ||
(toolData.callId as string | undefined) ||
data?.toolCallId
const name: string | undefined =
(toolData.name as string | undefined) ||
(toolData.toolName as string | undefined) ||
data?.toolName
if (!id) return if (!id) return
const args = toolData.arguments as Record<string, unknown> | undefined const args = toolData.arguments as Record<string, unknown> | undefined
const isPartial = toolData.partial === true const isPartial = toolData.partial === true
const uiMetadata = extractToolUiMetadata(toolData)
const executionMetadata = extractToolExecutionMetadata(toolData)
const serverState = toolData.state
const { toolCallsById } = get() const { toolCallsById } = get()
const existing = toolCallsById[id] const existing = toolCallsById[id]
const toolName = name || existing?.name || 'unknown_tool' const toolName = name || existing?.name || 'unknown_tool'
const isAutoAllowed = get().isToolAutoAllowed(toolName) let initialState = serverState
let initialState = isAutoAllowed ? ClientToolCallState.executing : ClientToolCallState.pending ? mapServerStateToClientState(serverState)
: ClientToolCallState.pending
// Avoid flickering back to pending on partial/duplicate events once a tool is executing. // Avoid flickering back to pending on partial/duplicate events once a tool is executing.
if ( if (
@@ -597,15 +465,25 @@ export const sseHandlers: Record<string, SSEHandler> = {
...existing, ...existing,
name: toolName, name: toolName,
state: initialState, state: initialState,
ui: uiMetadata || existing.ui,
execution: executionMetadata || existing.execution,
...(args ? { params: args } : {}), ...(args ? { params: args } : {}),
display: resolveToolDisplay(toolName, initialState, id, args || existing.params), display: resolveDisplayFromServerUi(
toolName,
initialState,
id,
args || existing.params,
uiMetadata || existing.ui
),
} }
: { : {
id, id,
name: toolName, name: toolName,
state: initialState, state: initialState,
ui: uiMetadata,
execution: executionMetadata,
...(args ? { params: args } : {}), ...(args ? { params: args } : {}),
display: resolveToolDisplay(toolName, initialState, id, args), display: resolveDisplayFromServerUi(toolName, initialState, id, args, uiMetadata),
} }
const updated = { ...toolCallsById, [id]: next } const updated = { ...toolCallsById, [id]: next }
set({ toolCallsById: updated }) set({ toolCallsById: updated })
@@ -618,20 +496,12 @@ export const sseHandlers: Record<string, SSEHandler> = {
return return
} }
// Auto-allowed tools: send confirmation to the server so it can proceed const shouldInterrupt = next.ui?.showInterrupt === true
// without waiting for the user to click "Allow".
if (isAutoAllowed) {
sendAutoAcceptConfirmation(id)
}
// Client-executable run tools: execute on the client for real-time feedback // Client-run capability: execution is delegated to the browser.
// (block pulsing, console logs, stop button). The server defers execution // We run immediately only when no interrupt is required.
// for these tools in interactive mode; the client reports back via mark-complete. if (isClientRunCapability(next) && !shouldInterrupt) {
if ( executeRunToolOnClient(id, toolName, args || next.params || {})
CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName) &&
initialState === ClientToolCallState.executing
) {
executeRunToolOnClient(id, toolName, args || existing?.params || {})
} }
// OAuth: dispatch event to open the OAuth connect modal // OAuth: dispatch event to open the OAuth connect modal
@@ -661,7 +531,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
return return
}, },
reasoning: (data, context, _get, set) => { 'copilot.phase.progress': (data, context, _get, set) => {
const phase = (data && (data.phase || data?.data?.phase)) as string | undefined const phase = (data && (data.phase || data?.data?.phase)) as string | undefined
if (phase === 'start') { if (phase === 'start') {
beginThinkingBlock(context) beginThinkingBlock(context)
@@ -678,7 +548,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
appendThinkingContent(context, chunk) appendThinkingContent(context, chunk)
updateStreamingMessage(set, context) updateStreamingMessage(set, context)
}, },
content: (data, context, get, set) => { 'copilot.content': (data, context, get, set) => {
if (!data.data) return if (!data.data) return
context.pendingContent += data.data context.pendingContent += data.data
@@ -893,7 +763,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
updateStreamingMessage(set, context) updateStreamingMessage(set, context)
} }
}, },
done: (_data, context) => { 'copilot.phase.completed': (_data, context) => {
logger.info('[SSE] DONE EVENT RECEIVED', { logger.info('[SSE] DONE EVENT RECEIVED', {
doneEventCount: context.doneEventCount, doneEventCount: context.doneEventCount,
data: _data, data: _data,
@@ -904,7 +774,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
context.streamComplete = true context.streamComplete = true
} }
}, },
error: (data, context, _get, set) => { 'copilot.error': (data, context, _get, set) => {
logger.error('Stream error:', data.error) logger.error('Stream error:', data.error)
set((state: CopilotStore) => ({ set((state: CopilotStore) => ({
messages: state.messages.map((msg) => messages: state.messages.map((msg) =>
@@ -919,6 +789,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
})) }))
context.streamComplete = true context.streamComplete = true
}, },
'copilot.phase.started': () => {},
stream_end: (_data, context, _get, set) => { stream_end: (_data, context, _get, set) => {
if (context.pendingContent) { if (context.pendingContent) {
if (context.isInThinkingBlock && context.currentThinkingBlock) { if (context.isInThinkingBlock && context.currentThinkingBlock) {
@@ -933,3 +804,8 @@ export const sseHandlers: Record<string, SSEHandler> = {
}, },
default: () => {}, default: () => {},
} }
sseHandlers['copilot.tool.interrupt_required'] = sseHandlers['copilot.tool.call']
sseHandlers['copilot.workflow.patch'] = sseHandlers['copilot.tool.result']
sseHandlers['copilot.workflow.verify'] = sseHandlers['copilot.tool.result']
sseHandlers['copilot.tool.interrupt_resolved'] = sseHandlers['copilot.tool.result']
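
For orientation, here is a minimal sketch of how a handler map keyed by these copilot.* event types could be consumed. The dispatcher name and wrapper are illustrative assumptions (the actual entry point is not shown in this diff); only the handler map, its `default` entry, and the aliases above are taken from the change.

// Illustrative sketch, not part of the diff. Import paths mirror those seen elsewhere in
// this change; the dispatcher function itself is a hypothetical helper.
import { sseHandlers } from '@/lib/copilot/client-sse/handlers'
import type { ClientStreamingContext } from '@/lib/copilot/client-sse/types'
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
import type { CopilotStore } from '@/stores/panel/copilot/types'

async function dispatchClientSseEvent(
  event: SSEEvent,
  context: ClientStreamingContext,
  get: () => CopilotStore,
  set: (partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)) => void
): Promise<void> {
  // Aliased keys such as 'copilot.tool.interrupt_required' point at the same handler
  // object as 'copilot.tool.call', so a plain lookup covers them.
  const handler = sseHandlers[event.type] ?? sseHandlers.default
  await handler(event, context, get, set)
}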

View File

@@ -15,10 +15,7 @@ const logger = createLogger('CopilotRunToolExecution')
* (block pulsing, logs, stop button, etc.). * (block pulsing, logs, stop button, etc.).
*/ */
export const CLIENT_EXECUTABLE_RUN_TOOLS = new Set([
-  'run_workflow',
-  'run_workflow_until_block',
-  'run_from_block',
-  'run_block',
+  'workflow_run',
])
/** /**
@@ -74,21 +71,44 @@ async function doExecuteRunTool(
| Record<string, unknown> | Record<string, unknown>
| undefined | undefined
const runMode =
toolName === 'workflow_run' ? ((params.mode as string | undefined) || 'full').toLowerCase() : undefined
if (
toolName === 'workflow_run' &&
runMode !== 'full' &&
runMode !== 'until_block' &&
runMode !== 'from_block' &&
runMode !== 'block'
) {
const error = `Unsupported workflow_run mode: ${String(params.mode)}`
logger.warn('[RunTool] Execution prevented: unsupported workflow_run mode', {
toolCallId,
mode: params.mode,
})
setToolState(toolCallId, ClientToolCallState.error)
await reportCompletion(toolCallId, false, error)
return
}
  const stopAfterBlockId = (() => {
-    if (toolName === 'run_workflow_until_block')
-      return params.stopAfterBlockId as string | undefined
-    if (toolName === 'run_block') return params.blockId as string | undefined
+    if (toolName === 'workflow_run' && runMode === 'until_block') {
+      return params.stopAfterBlockId as string | undefined
+    }
+    if (toolName === 'workflow_run' && runMode === 'block') {
+      return params.blockId as string | undefined
+    }
    return undefined
  })()

  const runFromBlock = (() => {
-    if (toolName === 'run_from_block' && params.startBlockId) {
+    if (toolName === 'workflow_run' && runMode === 'from_block' && params.startBlockId) {
      return {
        startBlockId: params.startBlockId as string,
        executionId: (params.executionId as string | undefined) || 'latest',
      }
    }
-    if (toolName === 'run_block' && params.blockId) {
+    if (toolName === 'workflow_run' && runMode === 'block' && params.blockId) {
      return {
        startBlockId: params.blockId as string,
        executionId: (params.executionId as string | undefined) || 'latest',

View File

@@ -0,0 +1,172 @@
/**
* @vitest-environment node
*/
import { describe, expect, it, vi } from 'vitest'
import { applySseEvent } from '@/lib/copilot/client-sse/subagent-handlers'
import type { ClientStreamingContext } from '@/lib/copilot/client-sse/types'
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types'
type StoreSet = (
partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)
) => void
function createTestStore(initialToolCalls: Record<string, CopilotToolCall>) {
const state: Partial<CopilotStore> = {
messages: [{ id: 'assistant-msg', role: 'assistant', content: '', timestamp: new Date().toISOString() }],
toolCallsById: { ...initialToolCalls },
currentChat: null,
chats: [],
activeStream: null,
updatePlanTodoStatus: vi.fn(),
handleNewChatCreation: vi.fn().mockResolvedValue(undefined),
}
const get = () => state as CopilotStore
const set: StoreSet = (partial) => {
const patch = typeof partial === 'function' ? partial(get()) : partial
Object.assign(state, patch)
}
return { get, set }
}
function createStreamingContext(): ClientStreamingContext {
return {
messageId: 'assistant-msg',
accumulatedContent: '',
contentBlocks: [],
currentTextBlock: null,
isInThinkingBlock: false,
currentThinkingBlock: null,
isInDesignWorkflowBlock: false,
designWorkflowContent: '',
pendingContent: '',
doneEventCount: 0,
streamComplete: false,
subAgentContent: {},
subAgentToolCalls: {},
subAgentBlocks: {},
suppressStreamingUpdates: true,
}
}
describe('client SSE copilot.* stream smoke', () => {
it('processes main tool call/result events with copilot.* keys', async () => {
const { get, set } = createTestStore({})
const context = createStreamingContext()
await applySseEvent(
{
type: 'copilot.tool.call',
data: { id: 'main-tool-1', name: 'get_user_workflow', state: 'executing', arguments: {} },
} as any,
context,
get,
set
)
await applySseEvent(
{
type: 'copilot.tool.result',
toolCallId: 'main-tool-1',
success: true,
result: { ok: true },
data: {
id: 'main-tool-1',
name: 'get_user_workflow',
phase: 'completed',
state: 'success',
success: true,
result: { ok: true },
},
} as any,
context,
get,
set
)
expect(get().toolCallsById['main-tool-1']).toBeDefined()
expect(get().toolCallsById['main-tool-1'].state).toBe(ClientToolCallState.success)
expect(
context.contentBlocks.some(
(block) => block.type === 'tool_call' && block.toolCall?.id === 'main-tool-1'
)
).toBe(true)
})
it('processes subagent start/tool/result/end with copilot.* keys', async () => {
const parentToolCallId = 'parent-edit-tool'
const { get, set } = createTestStore({
[parentToolCallId]: {
id: parentToolCallId,
name: 'edit',
state: ClientToolCallState.executing,
},
})
const context = createStreamingContext()
await applySseEvent(
{
type: 'copilot.subagent.started',
subagent: 'edit',
data: { tool_call_id: parentToolCallId },
} as any,
context,
get,
set
)
await applySseEvent(
{
type: 'copilot.tool.call',
subagent: 'edit',
data: {
id: 'sub-tool-1',
name: 'workflow_context_get',
state: 'executing',
arguments: { includeSchemas: false },
},
} as any,
context,
get,
set
)
await applySseEvent(
{
type: 'copilot.tool.result',
subagent: 'edit',
data: {
id: 'sub-tool-1',
name: 'workflow_context_get',
phase: 'completed',
state: 'success',
success: true,
result: { contextPackId: 'pack-1' },
},
} as any,
context,
get,
set
)
await applySseEvent(
{
type: 'copilot.subagent.completed',
subagent: 'edit',
data: {},
} as any,
context,
get,
set
)
const parentToolCall = get().toolCallsById[parentToolCallId]
expect(parentToolCall).toBeDefined()
expect(parentToolCall.subAgentStreaming).toBe(false)
expect(parentToolCall.subAgentToolCalls?.length).toBe(1)
expect(parentToolCall.subAgentToolCalls?.[0]?.id).toBe('sub-tool-1')
expect(parentToolCall.subAgentToolCalls?.[0]?.state).toBe(ClientToolCallState.success)
})
})

View File

@@ -6,16 +6,23 @@ import {
  shouldSkipToolResultEvent,
} from '@/lib/copilot/orchestrator/sse-utils'
import type { SSEEvent } from '@/lib/copilot/orchestrator/types'
-import { resolveToolDisplay } from '@/lib/copilot/store-utils'
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
import type { CopilotStore, CopilotToolCall } from '@/stores/panel/copilot/types'
import {
  type SSEHandler,
-  sendAutoAcceptConfirmation,
  sseHandlers,
  updateStreamingMessage,
} from './handlers'
import { CLIENT_EXECUTABLE_RUN_TOOLS, executeRunToolOnClient } from './run-tool-execution'
+import {
+  extractOperationListFromResultPayload,
+  extractToolExecutionMetadata,
+  extractToolUiMetadata,
+  isWorkflowChangeApplyCall,
+  mapServerStateToClientState,
+  resolveDisplayFromServerUi,
+} from './tool-call-helpers'
+import { applyToolEffects } from './tool-effects'
import type { ClientStreamingContext } from './types'
const logger = createLogger('CopilotClientSubagentHandlers') const logger = createLogger('CopilotClientSubagentHandlers')
@@ -24,6 +31,13 @@ type StoreSet = (
partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>) partial: Partial<CopilotStore> | ((state: CopilotStore) => Partial<CopilotStore>)
) => void ) => void
function isClientRunCapability(toolCall: CopilotToolCall): boolean {
if (toolCall.execution?.target === 'sim_client_capability') {
return toolCall.execution.capabilityId === 'workflow.run' || !toolCall.execution.capabilityId
}
return CLIENT_EXECUTABLE_RUN_TOOLS.has(toolCall.name)
}
export function appendSubAgentContent( export function appendSubAgentContent(
context: ClientStreamingContext, context: ClientStreamingContext,
parentToolCallId: string, parentToolCallId: string,
@@ -110,11 +124,11 @@ export function updateToolCallWithSubAgentData(
} }
export const subAgentSSEHandlers: Record<string, SSEHandler> = { export const subAgentSSEHandlers: Record<string, SSEHandler> = {
-  start: () => {
-    // Subagent start event - no action needed, parent is already tracked from subagent_start
+  'copilot.phase.started': () => {
+    // No-op: parent subagent association is handled by copilot.subagent.started.
  },
-  content: (data, context, get, set) => {
+  'copilot.content': (data, context, get, set) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
const contentStr = typeof data.data === 'string' ? data.data : data.content || '' const contentStr = typeof data.data === 'string' ? data.data : data.content || ''
logger.info('[SubAgent] content event', { logger.info('[SubAgent] content event', {
@@ -135,7 +149,7 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
updateToolCallWithSubAgentData(context, get, set, parentToolCallId) updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
}, },
-  reasoning: (data, context, get, set) => {
+  'copilot.phase.progress': (data, context, get, set) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
const dataObj = asRecord(data?.data) const dataObj = asRecord(data?.data)
const phase = data?.phase || (dataObj.phase as string | undefined) const phase = data?.phase || (dataObj.phase as string | undefined)
@@ -151,11 +165,7 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
updateToolCallWithSubAgentData(context, get, set, parentToolCallId) updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
}, },
-  tool_generating: () => {
-    // Tool generating event - no action needed, we'll handle the actual tool_call
-  },
-  tool_call: async (data, context, get, set) => {
+  'copilot.tool.call': async (data, context, get, set) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
if (!parentToolCallId) return if (!parentToolCallId) return
@@ -164,6 +174,8 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
const name: string | undefined = (toolData.name as string | undefined) || data?.toolName const name: string | undefined = (toolData.name as string | undefined) || data?.toolName
if (!id || !name) return if (!id || !name) return
const isPartial = toolData.partial === true const isPartial = toolData.partial === true
const uiMetadata = extractToolUiMetadata(toolData)
const executionMetadata = extractToolExecutionMetadata(toolData)
let args: Record<string, unknown> | undefined = (toolData.arguments || toolData.input) as let args: Record<string, unknown> | undefined = (toolData.arguments || toolData.input) as
| Record<string, unknown> | Record<string, unknown>
@@ -199,9 +211,10 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
const existingToolCall = const existingToolCall =
existingIndex >= 0 ? context.subAgentToolCalls[parentToolCallId][existingIndex] : undefined existingIndex >= 0 ? context.subAgentToolCalls[parentToolCallId][existingIndex] : undefined
-    // Auto-allowed tools skip pending state to avoid flashing interrupt buttons
-    const isAutoAllowed = get().isToolAutoAllowed(name)
-    let initialState = isAutoAllowed ? ClientToolCallState.executing : ClientToolCallState.pending
+    const serverState = toolData.state
+    let initialState = serverState
+      ? mapServerStateToClientState(serverState)
+      : ClientToolCallState.pending
// Avoid flickering back to pending on partial/duplicate events once a tool is executing. // Avoid flickering back to pending on partial/duplicate events once a tool is executing.
if ( if (
@@ -215,8 +228,10 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
      id,
      name,
      state: initialState,
+     ui: uiMetadata,
+     execution: executionMetadata,
      ...(args ? { params: args } : {}),
-     display: resolveToolDisplay(name, initialState, id, args),
+     display: resolveDisplayFromServerUi(name, initialState, id, args, uiMetadata),
    }
if (existingIndex >= 0) { if (existingIndex >= 0) {
@@ -241,21 +256,16 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
return return
} }
-    // Auto-allowed tools: send confirmation to the server so it can proceed
-    // without waiting for the user to click "Allow".
-    if (isAutoAllowed) {
-      sendAutoAcceptConfirmation(id)
-    }
+    const shouldInterrupt = subAgentToolCall.ui?.showInterrupt === true
-    // Client-executable run tools: if auto-allowed, execute immediately for
-    // real-time feedback. For non-auto-allowed, the user must click "Allow"
-    // first — handleRun in tool-call.tsx triggers executeRunToolOnClient.
-    if (CLIENT_EXECUTABLE_RUN_TOOLS.has(name) && isAutoAllowed) {
-      executeRunToolOnClient(id, name, args || {})
-    }
+    // Client-run capability: execution is delegated to the browser.
+    // Execute immediately only for non-interrupting calls.
+    if (isClientRunCapability(subAgentToolCall) && !shouldInterrupt) {
+      executeRunToolOnClient(id, name, args || {})
+    }
}, },
-  tool_result: (data, context, get, set) => {
+  'copilot.tool.result': (data, context, get, set) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
if (!parentToolCallId) return if (!parentToolCallId) return
@@ -275,17 +285,51 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
if (!context.subAgentToolCalls[parentToolCallId]) return if (!context.subAgentToolCalls[parentToolCallId]) return
if (!context.subAgentBlocks[parentToolCallId]) return if (!context.subAgentBlocks[parentToolCallId]) return
-    const targetState = success ? ClientToolCallState.success : ClientToolCallState.error
+    const serverState = resultData.state
const targetState = serverState
? mapServerStateToClientState(serverState)
: success
? ClientToolCallState.success
: ClientToolCallState.error
const uiMetadata = extractToolUiMetadata(resultData)
const executionMetadata = extractToolExecutionMetadata(resultData)
const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex( const existingIndex = context.subAgentToolCalls[parentToolCallId].findIndex(
(tc: CopilotToolCall) => tc.id === toolCallId (tc: CopilotToolCall) => tc.id === toolCallId
) )
if (existingIndex >= 0) { if (existingIndex >= 0) {
const existing = context.subAgentToolCalls[parentToolCallId][existingIndex] const existing = context.subAgentToolCalls[parentToolCallId][existingIndex]
let nextParams = existing.params
const resultPayload = asRecord(
data?.result || resultData.result || resultData.data || data?.data
)
if (
targetState === ClientToolCallState.success &&
isWorkflowChangeApplyCall(existing.name, existing.params as Record<string, unknown>) &&
resultPayload
) {
const operations = extractOperationListFromResultPayload(resultPayload)
if (operations && operations.length > 0) {
nextParams = {
...(existing.params || {}),
operations,
}
}
}
      const updatedSubAgentToolCall = {
        ...existing,
+       params: nextParams,
+       ui: uiMetadata || existing.ui,
+       execution: executionMetadata || existing.execution,
        state: targetState,
-       display: resolveToolDisplay(existing.name, targetState, toolCallId, existing.params),
+       display: resolveDisplayFromServerUi(
+         existing.name,
+         targetState,
+         toolCallId,
+         nextParams,
+         uiMetadata || existing.ui
+       ),
      }
context.subAgentToolCalls[parentToolCallId][existingIndex] = updatedSubAgentToolCall context.subAgentToolCalls[parentToolCallId][existingIndex] = updatedSubAgentToolCall
@@ -309,12 +353,18 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
state: targetState, state: targetState,
}) })
} }
applyToolEffects({
effectsRaw: resultData.effects,
toolCall: updatedSubAgentToolCall,
resultPayload,
})
} }
updateToolCallWithSubAgentData(context, get, set, parentToolCallId) updateToolCallWithSubAgentData(context, get, set, parentToolCallId)
}, },
-  done: (_data, context, get, set) => {
+  'copilot.phase.completed': (_data, context, get, set) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
if (!parentToolCallId) return if (!parentToolCallId) return
@@ -322,6 +372,11 @@ export const subAgentSSEHandlers: Record<string, SSEHandler> = {
}, },
} }
subAgentSSEHandlers['copilot.tool.interrupt_required'] = subAgentSSEHandlers['copilot.tool.call']
subAgentSSEHandlers['copilot.workflow.patch'] = subAgentSSEHandlers['copilot.tool.result']
subAgentSSEHandlers['copilot.workflow.verify'] = subAgentSSEHandlers['copilot.tool.result']
subAgentSSEHandlers['copilot.tool.interrupt_resolved'] = subAgentSSEHandlers['copilot.tool.result']
export async function applySseEvent( export async function applySseEvent(
rawData: SSEEvent, rawData: SSEEvent,
context: ClientStreamingContext, context: ClientStreamingContext,
@@ -334,7 +389,7 @@ export async function applySseEvent(
} }
const data = normalizedEvent const data = normalizedEvent
-  if (data.type === 'subagent_start') {
+  if (data.type === 'copilot.subagent.started') {
const startData = asRecord(data.data) const startData = asRecord(data.data)
const toolCallId = startData.tool_call_id as string | undefined const toolCallId = startData.tool_call_id as string | undefined
if (toolCallId) { if (toolCallId) {
@@ -357,7 +412,7 @@ export async function applySseEvent(
return true return true
} }
-  if (data.type === 'subagent_end') {
+  if (data.type === 'copilot.subagent.completed') {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
if (parentToolCallId) { if (parentToolCallId) {
const { toolCallsById } = get() const { toolCallsById } = get()

View File

@@ -0,0 +1,134 @@
import { asRecord } from '@/lib/copilot/orchestrator/sse-utils'
import { humanizedFallback, resolveToolDisplay } from '@/lib/copilot/store-utils'
import { ClientToolCallState } from '@/lib/copilot/tools/client/tool-display-registry'
import type { CopilotToolCall } from '@/stores/panel/copilot/types'
export function mapServerStateToClientState(state: unknown): ClientToolCallState {
switch (String(state || '')) {
case 'generating':
return ClientToolCallState.generating
case 'pending':
case 'awaiting_approval':
return ClientToolCallState.pending
case 'executing':
return ClientToolCallState.executing
case 'success':
return ClientToolCallState.success
case 'rejected':
case 'skipped':
return ClientToolCallState.rejected
case 'aborted':
return ClientToolCallState.aborted
case 'error':
case 'failed':
return ClientToolCallState.error
default:
return ClientToolCallState.pending
}
}
export function extractToolUiMetadata(
data: Record<string, unknown>
): CopilotToolCall['ui'] | undefined {
const ui = asRecord(data.ui)
if (!ui || Object.keys(ui).length === 0) return undefined
const autoAllowedFromUi = ui.autoAllowed === true
const autoAllowedFromData = data.autoAllowed === true
return {
title: typeof ui.title === 'string' ? ui.title : undefined,
phaseLabel: typeof ui.phaseLabel === 'string' ? ui.phaseLabel : undefined,
icon: typeof ui.icon === 'string' ? ui.icon : undefined,
showInterrupt: ui.showInterrupt === true,
showRemember: ui.showRemember === true,
autoAllowed: autoAllowedFromUi || autoAllowedFromData,
actions: Array.isArray(ui.actions)
? ui.actions
.map((action) => {
const a = asRecord(action)
const id = typeof a.id === 'string' ? a.id : undefined
const label = typeof a.label === 'string' ? a.label : undefined
const kind: 'accept' | 'reject' = a.kind === 'reject' ? 'reject' : 'accept'
if (!id || !label) return null
return {
id,
label,
kind,
remember: a.remember === true,
}
})
.filter((a): a is NonNullable<typeof a> => !!a)
: undefined,
}
}
export function extractToolExecutionMetadata(
data: Record<string, unknown>
): CopilotToolCall['execution'] | undefined {
const execution = asRecord(data.execution)
if (!execution || Object.keys(execution).length === 0) return undefined
return {
target: typeof execution.target === 'string' ? execution.target : undefined,
capabilityId: typeof execution.capabilityId === 'string' ? execution.capabilityId : undefined,
}
}
function displayVerb(state: ClientToolCallState): string {
switch (state) {
case ClientToolCallState.success:
return 'Completed'
case ClientToolCallState.error:
return 'Failed'
case ClientToolCallState.rejected:
return 'Skipped'
case ClientToolCallState.aborted:
return 'Aborted'
case ClientToolCallState.generating:
return 'Preparing'
case ClientToolCallState.pending:
return 'Waiting'
default:
return 'Running'
}
}
export function resolveDisplayFromServerUi(
toolName: string,
state: ClientToolCallState,
toolCallId: string,
params: Record<string, unknown> | undefined,
ui?: CopilotToolCall['ui']
) {
const fallback =
resolveToolDisplay(toolName, state, toolCallId, params) ||
humanizedFallback(toolName, state)
if (!fallback) return undefined
if (ui?.phaseLabel) {
return { text: ui.phaseLabel, icon: fallback.icon }
}
if (ui?.title) {
return { text: `${displayVerb(state)} ${ui.title}`, icon: fallback.icon }
}
return fallback
}
export function isWorkflowChangeApplyCall(
toolName?: string,
params?: Record<string, unknown>
): boolean {
if (toolName !== 'workflow_change') return false
const mode = typeof params?.mode === 'string' ? params.mode.toLowerCase() : ''
if (mode === 'apply') return true
return typeof params?.proposalId === 'string' && params.proposalId.length > 0
}
export function extractOperationListFromResultPayload(
resultPayload: Record<string, unknown>
): Array<Record<string, unknown>> | undefined {
const operations = resultPayload.operations
if (Array.isArray(operations)) return operations as Array<Record<string, unknown>>
const compiled = resultPayload.compiledOperations
if (Array.isArray(compiled)) return compiled as Array<Record<string, unknown>>
return undefined
}
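
Taken together, these helpers convert a raw server tool payload into client UI state. A small usage sketch follows; it is illustrative only (the payload values are invented and the absolute import path is assumed from the relative import seen above), and it mirrors the fields the SSE handlers read from copilot.tool.call events.

// Illustrative sketch, not part of the diff. Payload field names follow what the handlers
// read; the concrete values are made up.
import {
  extractToolExecutionMetadata,
  extractToolUiMetadata,
  mapServerStateToClientState,
  resolveDisplayFromServerUi,
} from '@/lib/copilot/client-sse/tool-call-helpers'

const toolData: Record<string, unknown> = {
  id: 'tool-1',
  name: 'workflow_run',
  state: 'executing',
  arguments: { mode: 'full' },
  ui: { title: 'Run workflow', showInterrupt: false },
  execution: { target: 'sim_client_capability', capabilityId: 'workflow.run' },
}

const state = mapServerStateToClientState(toolData.state) // ClientToolCallState.executing
const ui = extractToolUiMetadata(toolData)
const execution = extractToolExecutionMetadata(toolData)
// With ui.title set and no phaseLabel, the display text becomes "Running Run workflow",
// assuming the registry or humanized fallback supplies a base display to borrow the icon from.
const display = resolveDisplayFromServerUi(
  toolData.name as string,
  state,
  toolData.id as string,
  toolData.arguments as Record<string, unknown>,
  ui
)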

View File

@@ -0,0 +1,170 @@
/**
* @vitest-environment node
*/
import { loggerMock } from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest'
vi.mock('@sim/logger', () => loggerMock)
const mocked = vi.hoisted(() => ({
setProposedChanges: vi.fn().mockResolvedValue(undefined),
loadEnvironmentVariables: vi.fn(),
loadVariablesForWorkflow: vi.fn(),
getWorkflowDeploymentStatus: vi.fn().mockReturnValue(null),
setDeploymentStatus: vi.fn(),
registryState: {
activeWorkflowId: 'workflow-active',
},
}))
vi.mock('@/stores/workflow-diff/store', () => ({
useWorkflowDiffStore: {
getState: () => ({
setProposedChanges: mocked.setProposedChanges,
}),
},
}))
vi.mock('@/stores/settings/environment/store', () => ({
useEnvironmentStore: {
getState: () => ({
loadEnvironmentVariables: mocked.loadEnvironmentVariables,
}),
},
}))
vi.mock('@/stores/panel/variables/store', () => ({
useVariablesStore: {
getState: () => ({
loadForWorkflow: mocked.loadVariablesForWorkflow,
}),
},
}))
vi.mock('@/stores/workflows/registry/store', () => ({
useWorkflowRegistry: {
getState: () => ({
activeWorkflowId: mocked.registryState.activeWorkflowId,
getWorkflowDeploymentStatus: mocked.getWorkflowDeploymentStatus,
setDeploymentStatus: mocked.setDeploymentStatus,
}),
},
}))
import { applyToolEffects } from '@/lib/copilot/client-sse/tool-effects'
describe('applyToolEffects', () => {
beforeEach(() => {
vi.clearAllMocks()
mocked.registryState.activeWorkflowId = 'workflow-active'
})
it('applies workflow_change fallback diff when effects are absent', () => {
const workflowState = {
blocks: {
start: { id: 'start', metadata: { id: 'start', type: 'start' }, inputs: {}, outputs: {} },
},
edges: [],
loops: {},
parallels: {},
}
applyToolEffects({
effectsRaw: [],
toolCall: {
id: 'tool-1',
name: 'workflow_change',
state: 'success',
params: { workflowId: 'workflow-123' },
} as any,
resultPayload: {
workflowState,
},
})
expect(mocked.setProposedChanges).toHaveBeenCalledTimes(1)
expect(mocked.setProposedChanges).toHaveBeenCalledWith(workflowState)
})
it('applies workflow_change fallback diff from nested editResult.workflowState', () => {
const workflowState = {
blocks: {
start: { id: 'start', metadata: { id: 'start', type: 'start' }, inputs: {}, outputs: {} },
},
edges: [],
loops: {},
parallels: {},
}
applyToolEffects({
effectsRaw: [],
toolCall: {
id: 'tool-2',
name: 'workflow_change',
state: 'success',
} as any,
resultPayload: {
editResult: {
workflowState,
},
},
})
expect(mocked.setProposedChanges).toHaveBeenCalledTimes(1)
expect(mocked.setProposedChanges).toHaveBeenCalledWith(workflowState)
})
it('applies explicit workflow.diff.proposed effect', () => {
const workflowState = {
blocks: {
start: { id: 'start', metadata: { id: 'start', type: 'start' }, inputs: {}, outputs: {} },
},
edges: [],
loops: {},
parallels: {},
}
applyToolEffects({
effectsRaw: [
{
kind: 'workflow.diff.proposed',
payload: {
workflowState,
},
},
],
toolCall: {
id: 'tool-3',
name: 'workflow_change',
state: 'success',
} as any,
})
expect(mocked.setProposedChanges).toHaveBeenCalledTimes(1)
expect(mocked.setProposedChanges).toHaveBeenCalledWith(workflowState)
})
it('does not apply fallback diff for non-workflow_change tools', () => {
const workflowState = {
blocks: {},
edges: [],
loops: {},
parallels: {},
}
applyToolEffects({
effectsRaw: [],
toolCall: {
id: 'tool-4',
name: 'list_workflows',
state: 'success',
} as any,
resultPayload: {
workflowState,
},
})
expect(mocked.setProposedChanges).not.toHaveBeenCalled()
})
})

View File

@@ -0,0 +1,180 @@
import { createLogger } from '@sim/logger'
import { asRecord } from '@/lib/copilot/orchestrator/sse-utils'
import type { CopilotToolCall } from '@/stores/panel/copilot/types'
import { useVariablesStore } from '@/stores/panel/variables/store'
import { useEnvironmentStore } from '@/stores/settings/environment/store'
import { useWorkflowDiffStore } from '@/stores/workflow-diff/store'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
const logger = createLogger('CopilotToolEffects')
type ParsedToolEffect = {
kind: string
payload: Record<string, unknown>
}
function asNonEmptyRecord(value: unknown): Record<string, unknown> | null {
const record = asRecord(value)
return Object.keys(record).length > 0 ? record : null
}
function parseToolEffects(raw: unknown): ParsedToolEffect[] {
if (!Array.isArray(raw)) return []
const effects: ParsedToolEffect[] = []
for (const item of raw) {
const effect = asRecord(item)
const kind = typeof effect.kind === 'string' ? effect.kind : ''
if (!kind) continue
effects.push({
kind,
payload: asRecord(effect.payload) || {},
})
}
return effects
}
function resolveWorkflowId(
payload: Record<string, unknown>,
toolCall?: CopilotToolCall
): string | undefined {
const payloadWorkflowId = typeof payload.workflowId === 'string' ? payload.workflowId : undefined
if (payloadWorkflowId) return payloadWorkflowId
const params = asRecord(toolCall?.params)
const paramWorkflowId = typeof params?.workflowId === 'string' ? params.workflowId : undefined
if (paramWorkflowId) return paramWorkflowId
return useWorkflowRegistry.getState().activeWorkflowId || undefined
}
function resolveWorkflowState(
payload: Record<string, unknown>,
resultPayload?: Record<string, unknown>
): WorkflowState | null {
const payloadState = asNonEmptyRecord(payload.workflowState)
if (payloadState) return payloadState as unknown as WorkflowState
if (resultPayload) {
const directState = asNonEmptyRecord(resultPayload.workflowState)
if (directState) return directState as unknown as WorkflowState
const editResult = asRecord(resultPayload.editResult)
const nestedState = asNonEmptyRecord(editResult?.workflowState)
if (nestedState) return nestedState as unknown as WorkflowState
}
return null
}
function applyDeploymentSyncEffect(payload: Record<string, unknown>, toolCall?: CopilotToolCall): void {
const workflowId = resolveWorkflowId(payload, toolCall)
if (!workflowId) return
const registry = useWorkflowRegistry.getState()
const existingStatus = registry.getWorkflowDeploymentStatus(workflowId)
const isDeployed =
typeof payload.isDeployed === 'boolean'
? payload.isDeployed
: (existingStatus?.isDeployed ?? true)
const deployedAt = (() => {
if (typeof payload.deployedAt === 'string' && payload.deployedAt) {
const parsed = new Date(payload.deployedAt)
if (!Number.isNaN(parsed.getTime())) return parsed
}
return existingStatus?.deployedAt
})()
const apiKey =
typeof payload.apiKey === 'string' && payload.apiKey.length > 0
? payload.apiKey
: existingStatus?.apiKey
registry.setDeploymentStatus(workflowId, isDeployed, deployedAt, apiKey)
}
function applyApiKeySyncEffect(payload: Record<string, unknown>, toolCall?: CopilotToolCall): void {
const workflowId = resolveWorkflowId(payload, toolCall)
if (!workflowId) return
const apiKey = typeof payload.apiKey === 'string' ? payload.apiKey : undefined
const registry = useWorkflowRegistry.getState()
const existingStatus = registry.getWorkflowDeploymentStatus(workflowId)
registry.setDeploymentStatus(
workflowId,
existingStatus?.isDeployed ?? false,
existingStatus?.deployedAt,
apiKey || existingStatus?.apiKey
)
}
function applyWorkflowVariablesReload(
payload: Record<string, unknown>,
toolCall?: CopilotToolCall
): void {
const workflowId = resolveWorkflowId(payload, toolCall)
if (!workflowId) return
useVariablesStore.getState().loadForWorkflow(workflowId)
}
export function applyToolEffects(params: {
effectsRaw: unknown
toolCall?: CopilotToolCall
resultPayload?: Record<string, unknown>
}): void {
const effects = parseToolEffects(params.effectsRaw)
if (effects.length === 0) {
if (params.toolCall?.name === 'workflow_change' && params.resultPayload) {
const workflowState = resolveWorkflowState({}, params.resultPayload)
if (!workflowState) return
useWorkflowDiffStore
.getState()
.setProposedChanges(workflowState)
.catch((error) => {
logger.error('Failed to apply fallback workflow diff from result payload', {
error: error instanceof Error ? error.message : String(error),
})
})
}
return
}
for (const effect of effects) {
switch (effect.kind) {
case 'workflow.diff.proposed': {
const workflowState = resolveWorkflowState(effect.payload, params.resultPayload)
if (!workflowState) break
useWorkflowDiffStore
.getState()
.setProposedChanges(workflowState)
.catch((error) => {
logger.error('Failed to apply workflow diff effect', {
error: error instanceof Error ? error.message : String(error),
})
})
break
}
case 'workflow.deployment.sync':
applyDeploymentSyncEffect(effect.payload, params.toolCall)
break
case 'workflow.api_key.sync':
applyApiKeySyncEffect(effect.payload, params.toolCall)
break
case 'environment.variables.reload':
useEnvironmentStore.getState().loadEnvironmentVariables()
break
case 'workflow.variables.reload':
applyWorkflowVariablesReload(effect.payload, params.toolCall)
break
default:
logger.debug('Ignoring unknown tool effect', { kind: effect.kind })
break
}
}
}
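
The effect kinds handled above can also be exercised directly. The following sketch is illustrative (the workflow id and timestamp are invented; the import path comes from the test file above) and shows a deployment sync plus an environment reload:

// Illustrative call, not part of the diff. Effect kinds and the argument shape match
// applyToolEffects above; toolCall is omitted since it is optional and the payload
// already carries a workflowId.
import { applyToolEffects } from '@/lib/copilot/client-sse/tool-effects'

applyToolEffects({
  effectsRaw: [
    {
      kind: 'workflow.deployment.sync',
      payload: {
        workflowId: 'workflow-123',
        isDeployed: true,
        deployedAt: new Date().toISOString(),
      },
    },
    { kind: 'environment.variables.reload', payload: {} },
  ],
})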

View File

@@ -101,9 +101,6 @@ export const COPILOT_CHECKPOINTS_API_PATH = '/api/copilot/checkpoints'
/** POST — revert to a checkpoint. */
export const COPILOT_CHECKPOINTS_REVERT_API_PATH = '/api/copilot/checkpoints/revert'

-/** GET/POST/DELETE — manage auto-allowed tools. */
-export const COPILOT_AUTO_ALLOWED_TOOLS_API_PATH = '/api/copilot/auto-allowed-tools'

/** GET — fetch dynamically available copilot models. */
export const COPILOT_MODELS_API_PATH = '/api/copilot/models'

View File

@@ -1,67 +0,0 @@
export const INTERRUPT_TOOL_NAMES = [
'set_global_workflow_variables',
'run_workflow',
'run_workflow_until_block',
'run_from_block',
'run_block',
'manage_mcp_tool',
'manage_custom_tool',
'deploy_mcp',
'deploy_chat',
'deploy_api',
'create_workspace_mcp_server',
'set_environment_variables',
'make_api_request',
'oauth_request_access',
'navigate_ui',
'knowledge_base',
'generate_api_key',
] as const
export const INTERRUPT_TOOL_SET = new Set<string>(INTERRUPT_TOOL_NAMES)
export const SUBAGENT_TOOL_NAMES = [
'debug',
'edit',
'build',
'plan',
'test',
'deploy',
'auth',
'research',
'knowledge',
'custom_tool',
'tour',
'info',
'workflow',
'evaluate',
'superagent',
'discovery',
] as const
export const SUBAGENT_TOOL_SET = new Set<string>(SUBAGENT_TOOL_NAMES)
/**
* Respond tools are internal to the copilot's subagent system.
* They're used by subagents to signal completion and should NOT be executed by the sim side.
* The copilot backend handles these internally.
*/
export const RESPOND_TOOL_NAMES = [
'plan_respond',
'edit_respond',
'build_respond',
'debug_respond',
'info_respond',
'research_respond',
'deploy_respond',
'superagent_respond',
'discovery_respond',
'tour_respond',
'auth_respond',
'workflow_respond',
'knowledge_respond',
'custom_tool_respond',
'test_respond',
] as const
export const RESPOND_TOOL_SET = new Set<string>(RESPOND_TOOL_NAMES)

View File

@@ -54,14 +54,14 @@ describe('sse-handlers tool lifecycle', () => {
} }
}) })
-  it('executes tool_call and emits tool_result + mark-complete', async () => {
+  it('executes copilot.tool.call and emits copilot.tool.result + mark-complete', async () => {
executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } }) executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } })
markToolComplete.mockResolvedValueOnce(true) markToolComplete.mockResolvedValueOnce(true)
const onEvent = vi.fn() const onEvent = vi.fn()
-    await sseHandlers.tool_call(
+    await sseHandlers['copilot.tool.call'](
      {
-        type: 'tool_call',
+        type: 'copilot.tool.call',
data: { id: 'tool-1', name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } }, data: { id: 'tool-1', name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } },
} as any, } as any,
context, context,
@@ -73,7 +73,7 @@ describe('sse-handlers tool lifecycle', () => {
expect(markToolComplete).toHaveBeenCalledTimes(1) expect(markToolComplete).toHaveBeenCalledTimes(1)
expect(onEvent).toHaveBeenCalledWith( expect(onEvent).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({
-        type: 'tool_result',
+        type: 'copilot.tool.result',
toolCallId: 'tool-1', toolCallId: 'tool-1',
success: true, success: true,
}) })
@@ -84,17 +84,17 @@ describe('sse-handlers tool lifecycle', () => {
expect(updated?.result?.output).toEqual({ ok: true }) expect(updated?.result?.output).toEqual({ ok: true })
}) })
-  it('skips duplicate tool_call after result', async () => {
+  it('skips duplicate copilot.tool.call after result', async () => {
executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } }) executeToolServerSide.mockResolvedValueOnce({ success: true, output: { ok: true } })
markToolComplete.mockResolvedValueOnce(true) markToolComplete.mockResolvedValueOnce(true)
const event = { const event = {
-      type: 'tool_call',
+      type: 'copilot.tool.call',
data: { id: 'tool-dup', name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } }, data: { id: 'tool-dup', name: 'get_user_workflow', arguments: { workflowId: 'workflow-1' } },
} }
-    await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })
-    await sseHandlers.tool_call(event as any, context, execContext, { interactive: false })
+    await sseHandlers['copilot.tool.call'](event as any, context, execContext, { interactive: false })
+    await sseHandlers['copilot.tool.call'](event as any, context, execContext, { interactive: false })
expect(executeToolServerSide).toHaveBeenCalledTimes(1) expect(executeToolServerSide).toHaveBeenCalledTimes(1)
expect(markToolComplete).toHaveBeenCalledTimes(1) expect(markToolComplete).toHaveBeenCalledTimes(1)

View File

@@ -1,17 +1,12 @@
import { createLogger } from '@sim/logger'
import { STREAM_TIMEOUT_MS } from '@/lib/copilot/constants'
-import { RESPOND_TOOL_SET, SUBAGENT_TOOL_SET } from '@/lib/copilot/orchestrator/config'
import {
  asRecord,
  getEventData,
  markToolResultSeen,
  wasToolResultSeen,
} from '@/lib/copilot/orchestrator/sse-utils'
-import {
-  isIntegrationTool,
-  isToolAvailableOnSimSide,
-  markToolComplete,
-} from '@/lib/copilot/orchestrator/tool-executor'
+import { markToolComplete } from '@/lib/copilot/orchestrator/tool-executor'
import type {
  ContentBlock,
  ExecutionContext,
@@ -22,7 +17,6 @@ import type {
} from '@/lib/copilot/orchestrator/types'
import {
  executeToolAndReport,
-  isInterruptToolName,
  waitForToolCompletion,
  waitForToolDecision,
} from './tool-execution'
@@ -35,12 +29,208 @@ const logger = createLogger('CopilotSseHandlers')
* execution to the browser client instead of running executeWorkflow directly. * execution to the browser client instead of running executeWorkflow directly.
*/ */
const CLIENT_EXECUTABLE_RUN_TOOLS = new Set([
-  'run_workflow',
-  'run_workflow_until_block',
-  'run_from_block',
-  'run_block',
+  'workflow_run',
])
function mapServerStateToToolStatus(state: unknown): ToolCallState['status'] {
switch (String(state || '')) {
case 'generating':
case 'pending':
case 'awaiting_approval':
return 'pending'
case 'executing':
return 'executing'
case 'success':
return 'success'
case 'rejected':
case 'skipped':
return 'rejected'
case 'aborted':
return 'skipped'
case 'error':
case 'failed':
return 'error'
default:
return 'pending'
}
}
function getExecutionTarget(
toolData: Record<string, unknown>,
toolName: string
): { target: string; capabilityId?: string } {
const execution = asRecord(toolData.execution)
if (typeof execution.target === 'string' && execution.target.length > 0) {
return {
target: execution.target,
capabilityId:
typeof execution.capabilityId === 'string' ? execution.capabilityId : undefined,
}
}
// Fallback only when metadata is missing.
if (CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName)) {
return { target: 'sim_client_capability', capabilityId: 'workflow.run' }
}
return { target: 'sim_server' }
}
function needsApproval(toolData: Record<string, unknown>): boolean {
const ui = asRecord(toolData.ui)
return ui.showInterrupt === true
}
async function waitForClientCapabilityAndReport(
toolCall: ToolCallState,
options: OrchestratorOptions,
logScope: string
): Promise<void> {
toolCall.status = 'executing'
const completion = await waitForToolCompletion(
toolCall.id,
options.timeout || STREAM_TIMEOUT_MS,
options.abortSignal
)
if (completion?.status === 'background') {
toolCall.status = 'skipped'
toolCall.endTime = Date.now()
markToolComplete(
toolCall.id,
toolCall.name,
202,
completion.message || 'Tool execution moved to background',
{ background: true }
).catch((err) => {
logger.error(`markToolComplete fire-and-forget failed (${logScope} background)`, {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
if (completion?.status === 'rejected') {
toolCall.status = 'rejected'
toolCall.endTime = Date.now()
markToolComplete(toolCall.id, toolCall.name, 400, completion.message || 'Tool execution rejected')
.catch((err) => {
logger.error(`markToolComplete fire-and-forget failed (${logScope} rejected)`, {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
const success = completion?.status === 'success'
toolCall.status = success ? 'success' : 'error'
toolCall.endTime = Date.now()
const msg = completion?.message || (success ? 'Tool completed' : 'Tool failed or timed out')
markToolComplete(toolCall.id, toolCall.name, success ? 200 : 500, msg).catch((err) => {
logger.error(`markToolComplete fire-and-forget failed (${logScope})`, {
toolCallId: toolCall.id,
toolName: toolCall.name,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
}
function markToolCallAndNotify(
toolCall: ToolCallState,
statusCode: number,
message: string,
data: Record<string, unknown> | undefined,
logScope: string
): void {
markToolComplete(toolCall.id, toolCall.name, statusCode, message, data).catch((err) => {
logger.error(`markToolComplete fire-and-forget failed (${logScope})`, {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
}
async function executeToolCallWithPolicy(
toolCall: ToolCallState,
toolName: string,
toolData: Record<string, unknown>,
context: StreamingContext,
execContext: ExecutionContext,
options: OrchestratorOptions,
logScope: string
): Promise<void> {
const execution = getExecutionTarget(toolData, toolName)
const isInteractive = options.interactive === true
const requiresApproval = isInteractive && needsApproval(toolData)
if (toolData.state) {
toolCall.status = mapServerStateToToolStatus(toolData.state)
}
if (requiresApproval) {
const decision = await waitForToolDecision(
toolCall.id,
options.timeout || STREAM_TIMEOUT_MS,
options.abortSignal
)
if (decision?.status === 'accepted' || decision?.status === 'success') {
// Continue below into normal execution path.
} else if (decision?.status === 'rejected' || decision?.status === 'error') {
toolCall.status = 'rejected'
toolCall.endTime = Date.now()
markToolCallAndNotify(
toolCall,
400,
decision.message || 'Tool execution rejected',
{ skipped: true, reason: 'user_rejected' },
`${logScope} rejected`
)
return
} else if (decision?.status === 'background') {
toolCall.status = 'skipped'
toolCall.endTime = Date.now()
markToolCallAndNotify(
toolCall,
202,
decision.message || 'Tool execution moved to background',
{ background: true },
`${logScope} background`
)
return
} else {
// Decision was null (timeout/abort).
toolCall.status = 'rejected'
toolCall.endTime = Date.now()
markToolCallAndNotify(
toolCall,
408,
'Tool approval timed out',
{ skipped: true, reason: 'timeout' },
`${logScope} timeout`
)
return
}
}
if (execution.target === 'sim_client_capability' && isInteractive) {
await waitForClientCapabilityAndReport(toolCall, options, logScope)
return
}
if (
(execution.target === 'sim_server' || execution.target === 'sim_client_capability') &&
options.autoExecuteTools !== false
) {
await executeToolAndReport(toolCall.id, context, execContext, options)
}
}
// Normalization + dedupe helpers live in sse-utils to keep server/client in sync. // Normalization + dedupe helpers live in sse-utils to keep server/client in sync.
function inferToolSuccess(data: Record<string, unknown> | undefined): { function inferToolSuccess(data: Record<string, unknown> | undefined): {
@@ -76,7 +266,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
context.chatId = asRecord(event.data).chatId as string | undefined context.chatId = asRecord(event.data).chatId as string | undefined
}, },
title_updated: () => {}, title_updated: () => {},
-  tool_result: (event, context) => {
+  'copilot.tool.result': (event, context) => {
const data = getEventData(event) const data = getEventData(event)
const toolCallId = event.toolCallId || (data?.id as string | undefined) const toolCallId = event.toolCallId || (data?.id as string | undefined)
if (!toolCallId) return if (!toolCallId) return
@@ -85,7 +275,11 @@ export const sseHandlers: Record<string, SSEHandler> = {
const { success, hasResultData, hasError } = inferToolSuccess(data) const { success, hasResultData, hasError } = inferToolSuccess(data)
-    current.status = success ? 'success' : 'error'
+    current.status = data?.state
? mapServerStateToToolStatus(data.state)
: success
? 'success'
: 'error'
current.endTime = Date.now() current.endTime = Date.now()
if (hasResultData) { if (hasResultData) {
current.result = { current.result = {
@@ -98,35 +292,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
current.error = (data?.error || resultObj.error) as string | undefined current.error = (data?.error || resultObj.error) as string | undefined
} }
}, },
-  tool_error: (event, context) => {
-    const data = getEventData(event)
-    const toolCallId = event.toolCallId || (data?.id as string | undefined)
-    if (!toolCallId) return
-    const current = context.toolCalls.get(toolCallId)
-    if (!current) return
-    current.status = 'error'
-    current.error = (data?.error as string | undefined) || 'Tool execution failed'
-    current.endTime = Date.now()
-  },
-  tool_generating: (event, context) => {
-    const data = getEventData(event)
-    const toolCallId =
-      event.toolCallId ||
-      (data?.toolCallId as string | undefined) ||
-      (data?.id as string | undefined)
-    const toolName =
-      event.toolName || (data?.toolName as string | undefined) || (data?.name as string | undefined)
-    if (!toolCallId || !toolName) return
-    if (!context.toolCalls.has(toolCallId)) {
-      context.toolCalls.set(toolCallId, {
-        id: toolCallId,
-        name: toolName,
-        status: 'pending',
-        startTime: Date.now(),
-      })
-    }
-  },
-  tool_call: async (event, context, execContext, options) => {
+  'copilot.tool.call': async (event, context, execContext, options) => {
const toolData = getEventData(event) || ({} as Record<string, unknown>) const toolData = getEventData(event) || ({} as Record<string, unknown>)
const toolCallId = (toolData.id as string | undefined) || event.toolCallId const toolCallId = (toolData.id as string | undefined) || event.toolCallId
const toolName = (toolData.name as string | undefined) || event.toolName const toolName = (toolData.name as string | undefined) || event.toolName
@@ -156,7 +322,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
context.toolCalls.set(toolCallId, { context.toolCalls.set(toolCallId, {
id: toolCallId, id: toolCallId,
name: toolName, name: toolName,
-      status: 'pending',
+      status: toolData.state ? mapServerStateToToolStatus(toolData.state) : 'pending',
params: args, params: args,
startTime: Date.now(), startTime: Date.now(),
}) })
@@ -170,149 +336,17 @@ export const sseHandlers: Record<string, SSEHandler> = {
const toolCall = context.toolCalls.get(toolCallId) const toolCall = context.toolCalls.get(toolCallId)
if (!toolCall) return if (!toolCall) return
+    await executeToolCallWithPolicy(
+      toolCall,
+      toolName,
+      toolData,
+      context,
+      execContext,
+      options,
+      'run tool'
+    )
    // Subagent tools are executed by the copilot backend, not sim side.
    if (SUBAGENT_TOOL_SET.has(toolName)) {
      return
    }

    // Respond tools are internal to copilot's subagent system - skip execution.
    // The copilot backend handles these internally to signal subagent completion.
    if (RESPOND_TOOL_SET.has(toolName)) {
      toolCall.status = 'success'
toolCall.endTime = Date.now()
toolCall.result = {
success: true,
output: 'Internal respond tool - handled by copilot backend',
}
return
}
const isInterruptTool = isInterruptToolName(toolName)
const isInteractive = options.interactive === true
// Integration tools (user-installed) also require approval in interactive mode
const needsApproval = isInterruptTool || isIntegrationTool(toolName)
if (needsApproval && isInteractive) {
const decision = await waitForToolDecision(
toolCallId,
options.timeout || STREAM_TIMEOUT_MS,
options.abortSignal
)
if (decision?.status === 'accepted' || decision?.status === 'success') {
// Client-executable run tools: defer execution to the browser client.
// The client calls executeWorkflowWithFullLogging for real-time feedback
// (block pulsing, logs, stop button) and reports completion via
// /api/copilot/confirm with status success/error. We poll Redis for
// that completion signal, then fire-and-forget markToolComplete to Go.
if (CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName)) {
toolCall.status = 'executing'
const completion = await waitForToolCompletion(
toolCallId,
options.timeout || STREAM_TIMEOUT_MS,
options.abortSignal
)
if (completion?.status === 'background') {
toolCall.status = 'skipped'
toolCall.endTime = Date.now()
markToolComplete(
toolCall.id,
toolCall.name,
202,
completion.message || 'Tool execution moved to background',
{ background: true }
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (run tool background)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCallId)
return
}
const success = completion?.status === 'success'
toolCall.status = success ? 'success' : 'error'
toolCall.endTime = Date.now()
const msg =
completion?.message || (success ? 'Tool completed' : 'Tool failed or timed out')
// Fire-and-forget: tell Go backend the tool is done
// (must NOT await — see deadlock note in executeToolAndReport)
markToolComplete(toolCall.id, toolCall.name, success ? 200 : 500, msg).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (run tool)', {
toolCallId: toolCall.id,
toolName: toolCall.name,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCallId)
return
}
await executeToolAndReport(toolCallId, context, execContext, options)
return
}
if (decision?.status === 'rejected' || decision?.status === 'error') {
toolCall.status = 'rejected'
toolCall.endTime = Date.now()
// Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport
markToolComplete(
toolCall.id,
toolCall.name,
400,
decision.message || 'Tool execution rejected',
{ skipped: true, reason: 'user_rejected' }
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (rejected)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
if (decision?.status === 'background') {
toolCall.status = 'skipped'
toolCall.endTime = Date.now()
// Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport
markToolComplete(
toolCall.id,
toolCall.name,
202,
decision.message || 'Tool execution moved to background',
{ background: true }
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (background)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
// Decision was null — timed out or aborted.
// Do NOT fall through to auto-execute. Mark the tool as timed out
// and notify Go so it can unblock waitForExternalTool.
toolCall.status = 'rejected'
toolCall.endTime = Date.now()
markToolComplete(toolCall.id, toolCall.name, 408, 'Tool approval timed out', {
skipped: true,
reason: 'timeout',
}).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (timeout)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
if (options.autoExecuteTools !== false) {
await executeToolAndReport(toolCallId, context, execContext, options)
}
}, },
-  reasoning: (event, context) => {
+  'copilot.phase.progress': (event, context) => {
const d = asRecord(event.data) const d = asRecord(event.data)
const phase = d.phase || asRecord(d.data).phase const phase = d.phase || asRecord(d.data).phase
if (phase === 'start') { if (phase === 'start') {
@@ -336,7 +370,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
if (!chunk || !context.currentThinkingBlock) return if (!chunk || !context.currentThinkingBlock) return
context.currentThinkingBlock.content = `${context.currentThinkingBlock.content || ''}${chunk}` context.currentThinkingBlock.content = `${context.currentThinkingBlock.content || ''}${chunk}`
}, },
-  content: (event, context) => {
+  'copilot.content': (event, context) => {
// Go backend sends content as a plain string in event.data, not wrapped in an object. // Go backend sends content as a plain string in event.data, not wrapped in an object.
let chunk: string | undefined let chunk: string | undefined
if (typeof event.data === 'string') { if (typeof event.data === 'string') {
@@ -349,20 +383,20 @@ export const sseHandlers: Record<string, SSEHandler> = {
context.accumulatedContent += chunk context.accumulatedContent += chunk
addContentBlock(context, { type: 'text', content: chunk }) addContentBlock(context, { type: 'text', content: chunk })
}, },
-  done: (event, context) => {
+  'copilot.phase.completed': (event, context) => {
const d = asRecord(event.data) const d = asRecord(event.data)
if (d.responseId) { if (d.responseId) {
context.conversationId = d.responseId as string context.conversationId = d.responseId as string
} }
context.streamComplete = true context.streamComplete = true
}, },
-  start: (event, context) => {
+  'copilot.phase.started': (event, context) => {
const d = asRecord(event.data) const d = asRecord(event.data)
if (d.responseId) { if (d.responseId) {
context.conversationId = d.responseId as string context.conversationId = d.responseId as string
} }
}, },
-  error: (event, context) => {
+  'copilot.error': (event, context) => {
const d = asRecord(event.data) const d = asRecord(event.data)
const message = (d.message || d.error || event.error) as string | undefined const message = (d.message || d.error || event.error) as string | undefined
if (message) { if (message) {
@@ -373,7 +407,7 @@ export const sseHandlers: Record<string, SSEHandler> = {
} }
export const subAgentHandlers: Record<string, SSEHandler> = { export const subAgentHandlers: Record<string, SSEHandler> = {
-  content: (event, context) => {
+  'copilot.content': (event, context) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
if (!parentToolCallId || !event.data) return if (!parentToolCallId || !event.data) return
// Go backend sends content as a plain string in event.data // Go backend sends content as a plain string in event.data
@@ -389,7 +423,7 @@ export const subAgentHandlers: Record<string, SSEHandler> = {
(context.subAgentContent[parentToolCallId] || '') + chunk (context.subAgentContent[parentToolCallId] || '') + chunk
addContentBlock(context, { type: 'subagent_text', content: chunk }) addContentBlock(context, { type: 'subagent_text', content: chunk })
}, },
-  tool_call: async (event, context, execContext, options) => {
+  'copilot.tool.call': async (event, context, execContext, options) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
if (!parentToolCallId) return if (!parentToolCallId) return
const toolData = getEventData(event) || ({} as Record<string, unknown>) const toolData = getEventData(event) || ({} as Record<string, unknown>)
@@ -410,7 +444,7 @@ export const subAgentHandlers: Record<string, SSEHandler> = {
const toolCall: ToolCallState = { const toolCall: ToolCallState = {
id: toolCallId, id: toolCallId,
name: toolName, name: toolName,
-      status: 'pending',
+      status: toolData.state ? mapServerStateToToolStatus(toolData.state) : 'pending',
params: args, params: args,
startTime: Date.now(), startTime: Date.now(),
} }
@@ -428,159 +462,17 @@ export const subAgentHandlers: Record<string, SSEHandler> = {
if (isPartial) return if (isPartial) return
+    await executeToolCallWithPolicy(
+      toolCall,
+      toolName,
+      toolData,
+      context,
+      execContext,
+      options,
+      'subagent run tool'
+    )
    // Respond tools are internal to copilot's subagent system - skip execution.
    if (RESPOND_TOOL_SET.has(toolName)) {
      toolCall.status = 'success'
      toolCall.endTime = Date.now()
      toolCall.result = {
        success: true,
        output: 'Internal respond tool - handled by copilot backend',
      }
      return
    }
// Tools that only exist on the Go backend (e.g. search_patterns,
// search_errors, remember_debug) should NOT be re-executed on the Sim side.
// The Go backend already executed them and will send its own tool_result
// SSE event with the real outcome. Trying to execute them here would fail
// with "Tool not found" and incorrectly mark the tool as failed.
if (!isToolAvailableOnSimSide(toolName)) {
return
}
// Interrupt tools and integration tools (user-installed) require approval
// in interactive mode, same as top-level handler.
const needsSubagentApproval = isInterruptToolName(toolName) || isIntegrationTool(toolName)
if (options.interactive === true && needsSubagentApproval) {
const decision = await waitForToolDecision(
toolCallId,
options.timeout || STREAM_TIMEOUT_MS,
options.abortSignal
)
if (decision?.status === 'accepted' || decision?.status === 'success') {
await executeToolAndReport(toolCallId, context, execContext, options)
return
}
if (decision?.status === 'rejected' || decision?.status === 'error') {
toolCall.status = 'rejected'
toolCall.endTime = Date.now()
// Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport
markToolComplete(
toolCall.id,
toolCall.name,
400,
decision.message || 'Tool execution rejected',
{ skipped: true, reason: 'user_rejected' }
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (subagent rejected)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
if (decision?.status === 'background') {
toolCall.status = 'skipped'
toolCall.endTime = Date.now()
// Fire-and-forget: must NOT await — see deadlock note in executeToolAndReport
markToolComplete(
toolCall.id,
toolCall.name,
202,
decision.message || 'Tool execution moved to background',
{ background: true }
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (subagent background)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
// Decision was null — timed out or aborted.
// Do NOT fall through to auto-execute.
toolCall.status = 'rejected'
toolCall.endTime = Date.now()
markToolComplete(toolCall.id, toolCall.name, 408, 'Tool approval timed out', {
skipped: true,
reason: 'timeout',
}).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (subagent timeout)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCall.id)
return
}
// Client-executable run tools in interactive mode: defer to client.
// Same pattern as main handler: wait for client completion, then tell Go.
if (options.interactive === true && CLIENT_EXECUTABLE_RUN_TOOLS.has(toolName)) {
toolCall.status = 'executing'
const completion = await waitForToolCompletion(
toolCallId,
options.timeout || STREAM_TIMEOUT_MS,
options.abortSignal
)
if (completion?.status === 'rejected') {
toolCall.status = 'rejected'
toolCall.endTime = Date.now()
markToolComplete(
toolCall.id,
toolCall.name,
400,
completion.message || 'Tool execution rejected'
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (subagent run tool rejected)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCallId)
return
}
if (completion?.status === 'background') {
toolCall.status = 'skipped'
toolCall.endTime = Date.now()
markToolComplete(
toolCall.id,
toolCall.name,
202,
completion.message || 'Tool execution moved to background',
{ background: true }
).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (subagent run tool background)', {
toolCallId: toolCall.id,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCallId)
return
}
const success = completion?.status === 'success'
toolCall.status = success ? 'success' : 'error'
toolCall.endTime = Date.now()
const msg = completion?.message || (success ? 'Tool completed' : 'Tool failed or timed out')
markToolComplete(toolCall.id, toolCall.name, success ? 200 : 500, msg).catch((err) => {
logger.error('markToolComplete fire-and-forget failed (subagent run tool)', {
toolCallId: toolCall.id,
toolName: toolCall.name,
error: err instanceof Error ? err.message : String(err),
})
})
markToolResultSeen(toolCallId)
return
}
if (options.autoExecuteTools !== false) {
await executeToolAndReport(toolCallId, context, execContext, options)
}
}, },
tool_result: (event, context) => { 'copilot.tool.result': (event, context) => {
const parentToolCallId = context.subAgentParentToolCallId const parentToolCallId = context.subAgentParentToolCallId
if (!parentToolCallId) return if (!parentToolCallId) return
const data = getEventData(event) const data = getEventData(event)
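The hunk above collapses roughly 150 lines of per-call approval, rejection, background, and timeout handling into a single `executeToolCallWithPolicy` call. The helper's real implementation is not part of this excerpt, so the sketch below is only a guess at its shape, inferred from the call site and the deleted branches. `ToolCallState`, `StreamingContext`, and the functions it references all appear in the removed code; the options shape, the unused `toolData`/`label` parameters, and the omission of `markToolComplete`/`markToolResultSeen` reporting are simplifications made for illustration.

```ts
// Hypothetical sketch of a consolidated policy helper; not the repository's actual code.
// The signature is inferred from the call site; the body paraphrases the removed branches.
async function executeToolCallWithPolicy(
  toolCall: ToolCallState,
  toolName: string,
  toolData: Record<string, unknown>, // presumably consulted for interrupt/partial metadata by the real helper
  context: StreamingContext,
  execContext: any, // concrete type not visible in this diff
  options: { interactive?: boolean; timeout?: number; abortSignal?: AbortSignal; autoExecuteTools?: boolean },
  label: string // presumably used in log messages by the real helper
): Promise<void> {
  // Respond tools are internal to the copilot subagent system, so complete them locally.
  if (RESPOND_TOOL_SET.has(toolName)) {
    toolCall.status = 'success'
    toolCall.endTime = Date.now()
    return
  }

  // Go-backend-only tools were already executed remotely; their result arrives
  // as a later SSE event, so nothing runs on the Sim side.
  if (!isToolAvailableOnSimSide(toolName)) return

  // Interrupt and integration tools require user approval in interactive mode.
  if (options.interactive === true && (isInterruptToolName(toolName) || isIntegrationTool(toolName))) {
    const decision = await waitForToolDecision(
      toolCall.id,
      options.timeout || STREAM_TIMEOUT_MS,
      options.abortSignal
    )
    if (decision?.status !== 'accepted' && decision?.status !== 'success') {
      toolCall.status = decision?.status === 'background' ? 'skipped' : 'rejected'
      toolCall.endTime = Date.now()
      return
    }
  }

  // Default path: execute locally and report the outcome back to the backend.
  if (options.autoExecuteTools !== false) {
    await executeToolAndReport(toolCall.id, context, execContext, options)
  }
}
```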
@@ -596,7 +488,7 @@ export const subAgentHandlers: Record<string, SSEHandler> = {
     const { success, hasResultData, hasError } = inferToolSuccess(data)
-    const status = success ? 'success' : 'error'
+    const status = data?.state ? mapServerStateToToolStatus(data.state) : success ? 'success' : 'error'
     const endTime = Date.now()
     const result = hasResultData ? { success, output: data?.result || data?.data } : undefined
@@ -620,8 +512,22 @@ export const subAgentHandlers: Record<string, SSEHandler> = {
       }
     }
   },
+  'copilot.phase.progress': () => {
+    // Subagent reasoning chunks are surfaced via copilot.content.
+  },
+  'copilot.phase.completed': () => {},
 }

+sseHandlers['copilot.tool.interrupt_required'] = sseHandlers['copilot.tool.call']
+sseHandlers['copilot.workflow.patch'] = sseHandlers['copilot.tool.result']
+sseHandlers['copilot.workflow.verify'] = sseHandlers['copilot.tool.result']
+sseHandlers['copilot.tool.interrupt_resolved'] = sseHandlers['copilot.tool.result']
+
+subAgentHandlers['copilot.tool.interrupt_required'] = subAgentHandlers['copilot.tool.call']
+subAgentHandlers['copilot.workflow.patch'] = subAgentHandlers['copilot.tool.result']
+subAgentHandlers['copilot.workflow.verify'] = subAgentHandlers['copilot.tool.result']
+subAgentHandlers['copilot.tool.interrupt_resolved'] = subAgentHandlers['copilot.tool.result']
+
 export function handleSubagentRouting(event: SSEEvent, context: StreamingContext): boolean {
   if (!event.subagent) return false
   if (!context.subAgentParentToolCallId) {
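The trailing assignments register the interrupt and workflow event names as aliases of the existing tool handlers rather than duplicating their logic. A minimal check of the consequence, assuming the surrounding dispatch loop (not shown in this diff) looks handlers up by event name:

```ts
// The aliased names resolve to the very same handler functions, so an
// interrupt_required event runs exactly the copilot.tool.call logic.
const sameToolHandler =
  sseHandlers['copilot.tool.interrupt_required'] === sseHandlers['copilot.tool.call'] // true
const sameResultHandler =
  subAgentHandlers['copilot.workflow.patch'] === subAgentHandlers['copilot.tool.result'] // true
```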

Some files were not shown because too many files have changed in this diff.