Compare commits

..

19 Commits

Author SHA1 Message Date
Waleed Latif
ad109712c0 move userstats record creation inside tx 2026-02-11 20:22:27 -08:00
Waleed Latif
720137414f lint 2026-02-11 20:16:57 -08:00
Waleed Latif
043f060c24 reran migrations 2026-02-11 20:14:34 -08:00
Waleed Latif
8abe8af289 update admin routes 2026-02-11 19:57:46 -08:00
Waleed
85284eb7c4 fix(terminal): reconnect to running executions after page refresh (#3200)
* fix(terminal): reconnect to running executions after page refresh

* fix(terminal): use ExecutionEvent type instead of any in reconnection stream

* fix(execution): type event buffer with ExecutionEvent instead of Record<string, unknown>

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(execution): validate fromEventId query param in reconnection endpoint

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* Fix some bugs

* fix(variables): fix tag dropdown and cursor alignment in variables block (#3199)

* feat(confluence): added list space labels, delete label, delete page prop (#3201)

* updated route

* ack comments

* fix(execution): reset execution state in reconnection cleanup to unblock re-entry

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(execution): restore running entries when reconnection is interrupted by navigation

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* done

* remove cast in ioredis types

* ack PR comments

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Co-authored-by: Siddharth Ganesan <siddharthganesan@gmail.com>
2026-02-11 19:37:12 -08:00
Waleed
d068ed6d65 fix(change-detection): resolve false positive trigger block change detection (#3204) 2026-02-11 19:37:12 -08:00
Vikhyath Mondreti
bef43f3e84 fix build 2026-02-11 19:37:12 -08:00
Waleed
7453a2bd27 fix(confl): use recommended query param pattern for confluence route (#3202)
* fix(confl): use recommended query param pattern for confluence route

* use unused var
2026-02-11 19:37:12 -08:00
Waleed
bf41c78815 feat(confluence): added list space labels, delete label, delete page prop (#3201) 2026-02-11 19:37:12 -08:00
Waleed
ccdd42639f fix(variables): fix tag dropdown and cursor alignment in variables block (#3199) 2026-02-11 19:37:12 -08:00
Waleed
4a1dd261da fix(hotkeys): remove C, T, E tab-switching hotkeys (#3197) 2026-02-11 19:37:12 -08:00
Waleed
ae43131cfe improvement(oom): increase trigger machine size (#3196) 2026-02-11 19:37:12 -08:00
Waleed Latif
808dc4f8b4 remove duplicate index 2026-02-11 13:14:52 -08:00
Waleed Latif
27e03c44dc remove default 2026-02-11 13:07:42 -08:00
Waleed Latif
05d1c92e1a added zod 2026-02-11 12:29:37 -08:00
Waleed Latif
9228893c19 more 2026-02-11 12:25:19 -08:00
Waleed Latif
eedf67013c feat(creators): added referrers, code redemption, campaign tracking, etc 2026-02-11 11:54:50 -08:00
Waleed
2f492cacc1 feat(providers): add Gemini Deep Research via Interactions API (#3192)
* feat(providers): add Gemini Deep Research via Interactions API

* fix(providers): hide memory UI for deep research models

* feat(providers): add multi-turn support and token logging for deep research

* fix(providers): only collect user messages as deep research input

* fix(providers): forward previousInteractionId to provider request

* fix(blocks): hide memory child fields for deep research models

* remove memory params from models that don't support it in provider requests

* update blog
2026-02-11 01:01:59 -08:00
Vikhyath Mondreti
5792e7e5f9 fix(auth): workflow system handler (#3193) 2026-02-10 22:25:48 -08:00
73 changed files with 15518 additions and 294 deletions

View File

@@ -41,9 +41,6 @@ Diese Tastenkombinationen wechseln zwischen den Panel-Tabs auf der rechten Seite
 | Tastenkombination | Aktion |
 |----------|--------|
-| `C` | Copilot-Tab fokussieren |
-| `T` | Toolbar-Tab fokussieren |
-| `E` | Editor-Tab fokussieren |
 | `Mod` + `F` | Toolbar-Suche fokussieren |
 ## Globale Navigation

View File

@@ -43,9 +43,6 @@ These shortcuts switch between panel tabs on the right side of the canvas.
 | Shortcut | Action |
 |----------|--------|
-| `C` | Focus Copilot tab |
-| `T` | Focus Toolbar tab |
-| `E` | Focus Editor tab |
 | `Mod` + `F` | Focus Toolbar search |
 ## Global Navigation

View File

@@ -399,6 +399,28 @@ Create a new custom property (metadata) on a Confluence page.
 | ↳ `authorId` | string | Account ID of the version author |
 | ↳ `createdAt` | string | ISO 8601 timestamp of version creation |
+### `confluence_delete_page_property`
+Delete a content property from a Confluence page by its property ID.
+#### Input
+| Parameter | Type | Required | Description |
+| --------- | ---- | -------- | ----------- |
+| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
+| `pageId` | string | Yes | The ID of the page containing the property |
+| `propertyId` | string | Yes | The ID of the property to delete |
+| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
+#### Output
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| `ts` | string | ISO 8601 timestamp of the operation |
+| `pageId` | string | ID of the page |
+| `propertyId` | string | ID of the deleted property |
+| `deleted` | boolean | Deletion status |
 ### `confluence_search`
 Search for content across Confluence pages, blog posts, and other content.
@@ -872,6 +894,90 @@ Add a label to a Confluence page for organization and categorization.
 | `labelName` | string | Name of the added label |
 | `labelId` | string | ID of the added label |
+### `confluence_delete_label`
+Remove a label from a Confluence page.
+#### Input
+| Parameter | Type | Required | Description |
+| --------- | ---- | -------- | ----------- |
+| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
+| `pageId` | string | Yes | Confluence page ID to remove the label from |
+| `labelName` | string | Yes | Name of the label to remove |
+| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
+#### Output
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| `ts` | string | ISO 8601 timestamp of the operation |
+| `pageId` | string | Page ID the label was removed from |
+| `labelName` | string | Name of the removed label |
+| `deleted` | boolean | Deletion status |
+### `confluence_get_pages_by_label`
+Retrieve all pages that have a specific label applied.
+#### Input
+| Parameter | Type | Required | Description |
+| --------- | ---- | -------- | ----------- |
+| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
+| `labelId` | string | Yes | The ID of the label to get pages for |
+| `limit` | number | No | Maximum number of pages to return \(default: 50, max: 250\) |
+| `cursor` | string | No | Pagination cursor from previous response |
+| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
+#### Output
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| `ts` | string | ISO 8601 timestamp of the operation |
+| `labelId` | string | ID of the label |
+| `pages` | array | Array of pages with this label |
+| ↳ `id` | string | Unique page identifier |
+| ↳ `title` | string | Page title |
+| ↳ `status` | string | Page status \(e.g., current, archived, trashed, draft\) |
+| ↳ `spaceId` | string | ID of the space containing the page |
+| ↳ `parentId` | string | ID of the parent page \(null if top-level\) |
+| ↳ `authorId` | string | Account ID of the page author |
+| ↳ `createdAt` | string | ISO 8601 timestamp when the page was created |
+| ↳ `version` | object | Page version information |
+| ↳ `number` | number | Version number |
+| ↳ `message` | string | Version message |
+| ↳ `minorEdit` | boolean | Whether this is a minor edit |
+| ↳ `authorId` | string | Account ID of the version author |
+| ↳ `createdAt` | string | ISO 8601 timestamp of version creation |
+| `nextCursor` | string | Cursor for fetching the next page of results |
+### `confluence_list_space_labels`
+List all labels associated with a Confluence space.
+#### Input
+| Parameter | Type | Required | Description |
+| --------- | ---- | -------- | ----------- |
+| `domain` | string | Yes | Your Confluence domain \(e.g., yourcompany.atlassian.net\) |
+| `spaceId` | string | Yes | The ID of the Confluence space to list labels from |
+| `limit` | number | No | Maximum number of labels to return \(default: 25, max: 250\) |
+| `cursor` | string | No | Pagination cursor from previous response |
+| `cloudId` | string | No | Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain. |
+#### Output
+| Parameter | Type | Description |
+| --------- | ---- | ----------- |
+| `ts` | string | ISO 8601 timestamp of the operation |
+| `spaceId` | string | ID of the space |
+| `labels` | array | Array of labels on the space |
+| ↳ `id` | string | Unique label identifier |
+| ↳ `name` | string | Label name |
+| ↳ `prefix` | string | Label prefix/type \(e.g., global, my, team\) |
+| `nextCursor` | string | Cursor for fetching the next page of results |
 ### `confluence_get_space`
 Get details about a specific Confluence space.
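
To make the new tool tables concrete, here is a sketch of the input and output shapes for `confluence_list_space_labels`. The values are made up; only the field names, types, and defaults come from the tables above.

```ts
// Illustrative input (values are hypothetical).
const input = {
  domain: 'yourcompany.atlassian.net', // required
  spaceId: '98307',                    // required
  limit: 25,                           // optional: default 25, max 250
  // cursor and cloudId are optional and omitted here
}

// A successful response follows the documented output shape:
const output = {
  ts: '2026-02-11T20:00:00.000Z',
  spaceId: '98307',
  labels: [{ id: '12345', name: 'onboarding', prefix: 'global' }],
  nextCursor: null, // set when another page of results exists
}
```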

View File

@@ -42,9 +42,6 @@ Estos atajos cambian entre las pestañas del panel en el lado derecho del lienzo
 | Atajo | Acción |
 |----------|--------|
-| `C` | Enfocar pestaña Copilot |
-| `T` | Enfocar pestaña Barra de herramientas |
-| `E` | Enfocar pestaña Editor |
 | `Mod` + `F` | Enfocar búsqueda de Barra de herramientas |
 ## Navegación global

View File

@@ -42,9 +42,6 @@ Ces raccourcis permettent de basculer entre les onglets du panneau sur le côté
 | Raccourci | Action |
 |----------|--------|
-| `C` | Activer l'onglet Copilot |
-| `T` | Activer l'onglet Barre d'outils |
-| `E` | Activer l'onglet Éditeur |
 | `Mod` + `F` | Activer la recherche dans la barre d'outils |
 ## Navigation globale

View File

@@ -41,9 +41,6 @@ import { Callout } from 'fumadocs-ui/components/callout'
 | ショートカット | 操作 |
 |----------|--------|
-| `C` | Copilotタブにフォーカス |
-| `T` | Toolbarタブにフォーカス |
-| `E` | Editorタブにフォーカス |
 | `Mod` + `F` | Toolbar検索にフォーカス |
 ## グローバルナビゲーション

View File

@@ -41,9 +41,6 @@ import { Callout } from 'fumadocs-ui/components/callout'
 | 快捷键 | 操作 |
 |----------|--------|
-| `C` | 聚焦 Copilot 标签页 |
-| `T` | 聚焦 Toolbar 标签页 |
-| `E` | 聚焦 Editor 标签页 |
 | `Mod` + `F` | 聚焦 Toolbar 搜索 |
 ## 全局导航

View File

@@ -0,0 +1,215 @@
/**
* POST /api/attribution
*
* Automatic UTM-based referral attribution for new signups.
*
* Reads the `sim_utm` cookie (set by proxy on auth pages), verifies the user
* account was created after the cookie was set, matches a campaign by UTM
* specificity, and atomically inserts an attribution record + applies bonus credits.
*
* Idempotent — the unique constraint on `userId` prevents double-attribution.
*/
import { db } from '@sim/db'
import { referralAttribution, referralCampaigns, user, userStats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { nanoid } from 'nanoid'
import { cookies } from 'next/headers'
import { NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { applyBonusCredits } from '@/lib/billing/credits/bonus'
const logger = createLogger('AttributionAPI')
const COOKIE_NAME = 'sim_utm'
const CLOCK_DRIFT_TOLERANCE_MS = 60 * 1000
const UtmCookieSchema = z.object({
utm_source: z.string().optional(),
utm_medium: z.string().optional(),
utm_campaign: z.string().optional(),
utm_content: z.string().optional(),
referrer_url: z.string().optional(),
landing_page: z.string().optional(),
created_at: z.string().min(1),
})
/**
* Finds the most specific active campaign matching the given UTM params.
* Null fields on a campaign act as wildcards. Ties broken by newest campaign.
*/
async function findMatchingCampaign(utmData: z.infer<typeof UtmCookieSchema>) {
const campaigns = await db
.select()
.from(referralCampaigns)
.where(eq(referralCampaigns.isActive, true))
let bestMatch: (typeof campaigns)[number] | null = null
let bestScore = -1
for (const campaign of campaigns) {
let score = 0
let mismatch = false
const fields = [
{ campaignVal: campaign.utmSource, utmVal: utmData.utm_source },
{ campaignVal: campaign.utmMedium, utmVal: utmData.utm_medium },
{ campaignVal: campaign.utmCampaign, utmVal: utmData.utm_campaign },
{ campaignVal: campaign.utmContent, utmVal: utmData.utm_content },
] as const
for (const { campaignVal, utmVal } of fields) {
if (campaignVal === null) continue
if (campaignVal === utmVal) {
score++
} else {
mismatch = true
break
}
}
if (!mismatch && score > 0) {
if (
score > bestScore ||
(score === bestScore &&
bestMatch &&
campaign.createdAt.getTime() > bestMatch.createdAt.getTime())
) {
bestScore = score
bestMatch = campaign
}
}
}
return bestMatch
}
export async function POST() {
try {
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const cookieStore = await cookies()
const utmCookie = cookieStore.get(COOKIE_NAME)
if (!utmCookie?.value) {
return NextResponse.json({ attributed: false, reason: 'no_utm_cookie' })
}
let utmData: z.infer<typeof UtmCookieSchema>
try {
let decoded: string
try {
decoded = decodeURIComponent(utmCookie.value)
} catch {
decoded = utmCookie.value
}
utmData = UtmCookieSchema.parse(JSON.parse(decoded))
} catch {
logger.warn('Failed to parse UTM cookie', { userId: session.user.id })
cookieStore.delete(COOKIE_NAME)
return NextResponse.json({ attributed: false, reason: 'invalid_cookie' })
}
const cookieCreatedAt = Number(utmData.created_at)
if (!Number.isFinite(cookieCreatedAt)) {
logger.warn('UTM cookie has invalid created_at timestamp', { userId: session.user.id })
cookieStore.delete(COOKIE_NAME)
return NextResponse.json({ attributed: false, reason: 'invalid_cookie' })
}
const userRows = await db
.select({ createdAt: user.createdAt })
.from(user)
.where(eq(user.id, session.user.id))
.limit(1)
if (userRows.length === 0) {
return NextResponse.json({ error: 'User not found' }, { status: 404 })
}
const userCreatedAt = userRows[0].createdAt.getTime()
if (userCreatedAt < cookieCreatedAt - CLOCK_DRIFT_TOLERANCE_MS) {
logger.info('User account predates UTM cookie, skipping attribution', {
userId: session.user.id,
userCreatedAt: new Date(userCreatedAt).toISOString(),
cookieCreatedAt: new Date(cookieCreatedAt).toISOString(),
})
cookieStore.delete(COOKIE_NAME)
return NextResponse.json({ attributed: false, reason: 'account_predates_cookie' })
}
const matchedCampaign = await findMatchingCampaign(utmData)
if (!matchedCampaign) {
cookieStore.delete(COOKIE_NAME)
return NextResponse.json({ attributed: false, reason: 'no_matching_campaign' })
}
const bonusAmount = Number(matchedCampaign.bonusCreditAmount)
let attributed = false
await db.transaction(async (tx) => {
const [existingStats] = await tx
.select({ id: userStats.id })
.from(userStats)
.where(eq(userStats.userId, session.user.id))
.limit(1)
if (!existingStats) {
await tx.insert(userStats).values({
id: nanoid(),
userId: session.user.id,
})
}
const result = await tx
.insert(referralAttribution)
.values({
id: nanoid(),
userId: session.user.id,
campaignId: matchedCampaign.id,
utmSource: utmData.utm_source || null,
utmMedium: utmData.utm_medium || null,
utmCampaign: utmData.utm_campaign || null,
utmContent: utmData.utm_content || null,
referrerUrl: utmData.referrer_url || null,
landingPage: utmData.landing_page || null,
bonusCreditAmount: bonusAmount.toString(),
})
.onConflictDoNothing({ target: referralAttribution.userId })
.returning({ id: referralAttribution.id })
if (result.length > 0) {
await applyBonusCredits(session.user.id, bonusAmount, tx)
attributed = true
}
})
if (attributed) {
logger.info('Referral attribution created and bonus credits applied', {
userId: session.user.id,
campaignId: matchedCampaign.id,
campaignName: matchedCampaign.name,
utmSource: utmData.utm_source,
utmCampaign: utmData.utm_campaign,
utmContent: utmData.utm_content,
bonusAmount,
})
} else {
logger.info('User already attributed, skipping', { userId: session.user.id })
}
cookieStore.delete(COOKIE_NAME)
return NextResponse.json({
attributed,
bonusAmount: attributed ? bonusAmount : undefined,
})
} catch (error) {
logger.error('Attribution error', { error })
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
}
}
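
A minimal sketch of how a client might call this endpoint after signup. The wrapper function is an assumption, not code from this change; the path and response fields come from the doc comment above.

```ts
// Hypothetical client-side call. The route reads the sim_utm cookie itself,
// so no request body is needed.
async function claimAttribution() {
  const res = await fetch('/api/attribution', { method: 'POST' })
  const data: { attributed: boolean; bonusAmount?: number; reason?: string } = await res.json()
  if (data.attributed) {
    console.log(`Bonus credits applied: $${data.bonusAmount}`)
  } else {
    // reason is one of: no_utm_cookie, invalid_cookie,
    // account_predates_cookie, no_matching_campaign
    console.log(`No attribution: ${data.reason ?? 'already attributed'}`)
  }
}
```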

View File

@@ -113,6 +113,7 @@ const ChatMessageSchema = z.object({
   workflowId: z.string().optional(),
   knowledgeId: z.string().optional(),
   blockId: z.string().optional(),
+  blockIds: z.array(z.string()).optional(),
   templateId: z.string().optional(),
   executionId: z.string().optional(),
   // For workflow_block, provide both workflowId and blockId
@@ -159,6 +160,20 @@ export async function POST(req: NextRequest) {
       commands,
     } = ChatMessageSchema.parse(body)
+    const normalizedContexts = Array.isArray(contexts)
+      ? contexts.map((ctx) => {
+          if (ctx.kind !== 'blocks') return ctx
+          if (Array.isArray(ctx.blockIds) && ctx.blockIds.length > 0) return ctx
+          if (ctx.blockId) {
+            return {
+              ...ctx,
+              blockIds: [ctx.blockId],
+            }
+          }
+          return ctx
+        })
+      : contexts
     // Resolve workflowId - if not provided, use first workflow or find by name
     const resolved = await resolveWorkflowIdForUser(
       authenticatedUserId,
@@ -176,10 +191,10 @@
     const userMessageIdToUse = userMessageId || crypto.randomUUID()
     try {
       logger.info(`[${tracker.requestId}] Received chat POST`, {
-        hasContexts: Array.isArray(contexts),
-        contextsCount: Array.isArray(contexts) ? contexts.length : 0,
-        contextsPreview: Array.isArray(contexts)
-          ? contexts.map((c: any) => ({
+        hasContexts: Array.isArray(normalizedContexts),
+        contextsCount: Array.isArray(normalizedContexts) ? normalizedContexts.length : 0,
+        contextsPreview: Array.isArray(normalizedContexts)
+          ? normalizedContexts.map((c: any) => ({
              kind: c?.kind,
              chatId: c?.chatId,
              workflowId: c?.workflowId,
@@ -191,17 +206,25 @@
     } catch {}
     // Preprocess contexts server-side
     let agentContexts: Array<{ type: string; content: string }> = []
-    if (Array.isArray(contexts) && contexts.length > 0) {
+    if (Array.isArray(normalizedContexts) && normalizedContexts.length > 0) {
       try {
         const { processContextsServer } = await import('@/lib/copilot/process-contents')
-        const processed = await processContextsServer(contexts as any, authenticatedUserId, message)
+        const processed = await processContextsServer(
+          normalizedContexts as any,
+          authenticatedUserId,
+          message
+        )
         agentContexts = processed
         logger.info(`[${tracker.requestId}] Contexts processed for request`, {
           processedCount: agentContexts.length,
           kinds: agentContexts.map((c) => c.type),
           lengthPreview: agentContexts.map((c) => c.content?.length ?? 0),
         })
-        if (Array.isArray(contexts) && contexts.length > 0 && agentContexts.length === 0) {
+        if (
+          Array.isArray(normalizedContexts) &&
+          normalizedContexts.length > 0 &&
+          agentContexts.length === 0
+        ) {
          logger.warn(
            `[${tracker.requestId}] Contexts provided but none processed. Check executionId for logs contexts.`
          )
@@ -246,11 +269,13 @@
         mode,
         model: selectedModel,
         provider,
+        conversationId: effectiveConversationId,
         conversationHistory,
         contexts: agentContexts,
         fileAttachments,
         commands,
         chatId: actualChatId,
+        prefetch,
         implicitFeedback,
       },
       {
@@ -432,10 +457,15 @@
       content: message,
       timestamp: new Date().toISOString(),
       ...(fileAttachments && fileAttachments.length > 0 && { fileAttachments }),
-      ...(Array.isArray(contexts) && contexts.length > 0 && { contexts }),
-      ...(Array.isArray(contexts) &&
-        contexts.length > 0 && {
-          contentBlocks: [{ type: 'contexts', contexts: contexts as any, timestamp: Date.now() }],
-        }),
+      ...(Array.isArray(normalizedContexts) &&
+        normalizedContexts.length > 0 && {
+          contexts: normalizedContexts,
+        }),
+      ...(Array.isArray(normalizedContexts) &&
+        normalizedContexts.length > 0 && {
+          contentBlocks: [
+            { type: 'contexts', contexts: normalizedContexts as any, timestamp: Date.now() },
+          ],
+        }),
     }
View File

@@ -0,0 +1,170 @@
/**
* POST /api/referral-code/redeem
*
* Redeem a referral/promo code to receive bonus credits.
*
* Body:
* - code: string — The referral code to redeem
*
* Response: { redeemed: boolean, bonusAmount?: number, error?: string }
*
* Constraints:
* - Enterprise users cannot redeem codes
* - One redemption per user, ever (unique constraint on userId)
* - One redemption per organization for team users (partial unique on organizationId)
*/
import { db } from '@sim/db'
import { referralAttribution, referralCampaigns, userStats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import { nanoid } from 'nanoid'
import { NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { applyBonusCredits } from '@/lib/billing/credits/bonus'
const logger = createLogger('ReferralCodeRedemption')
const RedeemCodeSchema = z.object({
code: z.string().min(1, 'Code is required'),
})
export async function POST(request: Request) {
try {
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const body = await request.json()
const { code } = RedeemCodeSchema.parse(body)
const subscription = await getHighestPrioritySubscription(session.user.id)
if (subscription?.plan === 'enterprise') {
return NextResponse.json({
redeemed: false,
error: 'Enterprise accounts cannot redeem referral codes',
})
}
const isTeam = subscription?.plan === 'team'
const orgId = isTeam ? subscription.referenceId : null
const normalizedCode = code.trim().toUpperCase()
const [campaign] = await db
.select()
.from(referralCampaigns)
.where(and(eq(referralCampaigns.code, normalizedCode), eq(referralCampaigns.isActive, true)))
.limit(1)
if (!campaign) {
logger.info('Invalid code redemption attempt', {
userId: session.user.id,
code: normalizedCode,
})
return NextResponse.json({ error: 'Invalid or expired code' }, { status: 404 })
}
const [existingUserAttribution] = await db
.select({ id: referralAttribution.id })
.from(referralAttribution)
.where(eq(referralAttribution.userId, session.user.id))
.limit(1)
if (existingUserAttribution) {
return NextResponse.json({
redeemed: false,
error: 'You have already redeemed a code',
})
}
if (orgId) {
const [existingOrgAttribution] = await db
.select({ id: referralAttribution.id })
.from(referralAttribution)
.where(eq(referralAttribution.organizationId, orgId))
.limit(1)
if (existingOrgAttribution) {
return NextResponse.json({
redeemed: false,
error: 'A code has already been redeemed for your organization',
})
}
}
const bonusAmount = Number(campaign.bonusCreditAmount)
let redeemed = false
await db.transaction(async (tx) => {
const [existingStats] = await tx
.select({ id: userStats.id })
.from(userStats)
.where(eq(userStats.userId, session.user.id))
.limit(1)
if (!existingStats) {
await tx.insert(userStats).values({
id: nanoid(),
userId: session.user.id,
})
}
const result = await tx
.insert(referralAttribution)
.values({
id: nanoid(),
userId: session.user.id,
organizationId: orgId,
campaignId: campaign.id,
utmSource: null,
utmMedium: null,
utmCampaign: null,
utmContent: null,
referrerUrl: null,
landingPage: null,
bonusCreditAmount: bonusAmount.toString(),
})
.onConflictDoNothing()
.returning({ id: referralAttribution.id })
if (result.length > 0) {
await applyBonusCredits(session.user.id, bonusAmount, tx)
redeemed = true
}
})
if (redeemed) {
logger.info('Referral code redeemed', {
userId: session.user.id,
organizationId: orgId,
code: normalizedCode,
campaignId: campaign.id,
campaignName: campaign.name,
bonusAmount,
})
}
if (!redeemed) {
return NextResponse.json({
redeemed: false,
error: 'You have already redeemed a code',
})
}
return NextResponse.json({
redeemed: true,
bonusAmount,
})
} catch (error) {
if (error instanceof z.ZodError) {
return NextResponse.json({ error: error.errors[0].message }, { status: 400 })
}
logger.error('Referral code redemption error', { error })
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
}
}
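
A sketch of a client-side redemption call against this route. The wrapper function is an assumption; the path, body, and response fields come from the doc comment above.

```ts
// Hypothetical caller. Codes are case-insensitive: the server trims and
// uppercases before the campaign lookup.
async function redeemCode(code: string) {
  const res = await fetch('/api/referral-code/redeem', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ code }),
  })
  // Outcomes: { redeemed: true, bonusAmount }, { redeemed: false, error }
  // for enterprise or duplicate attempts, or 404 for an unknown/inactive code.
  return res.json()
}
```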

View File

@@ -191,3 +191,84 @@ export async function GET(request: NextRequest) {
     )
   }
 }
+// Delete a label from a page
+export async function DELETE(request: NextRequest) {
+  try {
+    const auth = await checkSessionOrInternalAuth(request)
+    if (!auth.success || !auth.userId) {
+      return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
+    }
+    const {
+      domain,
+      accessToken,
+      cloudId: providedCloudId,
+      pageId,
+      labelName,
+    } = await request.json()
+    if (!domain) {
+      return NextResponse.json({ error: 'Domain is required' }, { status: 400 })
+    }
+    if (!accessToken) {
+      return NextResponse.json({ error: 'Access token is required' }, { status: 400 })
+    }
+    if (!pageId) {
+      return NextResponse.json({ error: 'Page ID is required' }, { status: 400 })
+    }
+    if (!labelName) {
+      return NextResponse.json({ error: 'Label name is required' }, { status: 400 })
+    }
+    const pageIdValidation = validateAlphanumericId(pageId, 'pageId', 255)
+    if (!pageIdValidation.isValid) {
+      return NextResponse.json({ error: pageIdValidation.error }, { status: 400 })
+    }
+    const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken))
+    const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
+    if (!cloudIdValidation.isValid) {
+      return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
+    }
+    const encodedLabel = encodeURIComponent(labelName.trim())
+    const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/rest/api/content/${pageId}/label?name=${encodedLabel}`
+    const response = await fetch(url, {
+      method: 'DELETE',
+      headers: {
+        Accept: 'application/json',
+        Authorization: `Bearer ${accessToken}`,
+      },
+    })
+    if (!response.ok) {
+      const errorData = await response.json().catch(() => null)
+      logger.error('Confluence API error response:', {
+        status: response.status,
+        statusText: response.statusText,
+        error: JSON.stringify(errorData, null, 2),
+      })
+      const errorMessage =
+        errorData?.message || `Failed to delete Confluence label (${response.status})`
+      return NextResponse.json({ error: errorMessage }, { status: response.status })
+    }
+    return NextResponse.json({
+      pageId,
+      labelName,
+      deleted: true,
+    })
+  } catch (error) {
+    logger.error('Error deleting Confluence label:', error)
+    return NextResponse.json(
+      { error: (error as Error).message || 'Internal server error' },
+      { status: 500 }
+    )
+  }
+}
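
An illustrative request body for the new DELETE handler; the values are made up, and the route's mount path is not shown in this compare view.

```ts
// Illustrative body for the DELETE handler above (hypothetical values).
// cloudId is optional: when omitted it is resolved from the domain.
const body = {
  domain: 'yourcompany.atlassian.net',
  accessToken: '<confluence-oauth-token>',
  pageId: '123456',
  labelName: 'obsolete-docs',
}
// On success the handler responds with { pageId, labelName, deleted: true }.
```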

View File

@@ -0,0 +1,103 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid'
import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation'
import { getConfluenceCloudId } from '@/tools/confluence/utils'
const logger = createLogger('ConfluencePagesByLabelAPI')
export const dynamic = 'force-dynamic'
export async function GET(request: NextRequest) {
try {
const auth = await checkSessionOrInternalAuth(request)
if (!auth.success || !auth.userId) {
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
}
const { searchParams } = new URL(request.url)
const domain = searchParams.get('domain')
const accessToken = searchParams.get('accessToken')
const labelId = searchParams.get('labelId')
const providedCloudId = searchParams.get('cloudId')
const limit = searchParams.get('limit') || '50'
const cursor = searchParams.get('cursor')
if (!domain) {
return NextResponse.json({ error: 'Domain is required' }, { status: 400 })
}
if (!accessToken) {
return NextResponse.json({ error: 'Access token is required' }, { status: 400 })
}
if (!labelId) {
return NextResponse.json({ error: 'Label ID is required' }, { status: 400 })
}
const labelIdValidation = validateAlphanumericId(labelId, 'labelId', 255)
if (!labelIdValidation.isValid) {
return NextResponse.json({ error: labelIdValidation.error }, { status: 400 })
}
const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken))
const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
if (!cloudIdValidation.isValid) {
return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
}
const queryParams = new URLSearchParams()
queryParams.append('limit', String(Math.min(Number(limit), 250)))
if (cursor) {
queryParams.append('cursor', cursor)
}
const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/labels/${labelId}/pages?${queryParams.toString()}`
const response = await fetch(url, {
method: 'GET',
headers: {
Accept: 'application/json',
Authorization: `Bearer ${accessToken}`,
},
})
if (!response.ok) {
const errorData = await response.json().catch(() => null)
logger.error('Confluence API error response:', {
status: response.status,
statusText: response.statusText,
error: JSON.stringify(errorData, null, 2),
})
const errorMessage = errorData?.message || `Failed to get pages by label (${response.status})`
return NextResponse.json({ error: errorMessage }, { status: response.status })
}
const data = await response.json()
const pages = (data.results || []).map((page: any) => ({
id: page.id,
title: page.title,
status: page.status ?? null,
spaceId: page.spaceId ?? null,
parentId: page.parentId ?? null,
authorId: page.authorId ?? null,
createdAt: page.createdAt ?? null,
version: page.version ?? null,
}))
return NextResponse.json({
pages,
labelId,
nextCursor: data._links?.next
? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor')
: null,
})
} catch (error) {
logger.error('Error getting pages by label:', error)
return NextResponse.json(
{ error: (error as Error).message || 'Internal server error' },
{ status: 500 }
)
}
}
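
A sketch of calling the new pages-by-label handler. The mount path is not visible in this diff, so it is left as a placeholder; the parameter names match the handler above.

```ts
declare const pagesByLabelUrl: string // mount path not shown in this compare view

// Hypothetical call: all inputs travel as query params, and the handler
// clamps limit to 250.
const params = new URLSearchParams({
  domain: 'yourcompany.atlassian.net',
  accessToken: '<confluence-oauth-token>',
  labelId: '12345',
  limit: '50',
})
const res = await fetch(`${pagesByLabelUrl}?${params.toString()}`)
const { pages, nextCursor } = await res.json()
```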

View File

@@ -0,0 +1,98 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid'
import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation'
import { getConfluenceCloudId } from '@/tools/confluence/utils'
const logger = createLogger('ConfluenceSpaceLabelsAPI')
export const dynamic = 'force-dynamic'
export async function GET(request: NextRequest) {
try {
const auth = await checkSessionOrInternalAuth(request)
if (!auth.success || !auth.userId) {
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
}
const { searchParams } = new URL(request.url)
const domain = searchParams.get('domain')
const accessToken = searchParams.get('accessToken')
const spaceId = searchParams.get('spaceId')
const providedCloudId = searchParams.get('cloudId')
const limit = searchParams.get('limit') || '25'
const cursor = searchParams.get('cursor')
if (!domain) {
return NextResponse.json({ error: 'Domain is required' }, { status: 400 })
}
if (!accessToken) {
return NextResponse.json({ error: 'Access token is required' }, { status: 400 })
}
if (!spaceId) {
return NextResponse.json({ error: 'Space ID is required' }, { status: 400 })
}
const spaceIdValidation = validateAlphanumericId(spaceId, 'spaceId', 255)
if (!spaceIdValidation.isValid) {
return NextResponse.json({ error: spaceIdValidation.error }, { status: 400 })
}
const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken))
const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
if (!cloudIdValidation.isValid) {
return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
}
const queryParams = new URLSearchParams()
queryParams.append('limit', String(Math.min(Number(limit), 250)))
if (cursor) {
queryParams.append('cursor', cursor)
}
const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/spaces/${spaceId}/labels?${queryParams.toString()}`
const response = await fetch(url, {
method: 'GET',
headers: {
Accept: 'application/json',
Authorization: `Bearer ${accessToken}`,
},
})
if (!response.ok) {
const errorData = await response.json().catch(() => null)
logger.error('Confluence API error response:', {
status: response.status,
statusText: response.statusText,
error: JSON.stringify(errorData, null, 2),
})
const errorMessage = errorData?.message || `Failed to list space labels (${response.status})`
return NextResponse.json({ error: errorMessage }, { status: response.status })
}
const data = await response.json()
const labels = (data.results || []).map((label: any) => ({
id: label.id,
name: label.name,
prefix: label.prefix || 'global',
}))
return NextResponse.json({
labels,
spaceId,
nextCursor: data._links?.next
? new URL(data._links.next, 'https://placeholder').searchParams.get('cursor')
: null,
})
} catch (error) {
logger.error('Error listing space labels:', error)
return NextResponse.json(
{ error: (error as Error).message || 'Internal server error' },
{ status: 500 }
)
}
}
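
Both new label routes paginate with an opaque `cursor`. A sketch of draining every page by following `nextCursor`, with the mount path again left as a placeholder:

```ts
declare const spaceLabelsUrl: string // mount path not shown in this compare view

// Follow nextCursor until the handler returns null.
async function fetchAllSpaceLabels(base: URLSearchParams) {
  const all: Array<{ id: string; name: string; prefix: string }> = []
  let cursor: string | null = null
  do {
    const params = new URLSearchParams(base)
    if (cursor) params.set('cursor', cursor)
    const res = await fetch(`${spaceLabelsUrl}?${params.toString()}`)
    const data: { labels: typeof all; nextCursor: string | null } = await res.json()
    all.push(...data.labels)
    cursor = data.nextCursor
  } while (cursor)
  return all
}
```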

View File

@@ -66,6 +66,12 @@
  * Credits:
  *   POST /api/v1/admin/credits - Issue credits to user (by userId or email)
  *
+ * Referral Campaigns:
+ *   GET /api/v1/admin/referral-campaigns - List campaigns (?active=true/false)
+ *   POST /api/v1/admin/referral-campaigns - Create campaign
+ *   GET /api/v1/admin/referral-campaigns/:id - Get campaign details
+ *   PATCH /api/v1/admin/referral-campaigns/:id - Update campaign fields
+ *
  * Access Control (Permission Groups):
  *   GET /api/v1/admin/access-control - List permission groups (?organizationId=X)
  *   DELETE /api/v1/admin/access-control - Delete permission groups for org (?organizationId=X)
@@ -97,6 +103,7 @@ export type {
   AdminOrganization,
   AdminOrganizationBillingSummary,
   AdminOrganizationDetail,
+  AdminReferralCampaign,
   AdminSeatAnalytics,
   AdminSingleResponse,
   AdminSubscription,
@@ -111,6 +118,7 @@
   AdminWorkspaceMember,
   DbMember,
   DbOrganization,
+  DbReferralCampaign,
   DbSubscription,
   DbUser,
   DbUserStats,
@@ -139,6 +147,7 @@ export {
   parseWorkflowVariables,
   toAdminFolder,
   toAdminOrganization,
+  toAdminReferralCampaign,
   toAdminSubscription,
   toAdminUser,
   toAdminWorkflow,

View File

@@ -0,0 +1,142 @@
/**
* GET /api/v1/admin/referral-campaigns/:id
*
* Get a single referral campaign by ID.
*
* PATCH /api/v1/admin/referral-campaigns/:id
*
* Update campaign fields. All fields are optional.
*
* Body:
* - name: string (non-empty) - Campaign name
* - bonusCreditAmount: number (> 0) - Bonus credits in dollars
* - isActive: boolean - Enable/disable the campaign
* - code: string | null (min 6 chars, auto-uppercased, null to remove) - Redeemable code
* - utmSource: string | null - UTM source match (null = wildcard)
* - utmMedium: string | null - UTM medium match (null = wildcard)
* - utmCampaign: string | null - UTM campaign match (null = wildcard)
* - utmContent: string | null - UTM content match (null = wildcard)
*/
import { db } from '@sim/db'
import { referralCampaigns } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { withAdminAuthParams } from '@/app/api/v1/admin/middleware'
import {
badRequestResponse,
internalErrorResponse,
notFoundResponse,
singleResponse,
} from '@/app/api/v1/admin/responses'
import { toAdminReferralCampaign } from '@/app/api/v1/admin/types'
const logger = createLogger('AdminReferralCampaignDetailAPI')
interface RouteParams {
id: string
}
export const GET = withAdminAuthParams<RouteParams>(async (_, context) => {
try {
const { id: campaignId } = await context.params
const [campaign] = await db
.select()
.from(referralCampaigns)
.where(eq(referralCampaigns.id, campaignId))
.limit(1)
if (!campaign) {
return notFoundResponse('Campaign')
}
logger.info(`Admin API: Retrieved referral campaign ${campaignId}`)
return singleResponse(toAdminReferralCampaign(campaign, getBaseUrl()))
} catch (error) {
logger.error('Admin API: Failed to get referral campaign', { error })
return internalErrorResponse('Failed to get referral campaign')
}
})
export const PATCH = withAdminAuthParams<RouteParams>(async (request, context) => {
try {
const { id: campaignId } = await context.params
const body = await request.json()
const [existing] = await db
.select()
.from(referralCampaigns)
.where(eq(referralCampaigns.id, campaignId))
.limit(1)
if (!existing) {
return notFoundResponse('Campaign')
}
const updateData: Record<string, unknown> = { updatedAt: new Date() }
if (body.name !== undefined) {
if (typeof body.name !== 'string' || body.name.trim().length === 0) {
return badRequestResponse('name must be a non-empty string')
}
updateData.name = body.name.trim()
}
if (body.bonusCreditAmount !== undefined) {
if (
typeof body.bonusCreditAmount !== 'number' ||
!Number.isFinite(body.bonusCreditAmount) ||
body.bonusCreditAmount <= 0
) {
return badRequestResponse('bonusCreditAmount must be a positive number')
}
updateData.bonusCreditAmount = body.bonusCreditAmount.toString()
}
if (body.isActive !== undefined) {
if (typeof body.isActive !== 'boolean') {
return badRequestResponse('isActive must be a boolean')
}
updateData.isActive = body.isActive
}
if (body.code !== undefined) {
if (body.code !== null) {
if (typeof body.code !== 'string') {
return badRequestResponse('code must be a string or null')
}
if (body.code.trim().length < 6) {
return badRequestResponse('code must be at least 6 characters')
}
}
updateData.code = body.code ? body.code.trim().toUpperCase() : null
}
for (const field of ['utmSource', 'utmMedium', 'utmCampaign', 'utmContent'] as const) {
if (body[field] !== undefined) {
if (body[field] !== null && typeof body[field] !== 'string') {
return badRequestResponse(`${field} must be a string or null`)
}
updateData[field] = body[field] || null
}
}
const [updated] = await db
.update(referralCampaigns)
.set(updateData)
.where(eq(referralCampaigns.id, campaignId))
.returning()
logger.info(`Admin API: Updated referral campaign ${campaignId}`, {
fields: Object.keys(updateData).filter((k) => k !== 'updatedAt'),
})
return singleResponse(toAdminReferralCampaign(updated, getBaseUrl()))
} catch (error) {
logger.error('Admin API: Failed to update referral campaign', { error })
return internalErrorResponse('Failed to update referral campaign')
}
})
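
An illustrative PATCH body for the handler above. Every field is optional, and `null` has distinct meanings: for `code` it removes the redeemable code, while for UTM fields it widens that dimension to a wildcard.

```ts
// Hypothetical PATCH body (values made up).
const patchBody = {
  bonusCreditAmount: 25, // dollars, must be a positive number
  isActive: false,       // disable the campaign
  code: null,            // remove the redeemable code
  utmContent: null,      // treat utm_content as a wildcard when matching
}
```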

View File

@@ -0,0 +1,140 @@
/**
* GET /api/v1/admin/referral-campaigns
*
* List referral campaigns with optional filtering and pagination.
*
* Query Parameters:
* - active: string (optional) - Filter by active status ('true' or 'false')
* - limit: number (default: 50, max: 250)
* - offset: number (default: 0)
*
* POST /api/v1/admin/referral-campaigns
*
* Create a new referral campaign.
*
* Body:
* - name: string (required) - Campaign name
* - bonusCreditAmount: number (required, > 0) - Bonus credits in dollars
* - code: string | null (optional, min 6 chars, auto-uppercased) - Redeemable code
* - utmSource: string | null (optional) - UTM source match (null = wildcard)
* - utmMedium: string | null (optional) - UTM medium match (null = wildcard)
* - utmCampaign: string | null (optional) - UTM campaign match (null = wildcard)
* - utmContent: string | null (optional) - UTM content match (null = wildcard)
*/
import { db } from '@sim/db'
import { referralCampaigns } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { count, eq, type SQL } from 'drizzle-orm'
import { nanoid } from 'nanoid'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { withAdminAuth } from '@/app/api/v1/admin/middleware'
import {
badRequestResponse,
internalErrorResponse,
listResponse,
singleResponse,
} from '@/app/api/v1/admin/responses'
import {
type AdminReferralCampaign,
createPaginationMeta,
parsePaginationParams,
toAdminReferralCampaign,
} from '@/app/api/v1/admin/types'
const logger = createLogger('AdminReferralCampaignsAPI')
export const GET = withAdminAuth(async (request) => {
const url = new URL(request.url)
const { limit, offset } = parsePaginationParams(url)
const activeFilter = url.searchParams.get('active')
try {
const conditions: SQL<unknown>[] = []
if (activeFilter === 'true') {
conditions.push(eq(referralCampaigns.isActive, true))
} else if (activeFilter === 'false') {
conditions.push(eq(referralCampaigns.isActive, false))
}
const whereClause = conditions.length > 0 ? conditions[0] : undefined
const baseUrl = getBaseUrl()
const [countResult, campaigns] = await Promise.all([
db.select({ total: count() }).from(referralCampaigns).where(whereClause),
db
.select()
.from(referralCampaigns)
.where(whereClause)
.orderBy(referralCampaigns.createdAt)
.limit(limit)
.offset(offset),
])
const total = countResult[0].total
const data: AdminReferralCampaign[] = campaigns.map((c) => toAdminReferralCampaign(c, baseUrl))
const pagination = createPaginationMeta(total, limit, offset)
logger.info(`Admin API: Listed ${data.length} referral campaigns (total: ${total})`)
return listResponse(data, pagination)
} catch (error) {
logger.error('Admin API: Failed to list referral campaigns', { error })
return internalErrorResponse('Failed to list referral campaigns')
}
})
export const POST = withAdminAuth(async (request) => {
try {
const body = await request.json()
const { name, code, utmSource, utmMedium, utmCampaign, utmContent, bonusCreditAmount } = body
if (!name || typeof name !== 'string') {
return badRequestResponse('name is required and must be a string')
}
if (
typeof bonusCreditAmount !== 'number' ||
!Number.isFinite(bonusCreditAmount) ||
bonusCreditAmount <= 0
) {
return badRequestResponse('bonusCreditAmount must be a positive number')
}
if (code !== undefined && code !== null) {
if (typeof code !== 'string') {
return badRequestResponse('code must be a string or null')
}
if (code.trim().length < 6) {
return badRequestResponse('code must be at least 6 characters')
}
}
const id = nanoid()
const [campaign] = await db
.insert(referralCampaigns)
.values({
id,
name,
code: code ? code.trim().toUpperCase() : null,
utmSource: utmSource || null,
utmMedium: utmMedium || null,
utmCampaign: utmCampaign || null,
utmContent: utmContent || null,
bonusCreditAmount: bonusCreditAmount.toString(),
})
.returning()
logger.info(`Admin API: Created referral campaign ${id}`, {
name,
code: campaign.code,
bonusCreditAmount,
})
return singleResponse(toAdminReferralCampaign(campaign, getBaseUrl()))
} catch (error) {
logger.error('Admin API: Failed to create referral campaign', { error })
return internalErrorResponse('Failed to create referral campaign')
}
})
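
An illustrative POST body for campaign creation. Only `name` and `bonusCreditAmount` are required; omitted UTM fields act as wildcards when matching signups.

```ts
// Hypothetical creation body (values made up). Codes are stored uppercased,
// so 'launch10' and 'LAUNCH10' redeem the same campaign.
const createBody = {
  name: 'February launch newsletter',
  bonusCreditAmount: 10,
  code: 'LAUNCH10',        // optional, min 6 chars
  utmSource: 'newsletter',
  utmCampaign: 'feb-launch',
  // utmMedium and utmContent omitted: match any value
}
```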

View File

@@ -8,6 +8,7 @@
 import type {
   member,
   organization,
+  referralCampaigns,
   subscription,
   user,
   userStats,
@@ -31,6 +32,7 @@ export type DbOrganization = InferSelectModel<typeof organization>
 export type DbSubscription = InferSelectModel<typeof subscription>
 export type DbMember = InferSelectModel<typeof member>
 export type DbUserStats = InferSelectModel<typeof userStats>
+export type DbReferralCampaign = InferSelectModel<typeof referralCampaigns>
 // =============================================================================
 // Pagination
@@ -646,3 +648,49 @@ export interface AdminDeployResult {
 export interface AdminUndeployResult {
   isDeployed: boolean
 }
+
+// =============================================================================
+// Referral Campaign Types
+// =============================================================================
+
+export interface AdminReferralCampaign {
+  id: string
+  name: string
+  code: string | null
+  utmSource: string | null
+  utmMedium: string | null
+  utmCampaign: string | null
+  utmContent: string | null
+  bonusCreditAmount: string
+  isActive: boolean
+  signupUrl: string | null
+  createdAt: string
+  updatedAt: string
+}
+
+export function toAdminReferralCampaign(
+  dbCampaign: DbReferralCampaign,
+  baseUrl: string
+): AdminReferralCampaign {
+  const utmParams = new URLSearchParams()
+  if (dbCampaign.utmSource) utmParams.set('utm_source', dbCampaign.utmSource)
+  if (dbCampaign.utmMedium) utmParams.set('utm_medium', dbCampaign.utmMedium)
+  if (dbCampaign.utmCampaign) utmParams.set('utm_campaign', dbCampaign.utmCampaign)
+  if (dbCampaign.utmContent) utmParams.set('utm_content', dbCampaign.utmContent)
+  const query = utmParams.toString()
+  return {
+    id: dbCampaign.id,
+    name: dbCampaign.name,
+    code: dbCampaign.code,
+    utmSource: dbCampaign.utmSource,
+    utmMedium: dbCampaign.utmMedium,
+    utmCampaign: dbCampaign.utmCampaign,
+    utmContent: dbCampaign.utmContent,
+    bonusCreditAmount: dbCampaign.bonusCreditAmount,
+    isActive: dbCampaign.isActive,
+    signupUrl: query ? `${baseUrl}/signup?${query}` : null,
+    createdAt: dbCampaign.createdAt.toISOString(),
+    updatedAt: dbCampaign.updatedAt.toISOString(),
+  }
+}
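
A worked example of the `signupUrl` derivation in `toAdminReferralCampaign`, with hypothetical values: only non-null UTM fields enter the query string, and a campaign with no UTM fields at all gets `signupUrl: null`.

```ts
// Campaign with utmSource and utmCampaign set, the other UTM fields null:
const utm = new URLSearchParams()
utm.set('utm_source', 'newsletter')
utm.set('utm_campaign', 'feb-launch')
console.log(`https://sim.example/signup?${utm.toString()}`)
// -> https://sim.example/signup?utm_source=newsletter&utm_campaign=feb-launch
```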

View File

@@ -29,7 +29,7 @@ const patchBodySchema = z
     description: z
       .string()
       .trim()
-      .max(500, 'Description must be 500 characters or less')
+      .max(2000, 'Description must be 2000 characters or less')
       .nullable()
       .optional(),
     isActive: z.literal(true).optional(), // Set to true to activate this version

View File

@@ -12,7 +12,7 @@ import {
 import { generateRequestId } from '@/lib/core/utils/request'
 import { SSE_HEADERS } from '@/lib/core/utils/sse'
 import { getBaseUrl } from '@/lib/core/utils/urls'
-import { markExecutionCancelled } from '@/lib/execution/cancellation'
+import { createExecutionEventWriter, setExecutionMeta } from '@/lib/execution/event-buffer'
 import { processInputFileFields } from '@/lib/execution/files'
 import { preprocessExecution } from '@/lib/execution/preprocessing'
 import { LoggingSession } from '@/lib/logs/execution/logging-session'
@@ -700,15 +700,27 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
   const timeoutController = createTimeoutAbortController(preprocessResult.executionTimeout?.sync)
   let isStreamClosed = false
+  const eventWriter = createExecutionEventWriter(executionId)
+  setExecutionMeta(executionId, {
+    status: 'active',
+    userId: actorUserId,
+    workflowId,
+  }).catch(() => {})
   const stream = new ReadableStream<Uint8Array>({
     async start(controller) {
-      const sendEvent = (event: ExecutionEvent) => {
-        if (isStreamClosed) return
-        try {
-          controller.enqueue(encodeSSEEvent(event))
-        } catch {
-          isStreamClosed = true
-        }
-      }
+      let finalMetaStatus: 'complete' | 'error' | 'cancelled' | null = null
+      const sendEvent = (event: ExecutionEvent) => {
+        if (!isStreamClosed) {
+          try {
+            controller.enqueue(encodeSSEEvent(event))
+          } catch {
+            isStreamClosed = true
+          }
+        }
+        if (event.type !== 'stream:chunk' && event.type !== 'stream:done') {
+          eventWriter.write(event).catch(() => {})
+        }
+      }
@@ -829,14 +841,12 @@
           const reader = streamingExec.stream.getReader()
           const decoder = new TextDecoder()
-          let chunkCount = 0
           try {
             while (true) {
               const { done, value } = await reader.read()
               if (done) break
-              chunkCount++
               const chunk = decoder.decode(value, { stream: true })
               sendEvent({
                 type: 'stream:chunk',
@@ -951,6 +961,7 @@
                 duration: result.metadata?.duration || 0,
               },
             })
+            finalMetaStatus = 'error'
           } else {
             logger.info(`[${requestId}] Workflow execution was cancelled`)
@@ -963,6 +974,7 @@
                 duration: result.metadata?.duration || 0,
               },
             })
+            finalMetaStatus = 'cancelled'
           }
           return
         }
@@ -986,6 +998,7 @@
             endTime: result.metadata?.endTime || new Date().toISOString(),
           },
         })
+        finalMetaStatus = 'complete'
       } catch (error: unknown) {
         const isTimeout = isTimeoutError(error) || timeoutController.isTimedOut()
         const errorMessage = isTimeout
@@ -1017,7 +1030,18 @@
             duration: executionResult?.metadata?.duration || 0,
           },
         })
+        finalMetaStatus = 'error'
       } finally {
+        try {
+          await eventWriter.close()
+        } catch (closeError) {
+          logger.warn(`[${requestId}] Failed to close event writer`, {
+            error: closeError instanceof Error ? closeError.message : String(closeError),
+          })
+        }
+        if (finalMetaStatus) {
+          setExecutionMeta(executionId, { status: finalMetaStatus }).catch(() => {})
+        }
         timeoutController.cleanup()
         if (executionId) {
           await cleanupExecutionBase64Cache(executionId)
@@ -1032,10 +1056,7 @@
     },
     cancel() {
       isStreamClosed = true
-      timeoutController.cleanup()
-      logger.info(`[${requestId}] Client aborted SSE stream, signalling cancellation`)
-      timeoutController.abort()
-      markExecutionCancelled(executionId).catch(() => {})
+      logger.info(`[${requestId}] Client disconnected from SSE stream`)
     },
   })

View File

@@ -0,0 +1,170 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
import {
type ExecutionStreamStatus,
getExecutionMeta,
readExecutionEvents,
} from '@/lib/execution/event-buffer'
import { formatSSEEvent } from '@/lib/workflows/executor/execution-events'
import { authorizeWorkflowByWorkspacePermission } from '@/lib/workflows/utils'
const logger = createLogger('ExecutionStreamReconnectAPI')
const POLL_INTERVAL_MS = 500
const MAX_POLL_DURATION_MS = 10 * 60 * 1000 // 10 minutes
function isTerminalStatus(status: ExecutionStreamStatus): boolean {
return status === 'complete' || status === 'error' || status === 'cancelled'
}
export const runtime = 'nodejs'
export const dynamic = 'force-dynamic'
export async function GET(
req: NextRequest,
{ params }: { params: Promise<{ id: string; executionId: string }> }
) {
const { id: workflowId, executionId } = await params
try {
const auth = await checkHybridAuth(req, { requireWorkflowId: false })
if (!auth.success || !auth.userId) {
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
}
const workflowAuthorization = await authorizeWorkflowByWorkspacePermission({
workflowId,
userId: auth.userId,
action: 'read',
})
if (!workflowAuthorization.allowed) {
return NextResponse.json(
{ error: workflowAuthorization.message || 'Access denied' },
{ status: workflowAuthorization.status }
)
}
const meta = await getExecutionMeta(executionId)
if (!meta) {
return NextResponse.json({ error: 'Execution buffer not found or expired' }, { status: 404 })
}
if (meta.workflowId && meta.workflowId !== workflowId) {
return NextResponse.json(
{ error: 'Execution does not belong to this workflow' },
{ status: 403 }
)
}
const fromParam = req.nextUrl.searchParams.get('from')
const parsed = fromParam ? Number.parseInt(fromParam, 10) : 0
const fromEventId = Number.isFinite(parsed) && parsed >= 0 ? parsed : 0
logger.info('Reconnection stream requested', {
workflowId,
executionId,
fromEventId,
metaStatus: meta.status,
})
const encoder = new TextEncoder()
let closed = false
const stream = new ReadableStream<Uint8Array>({
async start(controller) {
let lastEventId = fromEventId
const pollDeadline = Date.now() + MAX_POLL_DURATION_MS
const enqueue = (text: string) => {
if (closed) return
try {
controller.enqueue(encoder.encode(text))
} catch {
closed = true
}
}
try {
const events = await readExecutionEvents(executionId, lastEventId)
for (const entry of events) {
if (closed) return
enqueue(formatSSEEvent(entry.event))
lastEventId = entry.eventId
}
const currentMeta = await getExecutionMeta(executionId)
if (!currentMeta || isTerminalStatus(currentMeta.status)) {
enqueue('data: [DONE]\n\n')
if (!closed) controller.close()
return
}
while (!closed && Date.now() < pollDeadline) {
await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS))
if (closed) return
const newEvents = await readExecutionEvents(executionId, lastEventId)
for (const entry of newEvents) {
if (closed) return
enqueue(formatSSEEvent(entry.event))
lastEventId = entry.eventId
}
const polledMeta = await getExecutionMeta(executionId)
if (!polledMeta || isTerminalStatus(polledMeta.status)) {
const finalEvents = await readExecutionEvents(executionId, lastEventId)
for (const entry of finalEvents) {
if (closed) return
enqueue(formatSSEEvent(entry.event))
lastEventId = entry.eventId
}
enqueue('data: [DONE]\n\n')
if (!closed) controller.close()
return
}
}
if (!closed) {
logger.warn('Reconnection stream poll deadline reached', { executionId })
enqueue('data: [DONE]\n\n')
controller.close()
}
} catch (error) {
logger.error('Error in reconnection stream', {
executionId,
error: error instanceof Error ? error.message : String(error),
})
if (!closed) {
try {
controller.close()
} catch {}
}
}
},
cancel() {
closed = true
logger.info('Client disconnected from reconnection stream', { executionId })
},
})
return new NextResponse(stream, {
headers: {
...SSE_HEADERS,
'X-Execution-Id': executionId,
},
})
} catch (error: any) {
logger.error('Failed to start reconnection stream', {
workflowId,
executionId,
error: error.message,
})
return NextResponse.json(
{ error: error.message || 'Failed to start reconnection stream' },
{ status: 500 }
)
}
}
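
A minimal sketch of how a client could resume this stream after a refresh. The URL shape is an assumption (the route's file path is not shown in this diff); the `from` query param, the `data:` framing, and the `[DONE]` terminator come from the handler above, and the event payload handling is simplified.

// Sketch only: URL shape assumed, event payload type simplified.
async function resumeExecutionStream(
  workflowId: string,
  executionId: string,
  fromEventId: number
): Promise<void> {
  const res = await fetch(
    `/api/workflows/${workflowId}/executions/${executionId}/reconnect?from=${fromEventId}`
  )
  if (!res.ok || !res.body) throw new Error(`Reconnect failed with status ${res.status}`)

  const reader = res.body.pipeThrough(new TextDecoderStream()).getReader()
  let buffered = ''
  for (;;) {
    const { done, value } = await reader.read()
    if (done) return
    buffered += value
    let sep: number
    // SSE frames are separated by a blank line; process each complete frame.
    while ((sep = buffered.indexOf('\n\n')) !== -1) {
      const frame = buffered.slice(0, sep)
      buffered = buffered.slice(sep + 2)
      const data = frame.startsWith('data: ') ? frame.slice(6) : frame
      if (data === '[DONE]') return // terminal marker emitted by the route above
      console.log('replayed event', JSON.parse(data))
    }
  }
}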

View File

@@ -38,6 +38,7 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
     return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
   }

+  const isInternalCall = auth.authType === 'internal_jwt'
   const userId = auth.userId || null

   let workflowData = await getWorkflowById(workflowId)
@@ -47,29 +48,32 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
     return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
   }

-  // Check if user has access to this workflow
-  if (!userId) {
+  if (isInternalCall && !userId) {
+    // Internal system calls (e.g. workflow-in-workflow executor) may not carry a userId.
+    // These are already authenticated via internal JWT; allow read access.
+    logger.info(`[${requestId}] Internal API call for workflow ${workflowId}`)
+  } else if (!userId) {
     logger.warn(`[${requestId}] Unauthorized access attempt for workflow ${workflowId}`)
     return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
-  }
-
-  const authorization = await authorizeWorkflowByWorkspacePermission({
-    workflowId,
-    userId,
-    action: 'read',
-  })
-  if (!authorization.workflow) {
-    logger.warn(`[${requestId}] Workflow ${workflowId} not found`)
-    return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
-  }
-  workflowData = authorization.workflow
-  if (!authorization.allowed) {
-    logger.warn(`[${requestId}] User ${userId} denied access to workflow ${workflowId}`)
-    return NextResponse.json(
-      { error: authorization.message || 'Access denied' },
-      { status: authorization.status }
-    )
+  } else {
+    const authorization = await authorizeWorkflowByWorkspacePermission({
+      workflowId,
+      userId,
+      action: 'read',
+    })
+    if (!authorization.workflow) {
+      logger.warn(`[${requestId}] Workflow ${workflowId} not found`)
+      return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
+    }
+    workflowData = authorization.workflow
+    if (!authorization.allowed) {
+      logger.warn(`[${requestId}] User ${userId} denied access to workflow ${workflowId}`)
+      return NextResponse.json(
+        { error: authorization.message || 'Access denied' },
+        { status: authorization.status }
+      )
+    }
   }

   logger.debug(`[${requestId}] Attempting to load workflow ${workflowId} from normalized tables`)
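
The net effect of this hunk is a three-way branch in place of the old single `userId` gate. Condensed restatement (identifiers from the diff, bodies elided; not the literal route code):

function describeReadAccess(isInternalCall: boolean, userId: string | null): string {
  if (isInternalCall && !userId) {
    return 'allowed: internal JWT system call without user context'
  }
  if (!userId) {
    return 'denied: 401 Unauthorized'
  }
  return 'deferred: authorizeWorkflowByWorkspacePermission({ action: "read" }) decides'
}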

View File

@@ -13,9 +13,6 @@ export type CommandId =
   | 'goto-logs'
   | 'open-search'
   | 'run-workflow'
-  | 'focus-copilot-tab'
-  | 'focus-toolbar-tab'
-  | 'focus-editor-tab'
   | 'clear-terminal-console'
   | 'focus-toolbar-search'
   | 'clear-notifications'
@@ -75,21 +72,6 @@ export const COMMAND_DEFINITIONS: Record<CommandId, CommandDefinition> = {
     shortcut: 'Mod+Enter',
     allowInEditable: false,
   },
-  'focus-copilot-tab': {
-    id: 'focus-copilot-tab',
-    shortcut: 'C',
-    allowInEditable: false,
-  },
-  'focus-toolbar-tab': {
-    id: 'focus-toolbar-tab',
-    shortcut: 'T',
-    allowInEditable: false,
-  },
-  'focus-editor-tab': {
-    id: 'focus-editor-tab',
-    shortcut: 'E',
-    allowInEditable: false,
-  },
   'clear-terminal-console': {
     id: 'clear-terminal-console',
     shortcut: 'Mod+D',

View File

@@ -113,7 +113,7 @@ export function VersionDescriptionModal({
           className='min-h-[120px] resize-none'
           value={description}
           onChange={(e) => setDescription(e.target.value)}
-          maxLength={500}
+          maxLength={2000}
           disabled={isGenerating}
         />
         <div className='flex items-center justify-between'>
@@ -123,7 +123,7 @@
             </p>
           )}
           {!updateMutation.error && !generateMutation.error && <div />}
-          <p className='text-[11px] text-[var(--text-tertiary)]'>{description.length}/500</p>
+          <p className='text-[11px] text-[var(--text-tertiary)]'>{description.length}/2000</p>
         </div>
       </ModalBody>
       <ModalFooter>

View File

@@ -57,6 +57,21 @@ export function useChangeDetection({
       }
     }

+    if (block.triggerMode) {
+      const triggerConfigValue = blockSubValues?.triggerConfig
+      if (
+        triggerConfigValue &&
+        typeof triggerConfigValue === 'object' &&
+        !subBlocks.triggerConfig
+      ) {
+        subBlocks.triggerConfig = {
+          id: 'triggerConfig',
+          type: 'short-input',
+          value: triggerConfigValue,
+        }
+      }
+    }
+
     blocksWithSubBlocks[blockId] = {
       ...block,
       subBlocks,
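
Why this matters, as a toy illustration with assumed shapes: deployed trigger blocks serialize a `triggerConfig` sub-block, so if the live state keeps that value only in the sub-block value store, a naive deep-compare always reports a change.

// Hypothetical shapes, illustrating the false positive this hunk fixes.
const deployed = {
  subBlocks: { triggerConfig: { id: 'triggerConfig', type: 'short-input', value: { cron: '0 * * * *' } } },
}
const liveBefore = { subBlocks: {} } // triggerConfig lived only in the sub-block value store
const liveAfter = deployed           // the hunk synthesizes the missing sub-block

console.log(JSON.stringify(liveBefore) === JSON.stringify(deployed)) // false: spurious "changed"
console.log(JSON.stringify(liveAfter) === JSON.stringify(deployed))  // true: no false positive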

View File

@@ -340,13 +340,7 @@ export const Panel = memo(function Panel() {
    * Register global keyboard shortcuts using the central commands registry.
    *
    * - Mod+Enter: Run / cancel workflow (matches the Run button behavior)
-   * - C: Focus Copilot tab
-   * - T: Focus Toolbar tab
-   * - E: Focus Editor tab
    * - Mod+F: Focus Toolbar tab and search input
-   *
-   * The tab-switching commands are disabled inside editable elements so typing
-   * in inputs or textareas is not interrupted.
    */
   useRegisterGlobalCommands(() =>
     createCommands([
@@ -363,33 +357,6 @@
         allowInEditable: false,
       },
     },
-      {
-        id: 'focus-copilot-tab',
-        handler: () => {
-          setActiveTab('copilot')
-        },
-        overrides: {
-          allowInEditable: false,
-        },
-      },
-      {
-        id: 'focus-toolbar-tab',
-        handler: () => {
-          setActiveTab('toolbar')
-        },
-        overrides: {
-          allowInEditable: false,
-        },
-      },
-      {
-        id: 'focus-editor-tab',
-        handler: () => {
-          setActiveTab('editor')
-        },
-        overrides: {
-          allowInEditable: false,
-        },
-      },
       {
         id: 'focus-toolbar-search',
         handler: () => {

View File

@@ -1,4 +1,4 @@
-import { useCallback, useRef, useState } from 'react'
+import { useCallback, useEffect, useRef, useState } from 'react'
 import { createLogger } from '@sim/logger'
 import { useQueryClient } from '@tanstack/react-query'
 import { v4 as uuidv4 } from 'uuid'
@@ -46,7 +46,13 @@ import { useWorkflowStore } from '@/stores/workflows/workflow/store'
 const logger = createLogger('useWorkflowExecution')

-// Debug state validation result
+/**
+ * Module-level Set tracking which workflows have an active reconnection effect.
+ * Prevents multiple hook instances (from different components) from starting
+ * concurrent reconnection streams for the same workflow during the same mount cycle.
+ */
+const activeReconnections = new Set<string>()
+
 interface DebugValidationResult {
   isValid: boolean
   error?: string
@@ -54,7 +60,7 @@ interface DebugValidationResult {
 interface BlockEventHandlerConfig {
   workflowId?: string
-  executionId?: string
+  executionIdRef: { current: string }
   workflowEdges: Array<{ id: string; target: string; sourceHandle?: string | null }>
   activeBlocksSet: Set<string>
   accumulatedBlockLogs: BlockLog[]
@@ -108,12 +114,15 @@ export function useWorkflowExecution() {
   const queryClient = useQueryClient()
   const currentWorkflow = useCurrentWorkflow()
   const { activeWorkflowId, workflows } = useWorkflowRegistry()
-  const { toggleConsole, addConsole, updateConsole, cancelRunningEntries } =
+  const { toggleConsole, addConsole, updateConsole, cancelRunningEntries, clearExecutionEntries } =
     useTerminalConsoleStore()
+  const hasHydrated = useTerminalConsoleStore((s) => s._hasHydrated)
   const { getAllVariables } = useEnvironmentStore()
   const { getVariablesByWorkflowId, variables } = useVariablesStore()
   const { isExecuting, isDebugging, pendingBlocks, executor, debugContext } =
     useCurrentWorkflowExecution()
+  const setCurrentExecutionId = useExecutionStore((s) => s.setCurrentExecutionId)
+  const getCurrentExecutionId = useExecutionStore((s) => s.getCurrentExecutionId)
   const setIsExecuting = useExecutionStore((s) => s.setIsExecuting)
   const setIsDebugging = useExecutionStore((s) => s.setIsDebugging)
   const setPendingBlocks = useExecutionStore((s) => s.setPendingBlocks)
@@ -297,7 +306,7 @@ export function useWorkflowExecution() {
     (config: BlockEventHandlerConfig) => {
       const {
         workflowId,
-        executionId,
+        executionIdRef,
         workflowEdges,
         activeBlocksSet,
         accumulatedBlockLogs,
@@ -308,6 +317,14 @@
         onBlockCompleteCallback,
       } = config

+      /** Returns true if this execution was cancelled or superseded by another run. */
+      const isStaleExecution = () =>
+        !!(
+          workflowId &&
+          executionIdRef.current &&
+          useExecutionStore.getState().getCurrentExecutionId(workflowId) !== executionIdRef.current
+        )
+
       const updateActiveBlocks = (blockId: string, isActive: boolean) => {
         if (!workflowId) return
         if (isActive) {
@@ -360,7 +377,7 @@
           endedAt: data.endedAt,
           workflowId,
           blockId: data.blockId,
-          executionId,
+          executionId: executionIdRef.current,
           blockName: data.blockName || 'Unknown Block',
           blockType: data.blockType || 'unknown',
           iterationCurrent: data.iterationCurrent,
@@ -383,7 +400,7 @@
           endedAt: data.endedAt,
           workflowId,
           blockId: data.blockId,
-          executionId,
+          executionId: executionIdRef.current,
           blockName: data.blockName || 'Unknown Block',
           blockType: data.blockType || 'unknown',
           iterationCurrent: data.iterationCurrent,
@@ -410,7 +427,7 @@
             iterationType: data.iterationType,
             iterationContainerId: data.iterationContainerId,
           },
-          executionId
+          executionIdRef.current
         )
       }
@@ -432,11 +449,12 @@
             iterationType: data.iterationType,
             iterationContainerId: data.iterationContainerId,
           },
-          executionId
+          executionIdRef.current
         )
       }

       const onBlockStarted = (data: BlockStartedData) => {
+        if (isStaleExecution()) return
         updateActiveBlocks(data.blockId, true)
         markIncomingEdges(data.blockId)
@@ -453,7 +471,7 @@
           endedAt: undefined,
           workflowId,
           blockId: data.blockId,
-          executionId,
+          executionId: executionIdRef.current,
           blockName: data.blockName || 'Unknown Block',
           blockType: data.blockType || 'unknown',
           isRunning: true,
@@ -465,6 +483,7 @@
       }

       const onBlockCompleted = (data: BlockCompletedData) => {
+        if (isStaleExecution()) return
         updateActiveBlocks(data.blockId, false)
         if (workflowId) setBlockRunStatus(workflowId, data.blockId, 'success')
@@ -495,6 +514,7 @@
       }

       const onBlockError = (data: BlockErrorData) => {
+        if (isStaleExecution()) return
         updateActiveBlocks(data.blockId, false)
         if (workflowId) setBlockRunStatus(workflowId, data.blockId, 'error')
@@ -902,10 +922,6 @@ export function useWorkflowExecution() {
       // Update block logs with actual stream completion times
       if (result.logs && streamCompletionTimes.size > 0) {
-        const streamCompletionEndTime = new Date(
-          Math.max(...Array.from(streamCompletionTimes.values()))
-        ).toISOString()
-
         result.logs.forEach((log: BlockLog) => {
           if (streamCompletionTimes.has(log.blockId)) {
             const completionTime = streamCompletionTimes.get(log.blockId)!
@@ -987,7 +1003,6 @@
         return { success: true, stream }
       }

-      // For manual (non-chat) execution
       const manualExecutionId = uuidv4()
       try {
         const result = await executeWorkflow(
@@ -1002,29 +1017,10 @@
         if (result.metadata.pendingBlocks) {
           setPendingBlocks(activeWorkflowId, result.metadata.pendingBlocks)
         }
-      } else if (result && 'success' in result) {
-        setExecutionResult(result)
-
-        // Reset execution state after successful non-debug execution
-        setIsExecuting(activeWorkflowId, false)
-        setIsDebugging(activeWorkflowId, false)
-        setActiveBlocks(activeWorkflowId, new Set())
-
-        if (isChatExecution) {
-          if (!result.metadata) {
-            result.metadata = { duration: 0, startTime: new Date().toISOString() }
-          }
-          ;(result.metadata as any).source = 'chat'
-        }
-
-        // Invalidate subscription queries to update usage
-        setTimeout(() => {
-          queryClient.invalidateQueries({ queryKey: subscriptionKeys.all })
-        }, 1000)
       }

       return result
     } catch (error: any) {
       const errorResult = handleExecutionError(error, { executionId: manualExecutionId })
-      // Note: Error logs are already persisted server-side via execution-core.ts
       return errorResult
     }
   },
@@ -1275,7 +1271,7 @@ export function useWorkflowExecution() {
     if (activeWorkflowId) {
       logger.info('Using server-side executor')

-      const executionId = uuidv4()
+      const executionIdRef = { current: '' }

       let executionResult: ExecutionResult = {
         success: false,
@@ -1293,7 +1289,7 @@
       try {
         const blockHandlers = buildBlockEventHandlers({
           workflowId: activeWorkflowId,
-          executionId,
+          executionIdRef,
           workflowEdges,
           activeBlocksSet,
           accumulatedBlockLogs,
@@ -1326,6 +1322,10 @@
             loops: clientWorkflowState.loops,
             parallels: clientWorkflowState.parallels,
           },
+          onExecutionId: (id) => {
+            executionIdRef.current = id
+            setCurrentExecutionId(activeWorkflowId, id)
+          },
           callbacks: {
             onExecutionStarted: (data) => {
               logger.info('Server execution started:', data)
@@ -1368,6 +1368,18 @@
             },
             onExecutionCompleted: (data) => {
+              if (
+                activeWorkflowId &&
+                executionIdRef.current &&
+                useExecutionStore.getState().getCurrentExecutionId(activeWorkflowId) !==
+                  executionIdRef.current
+              )
+                return
+
+              if (activeWorkflowId) {
+                setCurrentExecutionId(activeWorkflowId, null)
+              }
+
               executionResult = {
                 success: data.success,
                 output: data.output,
@@ -1425,9 +1437,33 @@ export function useWorkflowExecution() {
                 })
               }
             }
+
+            const workflowExecState = activeWorkflowId
+              ? useExecutionStore.getState().getWorkflowExecution(activeWorkflowId)
+              : null
+            if (activeWorkflowId && !workflowExecState?.isDebugging) {
+              setExecutionResult(executionResult)
+              setIsExecuting(activeWorkflowId, false)
+              setActiveBlocks(activeWorkflowId, new Set())
+
+              setTimeout(() => {
+                queryClient.invalidateQueries({ queryKey: subscriptionKeys.all })
+              }, 1000)
+            }
             },
             onExecutionError: (data) => {
+              if (
+                activeWorkflowId &&
+                executionIdRef.current &&
+                useExecutionStore.getState().getCurrentExecutionId(activeWorkflowId) !==
+                  executionIdRef.current
+              )
+                return
+
+              if (activeWorkflowId) {
+                setCurrentExecutionId(activeWorkflowId, null)
+              }
+
               executionResult = {
                 success: false,
                 output: {},
@@ -1441,43 +1477,53 @@
               const isPreExecutionError = accumulatedBlockLogs.length === 0
               handleExecutionErrorConsole({
                 workflowId: activeWorkflowId,
-                executionId,
+                executionId: executionIdRef.current,
                 error: data.error,
                 durationMs: data.duration,
                 blockLogs: accumulatedBlockLogs,
                 isPreExecutionError,
               })
-
-              if (activeWorkflowId) {
-                setIsExecuting(activeWorkflowId, false)
-                setIsDebugging(activeWorkflowId, false)
-                setActiveBlocks(activeWorkflowId, new Set())
-              }
             },
             onExecutionCancelled: (data) => {
+              if (
+                activeWorkflowId &&
+                executionIdRef.current &&
+                useExecutionStore.getState().getCurrentExecutionId(activeWorkflowId) !==
+                  executionIdRef.current
+              )
+                return
+
+              if (activeWorkflowId) {
+                setCurrentExecutionId(activeWorkflowId, null)
+              }
+
               handleExecutionCancelledConsole({
                 workflowId: activeWorkflowId,
-                executionId,
+                executionId: executionIdRef.current,
                 durationMs: data?.duration,
               })
-
-              if (activeWorkflowId) {
-                setIsExecuting(activeWorkflowId, false)
-                setIsDebugging(activeWorkflowId, false)
-                setActiveBlocks(activeWorkflowId, new Set())
-              }
             },
           },
         })

         return executionResult
       } catch (error: any) {
-        // Don't log abort errors - they're intentional user actions
         if (error.name === 'AbortError' || error.message?.includes('aborted')) {
           logger.info('Execution aborted by user')
-
-          // Reset execution state
-          if (activeWorkflowId) {
-            setIsExecuting(activeWorkflowId, false)
-            setActiveBlocks(activeWorkflowId, new Set())
-          }
-
-          // Return gracefully without error
-          return {
-            success: false,
-            output: {},
-            metadata: { duration: 0 },
-            logs: [],
-          }
+          return executionResult
         }

         logger.error('Server-side execution failed:', error)
@@ -1485,7 +1531,6 @@
       }
     }

-    // Fallback: should never reach here
     throw new Error('Server-side execution is required')
   }
@@ -1717,25 +1762,28 @@ export function useWorkflowExecution() {
    * Handles cancelling the current workflow execution
    */
   const handleCancelExecution = useCallback(() => {
+    if (!activeWorkflowId) return
     logger.info('Workflow execution cancellation requested')

-    // Cancel the execution stream for this workflow (server-side)
-    executionStream.cancel(activeWorkflowId ?? undefined)
+    const storedExecutionId = getCurrentExecutionId(activeWorkflowId)

-    // Mark current chat execution as superseded so its cleanup won't affect new executions
-    currentChatExecutionIdRef.current = null
+    if (storedExecutionId) {
+      setCurrentExecutionId(activeWorkflowId, null)
+      fetch(`/api/workflows/${activeWorkflowId}/executions/${storedExecutionId}/cancel`, {
+        method: 'POST',
+      }).catch(() => {})
+      handleExecutionCancelledConsole({
+        workflowId: activeWorkflowId,
+        executionId: storedExecutionId,
+      })
+    }

-    // Mark all running entries as canceled in the terminal
-    if (activeWorkflowId) {
-      cancelRunningEntries(activeWorkflowId)
-
-      // Reset execution state - this triggers chat stream cleanup via useEffect in chat.tsx
-      setIsExecuting(activeWorkflowId, false)
-      setIsDebugging(activeWorkflowId, false)
-      setActiveBlocks(activeWorkflowId, new Set())
-    }
+    executionStream.cancel(activeWorkflowId)
+    currentChatExecutionIdRef.current = null
+    setIsExecuting(activeWorkflowId, false)
+    setIsDebugging(activeWorkflowId, false)
+    setActiveBlocks(activeWorkflowId, new Set())

-    // If in debug mode, also reset debug state
     if (isDebugging) {
       resetDebugState()
     }
@@ -1747,7 +1795,9 @@
     setIsDebugging,
     setActiveBlocks,
     activeWorkflowId,
-    cancelRunningEntries,
+    getCurrentExecutionId,
+    setCurrentExecutionId,
+    handleExecutionCancelledConsole,
   ])

   /**
@@ -1847,7 +1897,7 @@ export function useWorkflowExecution() {
     }

     setIsExecuting(workflowId, true)
-    const executionId = uuidv4()
+    const executionIdRef = { current: '' }
     const accumulatedBlockLogs: BlockLog[] = []
     const accumulatedBlockStates = new Map<string, BlockState>()
     const executedBlockIds = new Set<string>()
@@ -1856,7 +1906,7 @@
     try {
       const blockHandlers = buildBlockEventHandlers({
         workflowId,
-        executionId,
+        executionIdRef,
         workflowEdges,
         activeBlocksSet,
         accumulatedBlockLogs,
@@ -1871,6 +1921,10 @@
         startBlockId: blockId,
         sourceSnapshot: effectiveSnapshot,
         input: workflowInput,
+        onExecutionId: (id) => {
+          executionIdRef.current = id
+          setCurrentExecutionId(workflowId, id)
+        },
         callbacks: {
           onBlockStarted: blockHandlers.onBlockStarted,
           onBlockCompleted: blockHandlers.onBlockCompleted,
@@ -1878,7 +1932,6 @@
           onExecutionCompleted: (data) => {
             if (data.success) {
-              // Add the start block (trigger) to executed blocks
               executedBlockIds.add(blockId)

               const mergedBlockStates: Record<string, BlockState> = {
@@ -1902,6 +1955,10 @@
               }
               setLastExecutionSnapshot(workflowId, updatedSnapshot)
             }
+
+            setCurrentExecutionId(workflowId, null)
+            setIsExecuting(workflowId, false)
+            setActiveBlocks(workflowId, new Set())
           },
           onExecutionError: (data) => {
@@ -1921,19 +1978,27 @@
             handleExecutionErrorConsole({
               workflowId,
-              executionId,
+              executionId: executionIdRef.current,
               error: data.error,
               durationMs: data.duration,
               blockLogs: accumulatedBlockLogs,
             })
+
+            setCurrentExecutionId(workflowId, null)
+            setIsExecuting(workflowId, false)
+            setActiveBlocks(workflowId, new Set())
           },
           onExecutionCancelled: (data) => {
             handleExecutionCancelledConsole({
               workflowId,
-              executionId,
+              executionId: executionIdRef.current,
               durationMs: data?.duration,
             })
+
+            setCurrentExecutionId(workflowId, null)
+            setIsExecuting(workflowId, false)
+            setActiveBlocks(workflowId, new Set())
           },
         },
       })
@@ -1942,14 +2007,20 @@
       logger.error('Run-from-block failed:', error)
     }
   } finally {
-      setIsExecuting(workflowId, false)
-      setActiveBlocks(workflowId, new Set())
+      const currentId = getCurrentExecutionId(workflowId)
+      if (currentId === null || currentId === executionIdRef.current) {
+        setCurrentExecutionId(workflowId, null)
+        setIsExecuting(workflowId, false)
+        setActiveBlocks(workflowId, new Set())
+      }
     }
   },
   [
     getLastExecutionSnapshot,
     setLastExecutionSnapshot,
     clearLastExecutionSnapshot,
+    getCurrentExecutionId,
+    setCurrentExecutionId,
     setIsExecuting,
     setActiveBlocks,
     setBlockRunStatus,
@@ -1979,29 +2050,213 @@ export function useWorkflowExecution() {
       const executionId = uuidv4()
       try {
-        const result = await executeWorkflow(
-          undefined,
-          undefined,
-          executionId,
-          undefined,
-          'manual',
-          blockId
-        )
-        if (result && 'success' in result) {
-          setExecutionResult(result)
-        }
+        await executeWorkflow(undefined, undefined, executionId, undefined, 'manual', blockId)
       } catch (error) {
         const errorResult = handleExecutionError(error, { executionId })
         return errorResult
       } finally {
+        setCurrentExecutionId(workflowId, null)
         setIsExecuting(workflowId, false)
         setIsDebugging(workflowId, false)
         setActiveBlocks(workflowId, new Set())
       }
     },
-    [activeWorkflowId, setExecutionResult, setIsExecuting, setIsDebugging, setActiveBlocks]
+    [
+      activeWorkflowId,
+      setCurrentExecutionId,
+      setExecutionResult,
+      setIsExecuting,
+      setIsDebugging,
+      setActiveBlocks,
+    ]
   )
useEffect(() => {
if (!activeWorkflowId || !hasHydrated) return
const entries = useTerminalConsoleStore.getState().entries
const runningEntries = entries.filter(
(e) => e.isRunning && e.workflowId === activeWorkflowId && e.executionId
)
if (runningEntries.length === 0) return
if (activeReconnections.has(activeWorkflowId)) return
activeReconnections.add(activeWorkflowId)
executionStream.cancel(activeWorkflowId)
const sorted = [...runningEntries].sort((a, b) => {
const aTime = a.startedAt ? new Date(a.startedAt).getTime() : 0
const bTime = b.startedAt ? new Date(b.startedAt).getTime() : 0
return bTime - aTime
})
const executionId = sorted[0].executionId!
const otherExecutionIds = new Set(
sorted.filter((e) => e.executionId !== executionId).map((e) => e.executionId!)
)
if (otherExecutionIds.size > 0) {
cancelRunningEntries(activeWorkflowId)
}
setCurrentExecutionId(activeWorkflowId, executionId)
setIsExecuting(activeWorkflowId, true)
const workflowEdges = useWorkflowStore.getState().edges
const activeBlocksSet = new Set<string>()
const accumulatedBlockLogs: BlockLog[] = []
const accumulatedBlockStates = new Map<string, BlockState>()
const executedBlockIds = new Set<string>()
const executionIdRef = { current: executionId }
const handlers = buildBlockEventHandlers({
workflowId: activeWorkflowId,
executionIdRef,
workflowEdges,
activeBlocksSet,
accumulatedBlockLogs,
accumulatedBlockStates,
executedBlockIds,
consoleMode: 'update',
includeStartConsoleEntry: true,
})
const originalEntries = entries
.filter((e) => e.executionId === executionId)
.map((e) => ({ ...e }))
let cleared = false
let reconnectionComplete = false
let cleanupRan = false
const clearOnce = () => {
if (!cleared) {
cleared = true
clearExecutionEntries(executionId)
}
}
const reconnectWorkflowId = activeWorkflowId
executionStream
.reconnect({
workflowId: reconnectWorkflowId,
executionId,
callbacks: {
onBlockStarted: (data) => {
clearOnce()
handlers.onBlockStarted(data)
},
onBlockCompleted: (data) => {
clearOnce()
handlers.onBlockCompleted(data)
},
onBlockError: (data) => {
clearOnce()
handlers.onBlockError(data)
},
onExecutionCompleted: () => {
const currentId = useExecutionStore
.getState()
.getCurrentExecutionId(reconnectWorkflowId)
if (currentId !== executionId) {
reconnectionComplete = true
activeReconnections.delete(reconnectWorkflowId)
return
}
clearOnce()
reconnectionComplete = true
activeReconnections.delete(reconnectWorkflowId)
setCurrentExecutionId(reconnectWorkflowId, null)
setIsExecuting(reconnectWorkflowId, false)
setActiveBlocks(reconnectWorkflowId, new Set())
},
onExecutionError: (data) => {
const currentId = useExecutionStore
.getState()
.getCurrentExecutionId(reconnectWorkflowId)
if (currentId !== executionId) {
reconnectionComplete = true
activeReconnections.delete(reconnectWorkflowId)
return
}
clearOnce()
reconnectionComplete = true
activeReconnections.delete(reconnectWorkflowId)
setCurrentExecutionId(reconnectWorkflowId, null)
setIsExecuting(reconnectWorkflowId, false)
setActiveBlocks(reconnectWorkflowId, new Set())
handleExecutionErrorConsole({
workflowId: reconnectWorkflowId,
executionId,
error: data.error,
blockLogs: accumulatedBlockLogs,
})
},
onExecutionCancelled: () => {
const currentId = useExecutionStore
.getState()
.getCurrentExecutionId(reconnectWorkflowId)
if (currentId !== executionId) {
reconnectionComplete = true
activeReconnections.delete(reconnectWorkflowId)
return
}
clearOnce()
reconnectionComplete = true
activeReconnections.delete(reconnectWorkflowId)
setCurrentExecutionId(reconnectWorkflowId, null)
setIsExecuting(reconnectWorkflowId, false)
setActiveBlocks(reconnectWorkflowId, new Set())
handleExecutionCancelledConsole({
workflowId: reconnectWorkflowId,
executionId,
})
},
},
})
.catch((error) => {
logger.warn('Execution reconnection failed', { executionId, error })
})
.finally(() => {
if (reconnectionComplete || cleanupRan) return
const currentId = useExecutionStore.getState().getCurrentExecutionId(reconnectWorkflowId)
if (currentId !== executionId) return
reconnectionComplete = true
activeReconnections.delete(reconnectWorkflowId)
clearExecutionEntries(executionId)
for (const entry of originalEntries) {
addConsole({
workflowId: entry.workflowId,
blockId: entry.blockId,
blockName: entry.blockName,
blockType: entry.blockType,
executionId: entry.executionId,
executionOrder: entry.executionOrder,
isRunning: false,
warning: 'Execution result unavailable — check the logs page',
})
}
setCurrentExecutionId(reconnectWorkflowId, null)
setIsExecuting(reconnectWorkflowId, false)
setActiveBlocks(reconnectWorkflowId, new Set())
})
return () => {
cleanupRan = true
executionStream.cancel(reconnectWorkflowId)
activeReconnections.delete(reconnectWorkflowId)
if (cleared && !reconnectionComplete) {
clearExecutionEntries(executionId)
for (const entry of originalEntries) {
addConsole(entry)
}
}
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [activeWorkflowId, hasHydrated])
  return {
    isExecuting,
    isDebugging,
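
The thread running through this whole file: the server now assigns the execution id (delivered via `onExecutionId`), so handlers close over a mutable `{ current }` ref instead of a client-generated uuid, and each callback bails out when its id is no longer the store's current one. A reduced sketch of that guard, with the store interface assumed from the calls above:

// Reduced sketch of the stale-execution guard; the store shape is an assumption.
interface ExecutionIdStore {
  getCurrentExecutionId(workflowId: string): string | null
}

function makeStaleCheck(
  store: ExecutionIdStore,
  workflowId: string,
  executionIdRef: { current: string }
): () => boolean {
  // True once this run has been cancelled or superseded by a newer run.
  return () =>
    executionIdRef.current !== '' &&
    store.getCurrentExecutionId(workflowId) !== executionIdRef.current
}

// Each block-level callback then starts with `if (isStale()) return`,
// so a superseded stream can no longer mutate console entries or block state.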

View File

@@ -1,3 +1,4 @@
 export { CancelSubscription } from './cancel-subscription'
 export { CreditBalance } from './credit-balance'
 export { PlanCard, type PlanCardProps, type PlanFeature } from './plan-card'
+export { ReferralCode } from './referral-code'

View File

@@ -0,0 +1,101 @@
'use client'
import { useState } from 'react'
import { createLogger } from '@sim/logger'
import { Button, Input, Label } from '@/components/emcn'
const logger = createLogger('ReferralCode')
interface ReferralCodeProps {
onRedeemComplete?: () => void
}
/**
* Inline referral/promo code entry field with redeem button.
* One-time use per account — shows success or "already redeemed" state.
*/
export function ReferralCode({ onRedeemComplete }: ReferralCodeProps) {
const [code, setCode] = useState('')
const [isRedeeming, setIsRedeeming] = useState(false)
const [error, setError] = useState<string | null>(null)
const [success, setSuccess] = useState<{ bonusAmount: number } | null>(null)
const handleRedeem = async () => {
const trimmed = code.trim()
if (!trimmed || isRedeeming) return
setIsRedeeming(true)
setError(null)
try {
const response = await fetch('/api/referral-code/redeem', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ code: trimmed }),
})
const data = await response.json()
if (!response.ok) {
throw new Error(data.error || 'Failed to redeem code')
}
if (data.redeemed) {
setSuccess({ bonusAmount: data.bonusAmount })
setCode('')
onRedeemComplete?.()
} else {
setError(data.error || 'Code could not be redeemed')
}
} catch (err) {
logger.error('Referral code redemption failed', { error: err })
setError(err instanceof Error ? err.message : 'Failed to redeem code')
} finally {
setIsRedeeming(false)
}
}
if (success) {
return (
<div className='flex items-center justify-between'>
<Label>Referral Code</Label>
<span className='text-[12px] text-[var(--text-secondary)]'>
+${success.bonusAmount} credits applied
</span>
</div>
)
}
return (
<div className='flex items-center justify-between gap-[12px]'>
<Label className='shrink-0'>Referral Code</Label>
<div className='flex items-center gap-[8px]'>
<div className='flex flex-col'>
<Input
type='text'
value={code}
onChange={(e) => {
setCode(e.target.value)
setError(null)
}}
onKeyDown={(e) => {
if (e.key === 'Enter') handleRedeem()
}}
placeholder='Enter code'
className='h-[32px] w-[140px] text-[12px]'
disabled={isRedeeming}
/>
{error && <span className='mt-[4px] text-[11px] text-[var(--text-error)]'>{error}</span>}
</div>
<Button
variant='active'
className='h-[32px] shrink-0 rounded-[6px] text-[12px]'
onClick={handleRedeem}
disabled={isRedeeming || !code.trim()}
>
{isRedeeming ? 'Redeeming...' : 'Redeem'}
</Button>
</div>
</div>
)
}
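
The component implies a small contract for POST /api/referral-code/redeem. The shape below is inferred from its handlers, not taken from the server code:

// Inferred response contract (an assumption based on the component's handlers).
interface RedeemResponse {
  redeemed: boolean    // true when the code was applied to this account
  bonusAmount?: number // credits granted; rendered as "+$X credits applied"
  error?: string       // set on failure, e.g. invalid or already-redeemed codes
}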

View File

@@ -17,6 +17,7 @@ import {
   CancelSubscription,
   CreditBalance,
   PlanCard,
+  ReferralCode,
 } from '@/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/subscription/components'
 import {
   ENTERPRISE_PLAN_FEATURES,
@@ -549,6 +550,10 @@
             />
           )}

+          {!subscription.isEnterprise && (
+            <ReferralCode onRedeemComplete={() => refetchSubscription()} />
+          )}
+
           {/* Next Billing Date - hidden from team members */}
           {subscription.isPaid &&
             subscriptionData?.data?.periodEnd &&

View File

@@ -4,12 +4,14 @@ import { useEffect } from 'react'
 import { createLogger } from '@sim/logger'
 import { useRouter } from 'next/navigation'
 import { useSession } from '@/lib/auth/auth-client'
+import { useReferralAttribution } from '@/hooks/use-referral-attribution'

 const logger = createLogger('WorkspacePage')

 export default function WorkspacePage() {
   const router = useRouter()
   const { data: session, isPending } = useSession()
+  useReferralAttribution()

   useEffect(() => {
     const redirectToFirstWorkspace = async () => {
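
Only the mounting of useReferralAttribution is visible here; its body is not part of this diff. As a purely hypothetical sketch of what such a hook typically does (every identifier below, including the endpoint, is assumed):

// Entirely hypothetical: the real hook's implementation is not shown in this diff.
import { useEffect } from 'react'

export function useReferralAttributionSketch(): void {
  useEffect(() => {
    const code = new URLSearchParams(window.location.search).get('ref')
    if (!code) return
    // Record the referrer once so later redemptions can be attributed to a campaign.
    fetch('/api/referral/attribution', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ code }),
    }).catch(() => {})
  }, [])
}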

View File

@@ -589,6 +589,7 @@ export async function executeScheduleJob(payload: ScheduleExecutionPayload) {
 export const scheduleExecution = task({
   id: 'schedule-execution',
+  machine: 'medium-1x',
   retry: {
     maxAttempts: 1,
   },

View File

@@ -669,6 +669,7 @@ async function executeWebhookJobInternal(
 export const webhookExecution = task({
   id: 'webhook-execution',
+  machine: 'medium-1x',
   retry: {
     maxAttempts: 1,
   },

View File

@@ -197,5 +197,6 @@ export async function executeWorkflowJob(payload: WorkflowExecutionPayload) {
 export const workflowExecutionTask = task({
   id: 'workflow-execution',
+  machine: 'medium-1x',
   run: executeWorkflowJob,
 })

View File

@@ -10,9 +10,11 @@ import {
   getReasoningEffortValuesForModel,
   getThinkingLevelsForModel,
   getVerbosityValuesForModel,
+  MODELS_WITH_DEEP_RESEARCH,
   MODELS_WITH_REASONING_EFFORT,
   MODELS_WITH_THINKING,
   MODELS_WITH_VERBOSITY,
+  MODELS_WITHOUT_MEMORY,
   providers,
   supportsTemperature,
 } from '@/providers/utils'
@@ -412,12 +414,22 @@ Return ONLY the JSON array.`,
       title: 'Tools',
       type: 'tool-input',
       defaultValue: [],
+      condition: {
+        field: 'model',
+        value: MODELS_WITH_DEEP_RESEARCH,
+        not: true,
+      },
     },
     {
       id: 'skills',
       title: 'Skills',
       type: 'skill-input',
       defaultValue: [],
+      condition: {
+        field: 'model',
+        value: MODELS_WITH_DEEP_RESEARCH,
+        not: true,
+      },
     },
     {
       id: 'memoryType',
@@ -431,6 +443,11 @@ Return ONLY the JSON array.`,
         { label: 'Sliding window (tokens)', id: 'sliding_window_tokens' },
       ],
       defaultValue: 'none',
+      condition: {
+        field: 'model',
+        value: MODELS_WITHOUT_MEMORY,
+        not: true,
+      },
     },
     {
       id: 'conversationId',
@@ -444,6 +461,7 @@ Return ONLY the JSON array.`,
       condition: {
         field: 'memoryType',
         value: ['conversation', 'sliding_window', 'sliding_window_tokens'],
+        and: { field: 'model', value: MODELS_WITHOUT_MEMORY, not: true },
       },
     },
     {
@@ -454,6 +472,7 @@ Return ONLY the JSON array.`,
       condition: {
         field: 'memoryType',
         value: ['sliding_window'],
+        and: { field: 'model', value: MODELS_WITHOUT_MEMORY, not: true },
      },
    },
    {
@@ -464,6 +483,7 @@ Return ONLY the JSON array.`,
       condition: {
         field: 'memoryType',
         value: ['sliding_window_tokens'],
+        and: { field: 'model', value: MODELS_WITHOUT_MEMORY, not: true },
       },
     },
     {
@@ -477,9 +497,13 @@ Return ONLY the JSON array.`,
       condition: () => ({
         field: 'model',
         value: (() => {
+          const deepResearch = new Set(MODELS_WITH_DEEP_RESEARCH.map((m) => m.toLowerCase()))
           const allModels = Object.keys(getBaseModelProviders())
           return allModels.filter(
-            (model) => supportsTemperature(model) && getMaxTemperature(model) === 1
+            (model) =>
+              supportsTemperature(model) &&
+              getMaxTemperature(model) === 1 &&
+              !deepResearch.has(model.toLowerCase())
           )
         })(),
       }),
@@ -495,9 +519,13 @@ Return ONLY the JSON array.`,
       condition: () => ({
         field: 'model',
         value: (() => {
+          const deepResearch = new Set(MODELS_WITH_DEEP_RESEARCH.map((m) => m.toLowerCase()))
           const allModels = Object.keys(getBaseModelProviders())
           return allModels.filter(
-            (model) => supportsTemperature(model) && getMaxTemperature(model) === 2
+            (model) =>
+              supportsTemperature(model) &&
+              getMaxTemperature(model) === 2 &&
+              !deepResearch.has(model.toLowerCase())
           )
         })(),
       }),
@@ -508,6 +536,11 @@ Return ONLY the JSON array.`,
       type: 'short-input',
       placeholder: 'Enter max tokens (e.g., 4096)...',
       mode: 'advanced',
+      condition: {
+        field: 'model',
+        value: MODELS_WITH_DEEP_RESEARCH,
+        not: true,
+      },
     },
     {
       id: 'responseFormat',
@@ -515,6 +548,11 @@ Return ONLY the JSON array.`,
       type: 'code',
       placeholder: 'Enter JSON schema...',
       language: 'json',
+      condition: {
+        field: 'model',
+        value: MODELS_WITH_DEEP_RESEARCH,
+        not: true,
+      },
       wandConfig: {
         enabled: true,
         maintainHistory: true,
@@ -607,6 +645,16 @@ Example 3 (Array Input):
         generationType: 'json-schema',
       },
     },
+    {
+      id: 'previousInteractionId',
+      title: 'Previous Interaction ID',
+      type: 'short-input',
+      placeholder: 'e.g., {{agent_1.interactionId}}',
+      condition: {
+        field: 'model',
+        value: MODELS_WITH_DEEP_RESEARCH,
+      },
+    },
   ],
   tools: {
     access: [
@@ -770,5 +818,13 @@ Example 3 (Array Input):
       description: 'Provider timing information',
     },
     cost: { type: 'json', description: 'Cost of the API call' },
+    interactionId: {
+      type: 'string',
+      description: 'Interaction ID for multi-turn deep research follow-ups',
+      condition: {
+        field: 'model',
+        value: MODELS_WITH_DEEP_RESEARCH,
+      },
+    },
   },
 }
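
These `condition` objects read as declarative visibility rules over sub-block fields. A sketch of the semantics implied by the usage above (`value` list membership, optional `not` negation, optional `and` conjunction); this evaluator is inferred, not the platform's actual one:

// Assumed evaluator for the { field, value, not, and } condition shape used above.
interface FieldCondition {
  field: string
  value: string | string[]
  not?: boolean
  and?: FieldCondition
}

function conditionMatches(cond: FieldCondition, fields: Record<string, string>): boolean {
  const actual = fields[cond.field]
  const allowed = Array.isArray(cond.value) ? cond.value : [cond.value]
  let hit = allowed.includes(actual)
  if (cond.not) hit = !hit
  return hit && (cond.and ? conditionMatches(cond.and, fields) : true)
}

// e.g. maxTokens stays visible for a non-deep-research model:
// conditionMatches({ field: 'model', value: ['gemini-deep-research'], not: true }, { model: 'gpt-4o' }) === true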

View File

@@ -394,6 +394,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
     // Page Property Operations
     { label: 'List Page Properties', id: 'list_page_properties' },
     { label: 'Create Page Property', id: 'create_page_property' },
+    { label: 'Delete Page Property', id: 'delete_page_property' },
     // Search Operations
     { label: 'Search Content', id: 'search' },
     { label: 'Search in Space', id: 'search_in_space' },
@@ -414,6 +415,9 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
     // Label Operations
     { label: 'List Labels', id: 'list_labels' },
     { label: 'Add Label', id: 'add_label' },
+    { label: 'Delete Label', id: 'delete_label' },
+    { label: 'Get Pages by Label', id: 'get_pages_by_label' },
+    { label: 'List Space Labels', id: 'list_space_labels' },
     // Space Operations
     { label: 'Get Space', id: 'get_space' },
     { label: 'List Spaces', id: 'list_spaces' },
@@ -485,6 +489,8 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
           'search_in_space',
           'get_space',
           'list_spaces',
+          'get_pages_by_label',
+          'list_space_labels',
         ],
         not: true,
       },
@@ -500,6 +506,8 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
           'list_labels',
           'upload_attachment',
           'add_label',
+          'delete_label',
+          'delete_page_property',
           'get_page_children',
           'get_page_ancestors',
           'list_page_versions',
@@ -527,6 +535,8 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
           'search_in_space',
           'get_space',
           'list_spaces',
+          'get_pages_by_label',
+          'list_space_labels',
         ],
         not: true,
       },
@@ -542,6 +552,8 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
           'list_labels',
           'upload_attachment',
           'add_label',
+          'delete_label',
+          'delete_page_property',
           'get_page_children',
           'get_page_ancestors',
           'list_page_versions',
@@ -566,6 +578,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
           'search_in_space',
           'create_blogpost',
           'list_blogposts_in_space',
+          'list_space_labels',
         ],
       },
     },
@@ -601,6 +614,14 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
       required: true,
       condition: { field: 'operation', value: 'create_page_property' },
     },
+    {
+      id: 'propertyId',
+      title: 'Property ID',
+      type: 'short-input',
+      placeholder: 'Enter property ID to delete',
+      required: true,
+      condition: { field: 'operation', value: 'delete_page_property' },
+    },
     {
       id: 'title',
       title: 'Title',
@@ -694,7 +715,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
       type: 'short-input',
       placeholder: 'Enter label name',
       required: true,
-      condition: { field: 'operation', value: 'add_label' },
+      condition: { field: 'operation', value: ['add_label', 'delete_label'] },
     },
     {
       id: 'labelPrefix',
@@ -709,6 +730,14 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
       value: () => 'global',
       condition: { field: 'operation', value: 'add_label' },
     },
+    {
+      id: 'labelId',
+      title: 'Label ID',
+      type: 'short-input',
+      placeholder: 'Enter label ID',
+      required: true,
+      condition: { field: 'operation', value: 'get_pages_by_label' },
+    },
     {
       id: 'blogPostStatus',
       title: 'Status',
@@ -759,6 +788,8 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
           'list_page_versions',
           'list_page_properties',
           'list_labels',
+          'get_pages_by_label',
+          'list_space_labels',
         ],
       },
     },
@@ -780,6 +811,8 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
           'list_page_versions',
           'list_page_properties',
           'list_labels',
+          'get_pages_by_label',
+          'list_space_labels',
         ],
       },
     },
@@ -800,6 +833,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
       // Property Tools
       'confluence_list_page_properties',
       'confluence_create_page_property',
+      'confluence_delete_page_property',
       // Search Tools
       'confluence_search',
       'confluence_search_in_space',
@@ -820,6 +854,9 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
       // Label Tools
       'confluence_list_labels',
       'confluence_add_label',
+      'confluence_delete_label',
+      'confluence_get_pages_by_label',
+      'confluence_list_space_labels',
       // Space Tools
       'confluence_get_space',
       'confluence_list_spaces',
@@ -852,6 +889,8 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
         return 'confluence_list_page_properties'
       case 'create_page_property':
         return 'confluence_create_page_property'
+      case 'delete_page_property':
+        return 'confluence_delete_page_property'
       // Search Operations
       case 'search':
         return 'confluence_search'
@@ -887,6 +926,12 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
         return 'confluence_list_labels'
       case 'add_label':
         return 'confluence_add_label'
+      case 'delete_label':
+        return 'confluence_delete_label'
+      case 'get_pages_by_label':
+        return 'confluence_get_pages_by_label'
+      case 'list_space_labels':
+        return 'confluence_list_space_labels'
       // Space Operations
       case 'get_space':
         return 'confluence_get_space'
@@ -908,7 +953,9 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
         versionNumber,
         propertyKey,
         propertyValue,
+        propertyId,
         labelPrefix,
+        labelId,
         blogPostStatus,
         purge,
         bodyFormat,
@@ -959,7 +1006,9 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
         }
       }

-      // Operations that support cursor pagination
+      // Operations that support generic cursor pagination.
+      // get_pages_by_label and list_space_labels have dedicated handlers
+      // below that pass cursor along with their required params (labelId, spaceId).
       const supportsCursor = [
         'list_attachments',
         'list_spaces',
@@ -996,6 +1045,35 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
         }
       }

+      if (operation === 'delete_page_property') {
+        return {
+          credential,
+          pageId: effectivePageId,
+          operation,
+          propertyId,
+          ...rest,
+        }
+      }
+
+      if (operation === 'get_pages_by_label') {
+        return {
+          credential,
+          operation,
+          labelId,
+          cursor: cursor || undefined,
+          ...rest,
+        }
+      }
+
+      if (operation === 'list_space_labels') {
+        return {
+          credential,
+          operation,
+          cursor: cursor || undefined,
+          ...rest,
+        }
+      }
+
       if (operation === 'upload_attachment') {
         const normalizedFile = normalizeFileInput(attachmentFile, { single: true })
         if (!normalizedFile) {
@@ -1044,7 +1122,9 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
     attachmentFileName: { type: 'string', description: 'Custom file name for attachment' },
     attachmentComment: { type: 'string', description: 'Comment for the attachment' },
     labelName: { type: 'string', description: 'Label name' },
+    labelId: { type: 'string', description: 'Label identifier' },
     labelPrefix: { type: 'string', description: 'Label prefix (global, my, team, system)' },
+    propertyId: { type: 'string', description: 'Property identifier' },
     blogPostStatus: { type: 'string', description: 'Blog post status (current or draft)' },
     purge: { type: 'boolean', description: 'Permanently delete instead of moving to trash' },
     bodyFormat: { type: 'string', description: 'Body format for comments' },
@@ -1080,6 +1160,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
     // Label Results
     labels: { type: 'array', description: 'List of labels' },
     labelName: { type: 'string', description: 'Label name' },
+    labelId: { type: 'string', description: 'Label identifier' },
     // Space Results
     spaces: { type: 'array', description: 'List of spaces' },
     spaceId: { type: 'string', description: 'Space identifier' },
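
Since both new list operations thread `cursor` through their params, a caller pages by feeding each response's cursor back in. A sketch with an assumed runner signature and an assumed `nextCursor` field (only the cursor-in/cursor-out pattern comes from the diff):

// Sketch: drain all pages of list_space_labels. The runner and result shape are assumptions.
type LabelPage = { labels: unknown[]; nextCursor?: string }

async function collectSpaceLabels(
  run: (params: { operation: 'list_space_labels'; cursor?: string }) => Promise<LabelPage>
): Promise<unknown[]> {
  const all: unknown[] = []
  let cursor: string | undefined
  do {
    const page = await run({ operation: 'list_space_labels', cursor })
    all.push(...page.labels)
    cursor = page.nextCursor // undefined once the API reports no further pages
  } while (cursor)
  return all
}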

View File

@@ -2,8 +2,8 @@
 slug: enterprise
 title: 'Build with Sim for Enterprise'
 description: 'Access control, BYOK, self-hosted deployments, on-prem Copilot, SSO & SAML, whitelabeling, Admin API, and flexible data retention—enterprise features for teams with strict security and compliance requirements.'
-date: 2026-01-23
-updated: 2026-01-23
+date: 2026-02-11
+updated: 2026-02-11
 authors:
   - vik
 readingTime: 10
@@ -13,8 +13,8 @@ ogAlt: 'Sim Enterprise features overview'
 about: ['Enterprise Software', 'Security', 'Compliance', 'Self-Hosting']
 timeRequired: PT10M
 canonical: https://sim.ai/studio/enterprise
-featured: false
+featured: true
-draft: true
+draft: false
 ---

 We've been working with security teams at larger organizations to bring Sim into environments with strict compliance and data handling requirements. This post covers the enterprise capabilities we've built: granular access control, bring-your-own-keys, self-hosted deployments, on-prem Copilot, SSO & SAML, whitelabeling, compliance, and programmatic management via the Admin API.

View File

@@ -999,6 +999,7 @@ export class AgentBlockHandler implements BlockHandler {
reasoningEffort: inputs.reasoningEffort,
verbosity: inputs.verbosity,
thinkingLevel: inputs.thinkingLevel,
previousInteractionId: inputs.previousInteractionId,
}
}
@@ -1069,6 +1070,7 @@ export class AgentBlockHandler implements BlockHandler {
reasoningEffort: providerRequest.reasoningEffort,
verbosity: providerRequest.verbosity,
thinkingLevel: providerRequest.thinkingLevel,
previousInteractionId: providerRequest.previousInteractionId,
})
return this.processProviderResponse(response, block, responseFormat)
@@ -1269,6 +1271,7 @@ export class AgentBlockHandler implements BlockHandler {
content: result.content,
model: result.model,
...this.createResponseMetadata(result),
...(result.interactionId && { interactionId: result.interactionId }),
}
}

View File

@@ -20,6 +20,8 @@ export interface AgentInputs {
conversationId?: string // Required for all non-none memory types
slidingWindowSize?: string // For message-based sliding window
slidingWindowTokens?: string // For token-based sliding window
// Deep research multi-turn
previousInteractionId?: string // Interactions API previous interaction reference
// LLM parameters
temperature?: string
maxTokens?: string

View File

@@ -423,7 +423,7 @@ interface GenerateVersionDescriptionVariables {
const VERSION_DESCRIPTION_SYSTEM_PROMPT = `You are writing deployment version descriptions for a workflow automation platform.
-Write a brief, factual description (1-3 sentences, under 400 characters) that states what changed between versions.
Write a brief, factual description (1-3 sentences, under 2000 characters) that states what changed between versions.
Guidelines:
- Use the specific values provided (credential names, channel names, model names)

View File

@@ -1,4 +1,4 @@
-import { useCallback, useRef } from 'react'
import { useCallback } from 'react'
import { createLogger } from '@sim/logger'
import type {
BlockCompletedData,
@@ -16,6 +16,18 @@ import type { SerializableExecutionState } from '@/executor/execution/types'
const logger = createLogger('useExecutionStream')
/**
* Detects errors caused by the browser killing a fetch (page refresh, navigation, tab close).
* These should be treated as clean disconnects, not execution errors.
*/
function isClientDisconnectError(error: any): boolean {
if (error.name === 'AbortError') return true
const msg = (error.message ?? '').toLowerCase()
return (
msg.includes('network error') || msg.includes('failed to fetch') || msg.includes('load failed')
)
}
/**
* Processes SSE events from a response body and invokes appropriate callbacks.
*/
@@ -121,6 +133,7 @@ export interface ExecuteStreamOptions {
parallels?: Record<string, any>
}
stopAfterBlockId?: string
onExecutionId?: (executionId: string) => void
callbacks?: ExecutionStreamCallbacks
}
@@ -129,30 +142,40 @@ export interface ExecuteFromBlockOptions {
startBlockId: string
sourceSnapshot: SerializableExecutionState
input?: any
onExecutionId?: (executionId: string) => void
callbacks?: ExecutionStreamCallbacks
}
export interface ReconnectStreamOptions {
workflowId: string
executionId: string
fromEventId?: number
callbacks?: ExecutionStreamCallbacks
}
/**
* Module-level map shared across all hook instances.
* Ensures ANY instance can cancel streams started by ANY other instance,
* which is critical for SPA navigation where the original hook instance unmounts
* but the SSE stream must be cancellable from the new instance.
*/
const sharedAbortControllers = new Map<string, AbortController>()
/**
* Hook for executing workflows via server-side SSE streaming.
* Supports concurrent executions via per-workflow AbortController maps.
*/
export function useExecutionStream() {
-const abortControllersRef = useRef<Map<string, AbortController>>(new Map())
-const currentExecutionsRef = useRef<Map<string, { workflowId: string; executionId: string }>>(
-new Map()
-)
const execute = useCallback(async (options: ExecuteStreamOptions) => {
-const { workflowId, callbacks = {}, ...payload } = options
const { workflowId, callbacks = {}, onExecutionId, ...payload } = options
-const existing = abortControllersRef.current.get(workflowId)
const existing = sharedAbortControllers.get(workflowId)
if (existing) {
existing.abort()
}
const abortController = new AbortController()
-abortControllersRef.current.set(workflowId, abortController)
sharedAbortControllers.set(workflowId, abortController)
-currentExecutionsRef.current.delete(workflowId)
try {
const response = await fetch(`/api/workflows/${workflowId}/execute`, {
@@ -177,42 +200,48 @@ export function useExecutionStream() {
throw new Error('No response body')
}
-const executionId = response.headers.get('X-Execution-Id')
const serverExecutionId = response.headers.get('X-Execution-Id')
-if (executionId) {
if (serverExecutionId) {
-currentExecutionsRef.current.set(workflowId, { workflowId, executionId })
onExecutionId?.(serverExecutionId)
}
const reader = response.body.getReader()
await processSSEStream(reader, callbacks, 'Execution')
} catch (error: any) {
-if (error.name === 'AbortError') {
-logger.info('Execution stream cancelled')
-callbacks.onExecutionCancelled?.({ duration: 0 })
-} else {
-logger.error('Execution stream error:', error)
-callbacks.onExecutionError?.({
-error: error.message || 'Unknown error',
-duration: 0,
-})
-}
if (isClientDisconnectError(error)) {
logger.info('Execution stream disconnected (page unload or abort)')
return
}
logger.error('Execution stream error:', error)
callbacks.onExecutionError?.({
error: error.message || 'Unknown error',
duration: 0,
})
throw error
} finally {
-abortControllersRef.current.delete(workflowId)
-currentExecutionsRef.current.delete(workflowId)
if (sharedAbortControllers.get(workflowId) === abortController) {
sharedAbortControllers.delete(workflowId)
}
}
}, [])
const executeFromBlock = useCallback(async (options: ExecuteFromBlockOptions) => {
-const { workflowId, startBlockId, sourceSnapshot, input, callbacks = {} } = options
const {
workflowId,
startBlockId,
sourceSnapshot,
input,
onExecutionId,
callbacks = {},
} = options
-const existing = abortControllersRef.current.get(workflowId)
const existing = sharedAbortControllers.get(workflowId)
if (existing) {
existing.abort()
}
const abortController = new AbortController()
-abortControllersRef.current.set(workflowId, abortController)
sharedAbortControllers.set(workflowId, abortController)
-currentExecutionsRef.current.delete(workflowId)
try {
const response = await fetch(`/api/workflows/${workflowId}/execute`, {
@@ -246,64 +275,80 @@ export function useExecutionStream() {
throw new Error('No response body')
}
-const executionId = response.headers.get('X-Execution-Id')
const serverExecutionId = response.headers.get('X-Execution-Id')
-if (executionId) {
if (serverExecutionId) {
-currentExecutionsRef.current.set(workflowId, { workflowId, executionId })
onExecutionId?.(serverExecutionId)
}
const reader = response.body.getReader()
await processSSEStream(reader, callbacks, 'Run-from-block')
} catch (error: any) {
-if (error.name === 'AbortError') {
-logger.info('Run-from-block execution cancelled')
-callbacks.onExecutionCancelled?.({ duration: 0 })
-} else {
-logger.error('Run-from-block execution error:', error)
-callbacks.onExecutionError?.({
-error: error.message || 'Unknown error',
-duration: 0,
-})
-}
if (isClientDisconnectError(error)) {
logger.info('Run-from-block stream disconnected (page unload or abort)')
return
}
logger.error('Run-from-block execution error:', error)
callbacks.onExecutionError?.({
error: error.message || 'Unknown error',
duration: 0,
})
throw error
} finally {
-abortControllersRef.current.delete(workflowId)
-currentExecutionsRef.current.delete(workflowId)
if (sharedAbortControllers.get(workflowId) === abortController) {
sharedAbortControllers.delete(workflowId)
}
}
}, [])
const reconnect = useCallback(async (options: ReconnectStreamOptions) => {
const { workflowId, executionId, fromEventId = 0, callbacks = {} } = options
const existing = sharedAbortControllers.get(workflowId)
if (existing) {
existing.abort()
}
const abortController = new AbortController()
sharedAbortControllers.set(workflowId, abortController)
try {
const response = await fetch(
`/api/workflows/${workflowId}/executions/${executionId}/stream?from=${fromEventId}`,
{ signal: abortController.signal }
)
if (!response.ok) throw new Error(`Reconnect failed (${response.status})`)
if (!response.body) throw new Error('No response body')
await processSSEStream(response.body.getReader(), callbacks, 'Reconnect')
} catch (error: any) {
if (isClientDisconnectError(error)) return
logger.error('Reconnection stream error:', error)
throw error
} finally {
if (sharedAbortControllers.get(workflowId) === abortController) {
sharedAbortControllers.delete(workflowId)
}
}
}, [])
const cancel = useCallback((workflowId?: string) => {
if (workflowId) {
-const execution = currentExecutionsRef.current.get(workflowId)
-if (execution) {
-fetch(`/api/workflows/${execution.workflowId}/executions/${execution.executionId}/cancel`, {
-method: 'POST',
-}).catch(() => {})
-}
-const controller = abortControllersRef.current.get(workflowId)
const controller = sharedAbortControllers.get(workflowId)
if (controller) {
controller.abort()
-abortControllersRef.current.delete(workflowId)
sharedAbortControllers.delete(workflowId)
}
-currentExecutionsRef.current.delete(workflowId)
} else {
-for (const [, execution] of currentExecutionsRef.current) {
-fetch(`/api/workflows/${execution.workflowId}/executions/${execution.executionId}/cancel`, {
-method: 'POST',
-}).catch(() => {})
-}
-for (const [, controller] of abortControllersRef.current) {
for (const [, controller] of sharedAbortControllers) {
controller.abort()
}
-abortControllersRef.current.clear()
sharedAbortControllers.clear()
-currentExecutionsRef.current.clear()
}
}, [])
return {
execute,
executeFromBlock,
reconnect,
cancel,
}
}
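For orientation, a minimal consumer sketch of the new reconnection flow. The hook wiring, import path, and storage key are assumptions added for illustration — only `execute`, `reconnect`, `onExecutionId`, and `fromEventId` come from this diff:

// Editor's sketch (not part of the diff): persist the executionId handed back
// via onExecutionId, then resume the buffered SSE stream after a page refresh.
import { useEffect } from 'react'
import { useExecutionStream } from './use-execution-stream' // path assumed

const RESUME_KEY = 'sim:running-execution' // invented storage key

export function useResumableRun(workflowId: string) {
  const { execute, reconnect } = useExecutionStream()

  // On mount, replay any execution that was still streaming when the page unloaded.
  useEffect(() => {
    const saved = sessionStorage.getItem(RESUME_KEY)
    if (!saved) return
    const { executionId } = JSON.parse(saved) as { executionId: string }
    reconnect({ workflowId, executionId, fromEventId: 0 }).finally(() =>
      sessionStorage.removeItem(RESUME_KEY)
    )
  }, [workflowId, reconnect])

  return () =>
    execute({
      workflowId,
      onExecutionId: (executionId) =>
        sessionStorage.setItem(RESUME_KEY, JSON.stringify({ executionId })),
    })
}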

View File

@@ -0,0 +1,46 @@
'use client'
import { useEffect, useRef } from 'react'
import { createLogger } from '@sim/logger'
const logger = createLogger('ReferralAttribution')
const COOKIE_NAME = 'sim_utm'
const TERMINAL_REASONS = new Set([
'account_predates_cookie',
'invalid_cookie',
'no_utm_cookie',
'no_matching_campaign',
])
/**
* Fires a one-shot `POST /api/attribution` when a `sim_utm` cookie is present.
* Retries on transient failures; stops on terminal outcomes.
*/
export function useReferralAttribution() {
const calledRef = useRef(false)
useEffect(() => {
if (calledRef.current) return
if (!document.cookie.includes(COOKIE_NAME)) return
calledRef.current = true
fetch('/api/attribution', { method: 'POST' })
.then((res) => res.json())
.then((data) => {
if (data.attributed) {
logger.info('Referral attribution successful', { bonusAmount: data.bonusAmount })
} else if (data.error || TERMINAL_REASONS.has(data.reason)) {
logger.info('Referral attribution skipped', { reason: data.reason || data.error })
} else {
calledRef.current = false
}
})
.catch((err) => {
logger.warn('Referral attribution failed, will retry', { error: err })
calledRef.current = false
})
}, [])
}
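A short usage sketch: the component name and mount point are assumptions; the hook only needs to run once in a client component near the app root so the one-shot POST fires on first load.

'use client'

// Editor's sketch (not part of the diff): mount the hook purely for its side effect.
import { useReferralAttribution } from '@/hooks/use-referral-attribution' // path assumed

export function ReferralAttributionTracker() {
  useReferralAttribution()
  return null // renders nothing; exists only to fire the attribution call
}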

View File

@@ -0,0 +1,64 @@
import { db } from '@sim/db'
import { organization, userStats } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq, sql } from 'drizzle-orm'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import type { DbOrTx } from '@/lib/db/types'
const logger = createLogger('BonusCredits')
/**
* Apply bonus credits to a user (e.g. referral bonuses, promotional codes).
*
* Detects the user's current plan and routes credits accordingly:
* - Free/Pro: adds to `userStats.creditBalance` and increments `currentUsageLimit`
* - Team/Enterprise: adds to `organization.creditBalance` and increments `orgUsageLimit`
*
* Uses direct increment (not recalculation) so it works correctly for free-tier
* users where `setUsageLimitForCredits` would compute planBase=0 and skip the update.
*
* @param tx - Optional Drizzle transaction context. When provided, all DB writes
* participate in the caller's transaction for atomicity.
*/
export async function applyBonusCredits(
userId: string,
amount: number,
tx?: DbOrTx
): Promise<void> {
const dbCtx = tx ?? db
const subscription = await getHighestPrioritySubscription(userId)
const isTeamOrEnterprise = subscription?.plan === 'team' || subscription?.plan === 'enterprise'
if (isTeamOrEnterprise && subscription?.referenceId) {
const orgId = subscription.referenceId
await dbCtx
.update(organization)
.set({
creditBalance: sql`${organization.creditBalance} + ${amount}`,
orgUsageLimit: sql`COALESCE(${organization.orgUsageLimit}, '0')::decimal + ${amount}`,
})
.where(eq(organization.id, orgId))
logger.info('Applied bonus credits to organization', {
userId,
organizationId: orgId,
plan: subscription.plan,
amount,
})
} else {
await dbCtx
.update(userStats)
.set({
creditBalance: sql`${userStats.creditBalance} + ${amount}`,
currentUsageLimit: sql`COALESCE(${userStats.currentUsageLimit}, '0')::decimal + ${amount}`,
})
.where(eq(userStats.userId, userId))
logger.info('Applied bonus credits to user', {
userId,
plan: subscription?.plan || 'free',
amount,
})
}
}
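A sketch of the transactional usage the `tx` parameter enables. The surrounding redemption logic is hypothetical; only `applyBonusCredits` and its signature come from this diff:

import { db } from '@sim/db'
import { applyBonusCredits } from '@/lib/billing/credits/bonus' // path assumed

// Editor's sketch (not part of the diff): grant a bonus atomically with other writes.
async function redeemReferralBonus(userId: string, bonusAmount: number) {
  await db.transaction(async (tx) => {
    // ...insert the redemption record here using `tx`...
    // Passing `tx` keeps the credit increment in the same transaction,
    // so a failure later in this callback rolls the bonus back too.
    await applyBonusCredits(userId, bonusAmount, tx)
  })
}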

View File

@@ -20,6 +20,8 @@ export interface BuildPayloadParams {
fileAttachments?: Array<{ id: string; key: string; size: number; [key: string]: unknown }>
commands?: string[]
chatId?: string
conversationId?: string
prefetch?: boolean
implicitFeedback?: string
}
@@ -64,6 +66,10 @@ export async function buildCopilotRequestPayload(
fileAttachments,
commands,
chatId,
conversationId,
prefetch,
conversationHistory,
implicitFeedback,
} = params
const selectedModel = options.selectedModel
@@ -154,6 +160,12 @@ export async function buildCopilotRequestPayload(
version: SIM_AGENT_VERSION,
...(contexts && contexts.length > 0 ? { context: contexts } : {}),
...(chatId ? { chatId } : {}),
...(conversationId ? { conversationId } : {}),
...(Array.isArray(conversationHistory) && conversationHistory.length > 0
? { conversationHistory }
: {}),
...(typeof prefetch === 'boolean' ? { prefetch } : {}),
...(implicitFeedback ? { implicitFeedback } : {}),
...(processedFileContents.length > 0 ? { fileAttachments: processedFileContents } : {}),
...(integrationTools.length > 0 ? { integrationTools } : {}),
...(credentials ? { credentials } : {}),

View File

@@ -1,7 +1,7 @@
import { db } from '@sim/db'
-import { workflow } from '@sim/db/schema'
import { customTools, workflow } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
-import { eq } from 'drizzle-orm'
import { and, desc, eq, isNull, or } from 'drizzle-orm'
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
import type {
ExecutionContext,
@@ -12,6 +12,7 @@ import { routeExecution } from '@/lib/copilot/tools/server/router'
import { env } from '@/lib/core/config/env'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
import { upsertCustomTools } from '@/lib/workflows/custom-tools/operations'
import { getTool, resolveToolId } from '@/tools/utils'
import {
executeCheckDeploymentStatus,
@@ -76,6 +77,247 @@ import {
const logger = createLogger('CopilotToolExecutor')
type ManageCustomToolOperation = 'add' | 'edit' | 'delete' | 'list'
interface ManageCustomToolSchema {
type: 'function'
function: {
name: string
description?: string
parameters: Record<string, unknown>
}
}
interface ManageCustomToolParams {
operation?: string
toolId?: string
schema?: ManageCustomToolSchema
code?: string
title?: string
workspaceId?: string
}
async function executeManageCustomTool(
rawParams: Record<string, unknown>,
context: ExecutionContext
): Promise<ToolCallResult> {
const params = rawParams as ManageCustomToolParams
const operation = String(params.operation || '').toLowerCase() as ManageCustomToolOperation
const workspaceId = params.workspaceId || context.workspaceId
if (!operation) {
return { success: false, error: "Missing required 'operation' argument" }
}
try {
if (operation === 'list') {
const toolsForUser = workspaceId
? await db
.select()
.from(customTools)
.where(
or(
eq(customTools.workspaceId, workspaceId),
and(isNull(customTools.workspaceId), eq(customTools.userId, context.userId))
)
)
.orderBy(desc(customTools.createdAt))
: await db
.select()
.from(customTools)
.where(and(isNull(customTools.workspaceId), eq(customTools.userId, context.userId)))
.orderBy(desc(customTools.createdAt))
return {
success: true,
output: {
success: true,
operation,
tools: toolsForUser,
count: toolsForUser.length,
},
}
}
if (operation === 'add') {
if (!workspaceId) {
return {
success: false,
error: "workspaceId is required for operation 'add'",
}
}
if (!params.schema || !params.code) {
return {
success: false,
error: "Both 'schema' and 'code' are required for operation 'add'",
}
}
const title = params.title || params.schema.function?.name
if (!title) {
return { success: false, error: "Missing tool title or schema.function.name for 'add'" }
}
const resultTools = await upsertCustomTools({
tools: [
{
title,
schema: params.schema,
code: params.code,
},
],
workspaceId,
userId: context.userId,
})
const created = resultTools.find((tool) => tool.title === title)
return {
success: true,
output: {
success: true,
operation,
toolId: created?.id,
title,
message: `Created custom tool "${title}"`,
},
}
}
if (operation === 'edit') {
if (!workspaceId) {
return {
success: false,
error: "workspaceId is required for operation 'edit'",
}
}
if (!params.toolId) {
return { success: false, error: "'toolId' is required for operation 'edit'" }
}
if (!params.schema && !params.code) {
return {
success: false,
error: "At least one of 'schema' or 'code' is required for operation 'edit'",
}
}
const workspaceTool = await db
.select()
.from(customTools)
.where(and(eq(customTools.id, params.toolId), eq(customTools.workspaceId, workspaceId)))
.limit(1)
const legacyTool =
workspaceTool.length === 0
? await db
.select()
.from(customTools)
.where(
and(
eq(customTools.id, params.toolId),
isNull(customTools.workspaceId),
eq(customTools.userId, context.userId)
)
)
.limit(1)
: []
const existing = workspaceTool[0] || legacyTool[0]
if (!existing) {
return { success: false, error: `Custom tool not found: ${params.toolId}` }
}
const mergedSchema = params.schema || (existing.schema as ManageCustomToolSchema)
const mergedCode = params.code || existing.code
const title = params.title || mergedSchema.function?.name || existing.title
await upsertCustomTools({
tools: [
{
id: params.toolId,
title,
schema: mergedSchema,
code: mergedCode,
},
],
workspaceId,
userId: context.userId,
})
return {
success: true,
output: {
success: true,
operation,
toolId: params.toolId,
title,
message: `Updated custom tool "${title}"`,
},
}
}
if (operation === 'delete') {
if (!params.toolId) {
return { success: false, error: "'toolId' is required for operation 'delete'" }
}
const workspaceDelete =
workspaceId != null
? await db
.delete(customTools)
.where(
and(eq(customTools.id, params.toolId), eq(customTools.workspaceId, workspaceId))
)
.returning({ id: customTools.id })
: []
const legacyDelete =
workspaceDelete.length === 0
? await db
.delete(customTools)
.where(
and(
eq(customTools.id, params.toolId),
isNull(customTools.workspaceId),
eq(customTools.userId, context.userId)
)
)
.returning({ id: customTools.id })
: []
const deleted = workspaceDelete[0] || legacyDelete[0]
if (!deleted) {
return { success: false, error: `Custom tool not found: ${params.toolId}` }
}
return {
success: true,
output: {
success: true,
operation,
toolId: params.toolId,
message: 'Deleted custom tool',
},
}
}
return {
success: false,
error: `Unsupported operation for manage_custom_tool: ${operation}`,
}
} catch (error) {
logger.error('manage_custom_tool execution failed', {
operation,
workspaceId,
userId: context.userId,
error: error instanceof Error ? error.message : String(error),
})
return {
success: false,
error: error instanceof Error ? error.message : 'Failed to manage custom tool',
}
}
}
const SERVER_TOOLS = new Set<string>([
'get_blocks_and_tools',
'get_blocks_metadata',
@@ -161,6 +403,19 @@ const SIM_WORKFLOW_TOOL_HANDLERS: Record<
}
}
},
oauth_request_access: async (p, _c) => {
const providerName = (p.providerName || p.provider_name || 'the provider') as string
return {
success: true,
output: {
success: true,
status: 'requested',
providerName,
message: `Requested ${providerName} OAuth connection. The user should complete the OAuth modal in the UI, then retry credential-dependent actions.`,
},
}
},
manage_custom_tool: (p, c) => executeManageCustomTool(p, c),
}
/**
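For orientation, a hedged sketch of invoking the new handler directly. The context values and the example tool are invented; the params shape follows ManageCustomToolParams above:

// Editor's sketch (not part of the diff): an 'add' call through the handler map.
const result = await SIM_WORKFLOW_TOOL_HANDLERS.manage_custom_tool(
  {
    operation: 'add',
    title: 'fetch_weather', // invented example tool
    schema: {
      type: 'function',
      function: { name: 'fetch_weather', parameters: { type: 'object', properties: {} } },
    },
    code: 'return { ok: true }',
  },
  { userId: 'user_123', workspaceId: 'ws_456' } as ExecutionContext // values invented
)
// On success: { success: true, output: { operation: 'add', toolId, title, message } }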

View File

@@ -0,0 +1,246 @@
import { createLogger } from '@sim/logger'
import { getRedisClient } from '@/lib/core/config/redis'
import type { ExecutionEvent } from '@/lib/workflows/executor/execution-events'
const logger = createLogger('ExecutionEventBuffer')
const REDIS_PREFIX = 'execution:stream:'
const TTL_SECONDS = 60 * 60 // 1 hour
const EVENT_LIMIT = 1000
const RESERVE_BATCH = 100
const FLUSH_INTERVAL_MS = 15
const FLUSH_MAX_BATCH = 200
function getEventsKey(executionId: string) {
return `${REDIS_PREFIX}${executionId}:events`
}
function getSeqKey(executionId: string) {
return `${REDIS_PREFIX}${executionId}:seq`
}
function getMetaKey(executionId: string) {
return `${REDIS_PREFIX}${executionId}:meta`
}
export type ExecutionStreamStatus = 'active' | 'complete' | 'error' | 'cancelled'
export interface ExecutionStreamMeta {
status: ExecutionStreamStatus
userId?: string
workflowId?: string
updatedAt?: string
}
export interface ExecutionEventEntry {
eventId: number
executionId: string
event: ExecutionEvent
}
export interface ExecutionEventWriter {
write: (event: ExecutionEvent) => Promise<ExecutionEventEntry>
flush: () => Promise<void>
close: () => Promise<void>
}
export async function setExecutionMeta(
executionId: string,
meta: Partial<ExecutionStreamMeta>
): Promise<void> {
const redis = getRedisClient()
if (!redis) {
logger.warn('setExecutionMeta: Redis client unavailable', { executionId })
return
}
try {
const key = getMetaKey(executionId)
const payload: Record<string, string> = {
updatedAt: new Date().toISOString(),
}
if (meta.status) payload.status = meta.status
if (meta.userId) payload.userId = meta.userId
if (meta.workflowId) payload.workflowId = meta.workflowId
await redis.hset(key, payload)
await redis.expire(key, TTL_SECONDS)
} catch (error) {
logger.warn('Failed to update execution meta', {
executionId,
error: error instanceof Error ? error.message : String(error),
})
}
}
export async function getExecutionMeta(executionId: string): Promise<ExecutionStreamMeta | null> {
const redis = getRedisClient()
if (!redis) {
logger.warn('getExecutionMeta: Redis client unavailable', { executionId })
return null
}
try {
const key = getMetaKey(executionId)
const meta = await redis.hgetall(key)
if (!meta || Object.keys(meta).length === 0) return null
return meta as unknown as ExecutionStreamMeta
} catch (error) {
logger.warn('Failed to read execution meta', {
executionId,
error: error instanceof Error ? error.message : String(error),
})
return null
}
}
export async function readExecutionEvents(
executionId: string,
afterEventId: number
): Promise<ExecutionEventEntry[]> {
const redis = getRedisClient()
if (!redis) return []
try {
const raw = await redis.zrangebyscore(getEventsKey(executionId), afterEventId + 1, '+inf')
return raw
.map((entry) => {
try {
return JSON.parse(entry) as ExecutionEventEntry
} catch {
return null
}
})
.filter((entry): entry is ExecutionEventEntry => Boolean(entry))
} catch (error) {
logger.warn('Failed to read execution events', {
executionId,
error: error instanceof Error ? error.message : String(error),
})
return []
}
}
export function createExecutionEventWriter(executionId: string): ExecutionEventWriter {
const redis = getRedisClient()
if (!redis) {
logger.warn(
'createExecutionEventWriter: Redis client unavailable, events will not be buffered',
{
executionId,
}
)
return {
write: async (event) => ({ eventId: 0, executionId, event }),
flush: async () => {},
close: async () => {},
}
}
let pending: ExecutionEventEntry[] = []
let nextEventId = 0
let maxReservedId = 0
let flushTimer: ReturnType<typeof setTimeout> | null = null
const scheduleFlush = () => {
if (flushTimer) return
flushTimer = setTimeout(() => {
flushTimer = null
void flush()
}, FLUSH_INTERVAL_MS)
}
const reserveIds = async (minCount: number) => {
const reserveCount = Math.max(RESERVE_BATCH, minCount)
const newMax = await redis.incrby(getSeqKey(executionId), reserveCount)
const startId = newMax - reserveCount + 1
if (nextEventId === 0 || nextEventId > maxReservedId) {
nextEventId = startId
maxReservedId = newMax
}
}
let flushPromise: Promise<void> | null = null
let closed = false
const inflightWrites = new Set<Promise<ExecutionEventEntry>>()
const doFlush = async () => {
if (pending.length === 0) return
const batch = pending
pending = []
try {
const key = getEventsKey(executionId)
const zaddArgs: (string | number)[] = []
for (const entry of batch) {
zaddArgs.push(entry.eventId, JSON.stringify(entry))
}
const pipeline = redis.pipeline()
pipeline.zadd(key, ...zaddArgs)
pipeline.expire(key, TTL_SECONDS)
pipeline.expire(getSeqKey(executionId), TTL_SECONDS)
pipeline.zremrangebyrank(key, 0, -EVENT_LIMIT - 1)
await pipeline.exec()
} catch (error) {
logger.warn('Failed to flush execution events', {
executionId,
batchSize: batch.length,
error: error instanceof Error ? error.message : String(error),
stack: error instanceof Error ? error.stack : undefined,
})
pending = batch.concat(pending)
}
}
const flush = async () => {
if (flushPromise) {
await flushPromise
return
}
flushPromise = doFlush()
try {
await flushPromise
} finally {
flushPromise = null
if (pending.length > 0) scheduleFlush()
}
}
const writeCore = async (event: ExecutionEvent): Promise<ExecutionEventEntry> => {
if (closed) return { eventId: 0, executionId, event }
if (nextEventId === 0 || nextEventId > maxReservedId) {
await reserveIds(1)
}
const eventId = nextEventId++
const entry: ExecutionEventEntry = { eventId, executionId, event }
pending.push(entry)
if (pending.length >= FLUSH_MAX_BATCH) {
await flush()
} else {
scheduleFlush()
}
return entry
}
const write = (event: ExecutionEvent): Promise<ExecutionEventEntry> => {
const p = writeCore(event)
inflightWrites.add(p)
const remove = () => inflightWrites.delete(p)
p.then(remove, remove)
return p
}
const close = async () => {
closed = true
if (flushTimer) {
clearTimeout(flushTimer)
flushTimer = null
}
if (inflightWrites.size > 0) {
await Promise.allSettled(inflightWrites)
}
if (flushPromise) {
await flushPromise
}
if (pending.length > 0) {
await doFlush()
}
}
return { write, flush, close }
}
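A round-trip sketch of the buffer. The IDs and the event payload are placeholders; the functions and the `ExecutionEvent` import are from this file:

// Editor's sketch (not part of the diff): write events during a run, then
// replay everything a reconnecting client missed.
const executionId = 'exec_123' // placeholder
const writer = createExecutionEventWriter(executionId)

await setExecutionMeta(executionId, { status: 'active', workflowId: 'wf_1' })
await writer.write({ type: 'execution.started' } as unknown as ExecutionEvent) // shape elided
await writer.close() // waits for in-flight writes and flushes the pending batch

// A client that last saw eventId 0 replays the whole buffer:
const missed = await readExecutionEvents(executionId, 0)
for (const entry of missed) {
  console.log(entry.eventId, entry.event)
}
await setExecutionMeta(executionId, { status: 'complete' })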

View File

@@ -2364,6 +2364,261 @@ describe('hasWorkflowChanged', () => {
})
})
describe('Trigger Config Normalization (False Positive Prevention)', () => {
it.concurrent(
'should not detect change when deployed has null fields but current has values from triggerConfig',
() => {
// Core scenario: deployed state has null individual fields, current state has
// values populated from triggerConfig at runtime by populateTriggerFieldsFromConfig
const deployedState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
botToken: { id: 'botToken', type: 'short-input', value: null },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123', botToken: 'token456' },
},
},
}),
},
})
const currentState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: 'secret123' },
botToken: { id: 'botToken', type: 'short-input', value: 'token456' },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123', botToken: 'token456' },
},
},
}),
},
})
expect(hasWorkflowChanged(currentState, deployedState)).toBe(false)
}
)
it.concurrent(
'should detect change when user edits a trigger field to a different value',
() => {
const deployedState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'old-secret' },
},
},
}),
},
})
const currentState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: 'new-secret' },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'old-secret' },
},
},
}),
},
})
expect(hasWorkflowChanged(currentState, deployedState)).toBe(true)
}
)
it.concurrent('should not detect change when both sides have no triggerConfig', () => {
const deployedState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
},
}),
},
})
const currentState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
},
}),
},
})
expect(hasWorkflowChanged(currentState, deployedState)).toBe(false)
})
it.concurrent(
'should not detect change when deployed has empty fields and triggerConfig populates them',
() => {
// Empty string is also treated as "empty" by normalizeTriggerConfigValues
const deployedState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: '' },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123' },
},
},
}),
},
})
const currentState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: 'secret123' },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123' },
},
},
}),
},
})
expect(hasWorkflowChanged(currentState, deployedState)).toBe(false)
}
)
it.concurrent('should not detect change when triggerId differs', () => {
const deployedState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
model: { value: 'gpt-4' },
triggerId: { value: null },
},
}),
},
})
const currentState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
model: { value: 'gpt-4' },
triggerId: { value: 'slack_webhook' },
},
}),
},
})
expect(hasWorkflowChanged(currentState, deployedState)).toBe(false)
})
it.concurrent(
'should not detect change for namespaced system subBlock IDs like samplePayload_slack_webhook',
() => {
const deployedState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
model: { value: 'gpt-4' },
samplePayload_slack_webhook: { value: 'old payload' },
triggerInstructions_slack_webhook: { value: 'old instructions' },
},
}),
},
})
const currentState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
model: { value: 'gpt-4' },
samplePayload_slack_webhook: { value: 'new payload' },
triggerInstructions_slack_webhook: { value: 'new instructions' },
},
}),
},
})
expect(hasWorkflowChanged(currentState, deployedState)).toBe(false)
}
)
it.concurrent(
'should handle mixed scenario: some fields from triggerConfig, some user-edited',
() => {
const deployedState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
botToken: { id: 'botToken', type: 'short-input', value: null },
includeFiles: { id: 'includeFiles', type: 'switch', value: false },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123', botToken: 'token456' },
},
},
}),
},
})
const currentState = createWorkflowState({
blocks: {
block1: createBlock('block1', {
type: 'starter',
subBlocks: {
signingSecret: { id: 'signingSecret', type: 'short-input', value: 'secret123' },
botToken: { id: 'botToken', type: 'short-input', value: 'token456' },
includeFiles: { id: 'includeFiles', type: 'switch', value: true },
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123', botToken: 'token456' },
},
},
}),
},
})
// includeFiles changed from false to true — this IS a real change
expect(hasWorkflowChanged(currentState, deployedState)).toBe(true)
}
)
})
describe('Trigger Runtime Metadata (Should Not Trigger Change)', () => {
it.concurrent('should not detect change when webhookId differs', () => {
const deployedState = createWorkflowState({

View File

@@ -9,6 +9,7 @@ import {
normalizeLoop,
normalizeParallel,
normalizeSubBlockValue,
normalizeTriggerConfigValues,
normalizeValue,
normalizeVariables,
sanitizeVariable,
@@ -172,14 +173,18 @@ export function generateWorkflowDiffSummary(
}
}
// Normalize trigger config values for both states before comparison
const normalizedCurrentSubs = normalizeTriggerConfigValues(currentSubBlocks)
const normalizedPreviousSubs = normalizeTriggerConfigValues(previousSubBlocks)
// Compare subBlocks using shared helper for filtering (single source of truth)
const allSubBlockIds = filterSubBlockIds([
-...new Set([...Object.keys(currentSubBlocks), ...Object.keys(previousSubBlocks)]),
...new Set([...Object.keys(normalizedCurrentSubs), ...Object.keys(normalizedPreviousSubs)]),
])
for (const subId of allSubBlockIds) {
-const currentSub = currentSubBlocks[subId] as Record<string, unknown> | undefined
const currentSub = normalizedCurrentSubs[subId] as Record<string, unknown> | undefined
-const previousSub = previousSubBlocks[subId] as Record<string, unknown> | undefined
const previousSub = normalizedPreviousSubs[subId] as Record<string, unknown> | undefined
if (!currentSub || !previousSub) {
changes.push({

View File

@@ -4,10 +4,12 @@
import { describe, expect, it } from 'vitest'
import type { Loop, Parallel } from '@/stores/workflows/workflow/types'
import {
filterSubBlockIds,
normalizedStringify,
normalizeEdge,
normalizeLoop,
normalizeParallel,
normalizeTriggerConfigValues,
normalizeValue,
sanitizeInputFormat,
sanitizeTools,
@@ -584,4 +586,214 @@ describe('Workflow Normalization Utilities', () => {
expect(result2).toBe(result3)
})
})
describe('filterSubBlockIds', () => {
it.concurrent('should exclude exact SYSTEM_SUBBLOCK_IDS', () => {
const ids = ['signingSecret', 'samplePayload', 'triggerInstructions', 'botToken']
const result = filterSubBlockIds(ids)
expect(result).toEqual(['botToken', 'signingSecret'])
})
it.concurrent('should exclude namespaced SYSTEM_SUBBLOCK_IDS (prefix matching)', () => {
const ids = [
'signingSecret',
'samplePayload_slack_webhook',
'triggerInstructions_slack_webhook',
'webhookUrlDisplay_slack_webhook',
'botToken',
]
const result = filterSubBlockIds(ids)
expect(result).toEqual(['botToken', 'signingSecret'])
})
it.concurrent('should exclude exact TRIGGER_RUNTIME_SUBBLOCK_IDS', () => {
const ids = ['webhookId', 'triggerPath', 'triggerConfig', 'triggerId', 'signingSecret']
const result = filterSubBlockIds(ids)
expect(result).toEqual(['signingSecret'])
})
it.concurrent('should not exclude IDs that merely contain a system ID substring', () => {
const ids = ['mySamplePayload', 'notSamplePayload']
const result = filterSubBlockIds(ids)
expect(result).toEqual(['mySamplePayload', 'notSamplePayload'])
})
it.concurrent('should return sorted results', () => {
const ids = ['zebra', 'alpha', 'middle']
const result = filterSubBlockIds(ids)
expect(result).toEqual(['alpha', 'middle', 'zebra'])
})
it.concurrent('should handle empty array', () => {
expect(filterSubBlockIds([])).toEqual([])
})
it.concurrent('should handle all IDs being excluded', () => {
const ids = ['webhookId', 'triggerPath', 'samplePayload', 'triggerConfig']
const result = filterSubBlockIds(ids)
expect(result).toEqual([])
})
it.concurrent('should exclude setupScript and scheduleInfo namespaced variants', () => {
const ids = ['setupScript_google_sheets_row', 'scheduleInfo_cron_trigger', 'realField']
const result = filterSubBlockIds(ids)
expect(result).toEqual(['realField'])
})
it.concurrent('should exclude triggerCredentials namespaced variants', () => {
const ids = ['triggerCredentials_slack_webhook', 'signingSecret']
const result = filterSubBlockIds(ids)
expect(result).toEqual(['signingSecret'])
})
})
describe('normalizeTriggerConfigValues', () => {
it.concurrent('should return subBlocks unchanged when no triggerConfig exists', () => {
const subBlocks = {
signingSecret: { id: 'signingSecret', type: 'short-input', value: 'secret123' },
botToken: { id: 'botToken', type: 'short-input', value: 'token456' },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect(result).toEqual(subBlocks)
})
it.concurrent('should return subBlocks unchanged when triggerConfig value is null', () => {
const subBlocks = {
triggerConfig: { id: 'triggerConfig', type: 'short-input', value: null },
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect(result).toEqual(subBlocks)
})
it.concurrent(
'should return subBlocks unchanged when triggerConfig value is not an object',
() => {
const subBlocks = {
triggerConfig: { id: 'triggerConfig', type: 'short-input', value: 'string-value' },
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect(result).toEqual(subBlocks)
}
)
it.concurrent('should populate null individual fields from triggerConfig', () => {
const subBlocks = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123', botToken: 'token456' },
},
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
botToken: { id: 'botToken', type: 'short-input', value: null },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect((result.signingSecret as Record<string, unknown>).value).toBe('secret123')
expect((result.botToken as Record<string, unknown>).value).toBe('token456')
})
it.concurrent('should populate undefined individual fields from triggerConfig', () => {
const subBlocks = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123' },
},
signingSecret: { id: 'signingSecret', type: 'short-input', value: undefined },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect((result.signingSecret as Record<string, unknown>).value).toBe('secret123')
})
it.concurrent('should populate empty string individual fields from triggerConfig', () => {
const subBlocks = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123' },
},
signingSecret: { id: 'signingSecret', type: 'short-input', value: '' },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect((result.signingSecret as Record<string, unknown>).value).toBe('secret123')
})
it.concurrent('should NOT overwrite existing non-empty individual field values', () => {
const subBlocks = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'old-secret' },
},
signingSecret: { id: 'signingSecret', type: 'short-input', value: 'user-edited-secret' },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect((result.signingSecret as Record<string, unknown>).value).toBe('user-edited-secret')
})
it.concurrent('should skip triggerConfig fields that are null/undefined', () => {
const subBlocks = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: null, botToken: undefined },
},
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
botToken: { id: 'botToken', type: 'short-input', value: null },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect((result.signingSecret as Record<string, unknown>).value).toBe(null)
expect((result.botToken as Record<string, unknown>).value).toBe(null)
})
it.concurrent('should skip fields from triggerConfig that have no matching subBlock', () => {
const subBlocks = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { nonExistentField: 'value123' },
},
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
}
const result = normalizeTriggerConfigValues(subBlocks)
expect(result.nonExistentField).toBeUndefined()
expect((result.signingSecret as Record<string, unknown>).value).toBe(null)
})
it.concurrent('should not mutate the original subBlocks object', () => {
const original = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123' },
},
signingSecret: { id: 'signingSecret', type: 'short-input', value: null },
}
normalizeTriggerConfigValues(original)
expect((original.signingSecret as Record<string, unknown>).value).toBe(null)
})
it.concurrent('should preserve other subBlock properties when populating value', () => {
const subBlocks = {
triggerConfig: {
id: 'triggerConfig',
type: 'short-input',
value: { signingSecret: 'secret123' },
},
signingSecret: {
id: 'signingSecret',
type: 'short-input',
value: null,
placeholder: 'Enter signing secret',
},
}
const result = normalizeTriggerConfigValues(subBlocks)
const normalized = result.signingSecret as Record<string, unknown>
expect(normalized.value).toBe('secret123')
expect(normalized.id).toBe('signingSecret')
expect(normalized.type).toBe('short-input')
expect(normalized.placeholder).toBe('Enter signing secret')
})
})
})

View File

@@ -418,10 +418,48 @@ export function extractBlockFieldsForComparison(block: BlockState): ExtractedBlo
*/
export function filterSubBlockIds(subBlockIds: string[]): string[] {
return subBlockIds
-.filter((id) => !SYSTEM_SUBBLOCK_IDS.includes(id) && !TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(id))
.filter((id) => {
if (TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(id)) return false
if (SYSTEM_SUBBLOCK_IDS.some((sysId) => id === sysId || id.startsWith(`${sysId}_`)))
return false
return true
})
.sort()
}
/**
* Normalizes trigger block subBlocks by populating null/empty individual fields
* from the triggerConfig aggregate subBlock. This compensates for the runtime
* population done by populateTriggerFieldsFromConfig, ensuring consistent
* comparison between client state (with populated values) and deployed state
* (with null values from DB).
*/
export function normalizeTriggerConfigValues(
subBlocks: Record<string, unknown>
): Record<string, unknown> {
const triggerConfigSub = subBlocks.triggerConfig as Record<string, unknown> | undefined
const triggerConfigValue = triggerConfigSub?.value
if (!triggerConfigValue || typeof triggerConfigValue !== 'object') {
return subBlocks
}
const result = { ...subBlocks }
for (const [fieldId, configValue] of Object.entries(
triggerConfigValue as Record<string, unknown>
)) {
if (configValue === null || configValue === undefined) continue
const existingSub = result[fieldId] as Record<string, unknown> | undefined
if (
existingSub &&
(existingSub.value === null || existingSub.value === undefined || existingSub.value === '')
) {
result[fieldId] = { ...existingSub, value: configValue }
}
}
return result
}
/**
* Normalizes a subBlock value with sanitization for specific subBlock types.
* Sanitizes: tools (removes isExpanded), inputFormat (removes collapsed)

View File

@@ -5,6 +5,7 @@ import {
type GenerateContentConfig,
type GenerateContentResponse,
type GoogleGenAI,
type Interactions,
type Part,
type Schema,
type ThinkingConfig,
@@ -27,6 +28,7 @@ import {
import type { FunctionCallResponse, ProviderRequest, ProviderResponse } from '@/providers/types'
import {
calculateCost,
isDeepResearchModel,
prepareToolExecution,
prepareToolsWithUsageControl,
} from '@/providers/utils'
@@ -381,6 +383,468 @@ export interface GeminiExecutionConfig {
providerType: GeminiProviderType
}
const DEEP_RESEARCH_POLL_INTERVAL_MS = 10_000
const DEEP_RESEARCH_MAX_DURATION_MS = 60 * 60 * 1000
/**
* Sleeps for the specified number of milliseconds
*/
function sleep(ms: number): Promise<void> {
return new Promise((resolve) => setTimeout(resolve, ms))
}
/**
* Collapses a ProviderRequest into a single input string and optional system instruction
* for the Interactions API, which takes a flat input rather than a messages array.
*
* Deep research is single-turn only — it takes one research query and returns a report.
* Memory/conversation history is hidden in the UI for deep research models, so only
* the last user message is used as input. System messages are passed via system_instruction.
*/
function collapseMessagesToInput(request: ProviderRequest): {
input: string
systemInstruction: string | undefined
} {
const systemParts: string[] = []
const userParts: string[] = []
if (request.systemPrompt) {
systemParts.push(request.systemPrompt)
}
if (request.messages) {
for (const msg of request.messages) {
if (msg.role === 'system' && msg.content) {
systemParts.push(msg.content)
} else if (msg.role === 'user' && msg.content) {
userParts.push(msg.content)
}
}
}
return {
input:
userParts.length > 0
? userParts[userParts.length - 1]
: 'Please conduct research on the provided topic.',
systemInstruction: systemParts.length > 0 ? systemParts.join('\n\n') : undefined,
}
}
/**
* Extracts text content from a completed interaction's outputs array.
* The outputs array can contain text, thought, google_search_result, and other types.
* We concatenate all text outputs to get the full research report.
*/
function extractTextFromInteractionOutputs(outputs: Interactions.Interaction['outputs']): string {
if (!outputs || outputs.length === 0) return ''
const textParts: string[] = []
for (const output of outputs) {
if (output.type === 'text') {
const text = (output as Interactions.TextContent).text
if (text) textParts.push(text)
}
}
return textParts.join('\n\n')
}
/**
* Extracts token usage from an Interaction's Usage object.
* The Interactions API provides total_input_tokens, total_output_tokens, total_tokens,
* and total_reasoning_tokens (for thinking models).
*
* Also handles the raw API field name total_thought_tokens which the SDK may
* map to total_reasoning_tokens.
*/
function extractInteractionUsage(usage: Interactions.Usage | undefined): {
inputTokens: number
outputTokens: number
reasoningTokens: number
totalTokens: number
} {
if (!usage) {
return { inputTokens: 0, outputTokens: 0, reasoningTokens: 0, totalTokens: 0 }
}
const usageLogger = createLogger('DeepResearchUsage')
usageLogger.info('Raw interaction usage', { usage: JSON.stringify(usage) })
const inputTokens = usage.total_input_tokens ?? 0
const outputTokens = usage.total_output_tokens ?? 0
const reasoningTokens =
usage.total_reasoning_tokens ??
((usage as Record<string, unknown>).total_thought_tokens as number) ??
0
const totalTokens = usage.total_tokens ?? inputTokens + outputTokens
return { inputTokens, outputTokens, reasoningTokens, totalTokens }
}
/**
* Builds a standard ProviderResponse from a completed deep research interaction.
*/
function buildDeepResearchResponse(
content: string,
model: string,
usage: {
inputTokens: number
outputTokens: number
reasoningTokens: number
totalTokens: number
},
providerStartTime: number,
providerStartTimeISO: string,
interactionId?: string
): ProviderResponse {
const providerEndTime = Date.now()
const duration = providerEndTime - providerStartTime
return {
content,
model,
tokens: {
input: usage.inputTokens,
output: usage.outputTokens,
total: usage.totalTokens,
},
timing: {
startTime: providerStartTimeISO,
endTime: new Date(providerEndTime).toISOString(),
duration,
modelTime: duration,
toolsTime: 0,
firstResponseTime: duration,
iterations: 1,
timeSegments: [
{
type: 'model',
name: 'Deep research',
startTime: providerStartTime,
endTime: providerEndTime,
duration,
},
],
},
cost: calculateCost(model, usage.inputTokens, usage.outputTokens),
interactionId,
}
}
/**
* Creates a ReadableStream from a deep research streaming interaction.
*
* Deep research streaming returns InteractionSSEEvent chunks including:
* - interaction.start: initial interaction with ID
* - content.delta: incremental text and thought_summary updates
* - content.start / content.stop: output boundaries
* - interaction.complete: final event (outputs is undefined in streaming; must reconstruct)
* - error: error events
*
* We stream text deltas to the client and track usage from the interaction.complete event.
*/
function createDeepResearchStream(
stream: AsyncIterable<Interactions.InteractionSSEEvent>,
onComplete?: (
content: string,
usage: {
inputTokens: number
outputTokens: number
reasoningTokens: number
totalTokens: number
},
interactionId?: string
) => void
): ReadableStream<Uint8Array> {
const streamLogger = createLogger('DeepResearchStream')
let fullContent = ''
let completionUsage = { inputTokens: 0, outputTokens: 0, reasoningTokens: 0, totalTokens: 0 }
let completedInteractionId: string | undefined
return new ReadableStream({
async start(controller) {
try {
for await (const event of stream) {
if (event.event_type === 'content.delta') {
const delta = (event as Interactions.ContentDelta).delta
if (delta?.type === 'text' && 'text' in delta && delta.text) {
fullContent += delta.text
controller.enqueue(new TextEncoder().encode(delta.text))
}
} else if (event.event_type === 'interaction.complete') {
const interaction = (event as Interactions.InteractionEvent).interaction
if (interaction?.usage) {
completionUsage = extractInteractionUsage(interaction.usage)
}
completedInteractionId = interaction?.id
} else if (event.event_type === 'interaction.start') {
const interaction = (event as Interactions.InteractionEvent).interaction
if (interaction?.id) {
completedInteractionId = interaction.id
}
} else if (event.event_type === 'error') {
const errorEvent = event as { error?: { code?: string; message?: string } }
const message = errorEvent.error?.message ?? 'Unknown deep research stream error'
streamLogger.error('Deep research stream error', {
code: errorEvent.error?.code,
message,
})
controller.error(new Error(message))
return
}
}
onComplete?.(fullContent, completionUsage, completedInteractionId)
controller.close()
} catch (error) {
streamLogger.error('Error reading deep research stream', {
error: error instanceof Error ? error.message : String(error),
})
controller.error(error)
}
},
})
}
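Draining the returned stream is standard Web Streams usage; a small sketch (the function name is invented):

// Editor's sketch (not part of the diff): decode the stream's text deltas
// into the final research report.
async function drainDeepResearchStream(stream: ReadableStream<Uint8Array>): Promise<string> {
  const reader = stream.getReader()
  const decoder = new TextDecoder()
  let report = ''
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    report += decoder.decode(value, { stream: true })
  }
  return report
}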
/**
* Executes a deep research request using the Interactions API.
*
* Deep research uses the Interactions API ({@link https://ai.google.dev/api/interactions-api}),
* a completely different surface from generateContent. It creates a background interaction
* that performs comprehensive research (up to 60 minutes).
*
* Supports both streaming and non-streaming modes:
* - Streaming: returns a StreamingExecution with a ReadableStream of text deltas
* - Non-streaming: polls until completion and returns a ProviderResponse
*
* Deep research does NOT support custom function calling tools, MCP servers,
* or structured output (response_format). These are gracefully ignored.
*/
export async function executeDeepResearchRequest(
config: GeminiExecutionConfig
): Promise<ProviderResponse | StreamingExecution> {
const { ai, model, request, providerType } = config
const logger = createLogger(providerType === 'google' ? 'GoogleProvider' : 'VertexProvider')
logger.info('Preparing deep research request', {
model,
hasSystemPrompt: !!request.systemPrompt,
hasMessages: !!request.messages?.length,
streaming: !!request.stream,
hasPreviousInteractionId: !!request.previousInteractionId,
})
if (request.tools?.length) {
logger.warn('Deep research does not support custom tools — ignoring tools parameter')
}
if (request.responseFormat) {
logger.warn(
'Deep research does not support structured output — ignoring responseFormat parameter'
)
}
const providerStartTime = Date.now()
const providerStartTimeISO = new Date(providerStartTime).toISOString()
try {
const { input, systemInstruction } = collapseMessagesToInput(request)
// Deep research requires background=true and store=true (store defaults to true,
// but we set it explicitly per API requirements)
const baseParams = {
agent: model as Interactions.CreateAgentInteractionParamsNonStreaming['agent'],
input,
background: true,
store: true,
...(systemInstruction && { system_instruction: systemInstruction }),
...(request.previousInteractionId && {
previous_interaction_id: request.previousInteractionId,
}),
agent_config: {
type: 'deep-research' as const,
thinking_summaries: 'auto' as const,
},
}
logger.info('Creating deep research interaction', {
inputLength: input.length,
hasSystemInstruction: !!systemInstruction,
streaming: !!request.stream,
})
// Streaming mode: create a streaming interaction and return a StreamingExecution
if (request.stream) {
const streamParams: Interactions.CreateAgentInteractionParamsStreaming = {
...baseParams,
stream: true,
}
const streamResponse = await ai.interactions.create(streamParams)
const firstResponseTime = Date.now() - providerStartTime
const streamingResult: StreamingExecution = {
stream: undefined as unknown as ReadableStream<Uint8Array>,
execution: {
success: true,
output: {
content: '',
model,
tokens: { input: 0, output: 0, total: 0 },
providerTiming: {
startTime: providerStartTimeISO,
endTime: new Date().toISOString(),
duration: Date.now() - providerStartTime,
modelTime: firstResponseTime,
toolsTime: 0,
firstResponseTime,
iterations: 1,
timeSegments: [
{
type: 'model',
name: 'Deep research (streaming)',
startTime: providerStartTime,
endTime: providerStartTime + firstResponseTime,
duration: firstResponseTime,
},
],
},
cost: {
input: 0,
output: 0,
total: 0,
pricing: { input: 0, output: 0, updatedAt: new Date().toISOString() },
},
},
logs: [],
metadata: {
startTime: providerStartTimeISO,
endTime: new Date().toISOString(),
duration: Date.now() - providerStartTime,
},
isStreaming: true,
},
}
streamingResult.stream = createDeepResearchStream(
streamResponse,
(content, usage, streamInteractionId) => {
streamingResult.execution.output.content = content
streamingResult.execution.output.tokens = {
input: usage.inputTokens,
output: usage.outputTokens,
total: usage.totalTokens,
}
streamingResult.execution.output.interactionId = streamInteractionId
const cost = calculateCost(model, usage.inputTokens, usage.outputTokens)
streamingResult.execution.output.cost = cost
const streamEndTime = Date.now()
if (streamingResult.execution.output.providerTiming) {
streamingResult.execution.output.providerTiming.endTime = new Date(
streamEndTime
).toISOString()
streamingResult.execution.output.providerTiming.duration =
streamEndTime - providerStartTime
const segments = streamingResult.execution.output.providerTiming.timeSegments
if (segments?.[0]) {
segments[0].endTime = streamEndTime
segments[0].duration = streamEndTime - providerStartTime
}
}
}
)
return streamingResult
}
// Non-streaming mode: create and poll
const createParams: Interactions.CreateAgentInteractionParamsNonStreaming = {
...baseParams,
stream: false,
}
const interaction = await ai.interactions.create(createParams)
const interactionId = interaction.id
logger.info('Deep research interaction created', { interactionId, status: interaction.status })
// Poll until a terminal status
const pollStartTime = Date.now()
let result: Interactions.Interaction = interaction
while (Date.now() - pollStartTime < DEEP_RESEARCH_MAX_DURATION_MS) {
if (result.status === 'completed') {
break
}
if (result.status === 'failed') {
throw new Error(`Deep research interaction failed: ${interactionId}`)
}
if (result.status === 'cancelled') {
throw new Error(`Deep research interaction was cancelled: ${interactionId}`)
}
logger.info('Deep research in progress, polling...', {
interactionId,
status: result.status,
elapsedMs: Date.now() - pollStartTime,
})
await sleep(DEEP_RESEARCH_POLL_INTERVAL_MS)
result = await ai.interactions.get(interactionId)
}
if (result.status !== 'completed') {
throw new Error(
`Deep research timed out after ${DEEP_RESEARCH_MAX_DURATION_MS / 1000}s (status: ${result.status})`
)
}
const content = extractTextFromInteractionOutputs(result.outputs)
const usage = extractInteractionUsage(result.usage)
logger.info('Deep research completed', {
interactionId,
contentLength: content.length,
inputTokens: usage.inputTokens,
outputTokens: usage.outputTokens,
reasoningTokens: usage.reasoningTokens,
totalTokens: usage.totalTokens,
durationMs: Date.now() - providerStartTime,
})
return buildDeepResearchResponse(
content,
model,
usage,
providerStartTime,
providerStartTimeISO,
interactionId
)
} catch (error) {
const providerEndTime = Date.now()
const duration = providerEndTime - providerStartTime
logger.error('Error in deep research request:', {
error: error instanceof Error ? error.message : String(error),
stack: error instanceof Error ? error.stack : undefined,
})
const enhancedError = error instanceof Error ? error : new Error(String(error))
Object.assign(enhancedError, {
timing: {
startTime: providerStartTimeISO,
endTime: new Date(providerEndTime).toISOString(),
duration,
},
})
throw enhancedError
}
}
/**
* Executes a request using the Gemini API
*
@@ -391,6 +855,12 @@ export async function executeGeminiRequest(
config: GeminiExecutionConfig
): Promise<ProviderResponse | StreamingExecution> {
const { ai, model, request, providerType } = config
// Route deep research models to the interactions API
if (isDeepResearchModel(model)) {
return executeDeepResearchRequest(config)
}
const logger = createLogger(providerType === 'google' ? 'GoogleProvider' : 'VertexProvider')
logger.info(`Preparing ${providerType} Gemini request`, {
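A minimal sketch of how a caller might exercise this routing. The exact GeminiExecutionConfig shape and client construction are not shown in this diff, so everything below other than the function and model names is an assumption:

// Hypothetical caller; `ai` is assumed to be an Interactions-capable client,
// and the import path is illustrative, not taken from this diff.
import { executeGeminiRequest } from '@/providers/google/gemini'

async function runDeepResearch(ai: unknown): Promise<void> {
  const result = await executeGeminiRequest({
    ai,
    model: 'deep-research-pro-preview-12-2025', // routed via isDeepResearchModel above
    providerType: 'google',
    request: {
      systemPrompt: 'Cite primary sources.',
      messages: [{ role: 'user', content: 'Survey recent progress in battery chemistry.' }],
      stream: true, // streaming mode returns a StreamingExecution
    },
  })
  if ('stream' in result) {
    // Consume raw text deltas; tokens and cost are written into result.execution
    // by the onComplete callback once the stream closes.
    const reader = result.stream.getReader()
    const decoder = new TextDecoder()
    while (true) {
      const { done, value } = await reader.read()
      if (done) break
      process.stdout.write(decoder.decode(value))
    }
  }
}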


@@ -46,6 +46,9 @@ export interface ModelCapabilities {
levels: string[]
default?: string
}
deepResearch?: boolean
/** Whether this model supports conversation memory. Defaults to true if omitted. */
memory?: boolean
}
export interface ModelDefinition {
@@ -825,7 +828,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
name: 'Google',
description: "Google's Gemini models",
defaultModel: 'gemini-2.5-pro',
modelPatterns: [/^gemini/, /^deep-research/],
capabilities: {
toolUsageControl: true,
},
@@ -928,6 +931,19 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
},
contextWindow: 1000000,
},
{
id: 'deep-research-pro-preview-12-2025',
pricing: {
input: 2.0,
output: 2.0,
updatedAt: '2026-02-10',
},
capabilities: {
deepResearch: true,
memory: false,
},
contextWindow: 1000000,
},
],
},
vertex: {
@@ -1038,6 +1054,19 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
},
contextWindow: 1000000,
},
{
id: 'vertex/deep-research-pro-preview-12-2025',
pricing: {
input: 2.0,
output: 2.0,
updatedAt: '2026-02-10',
},
capabilities: {
deepResearch: true,
memory: false,
},
contextWindow: 1000000,
},
],
},
deepseek: {
@@ -2480,6 +2509,37 @@ export function getThinkingLevelsForModel(modelId: string): string[] | null {
return capability?.levels ?? null
}
/**
* Get all models that support deep research capability
*/
export function getModelsWithDeepResearch(): string[] {
const models: string[] = []
for (const provider of Object.values(PROVIDER_DEFINITIONS)) {
for (const model of provider.models) {
if (model.capabilities.deepResearch) {
models.push(model.id)
}
}
}
return models
}
/**
* Get all models that explicitly disable memory support (memory: false).
* Models without this capability default to supporting memory.
*/
export function getModelsWithoutMemory(): string[] {
const models: string[] = []
for (const provider of Object.values(PROVIDER_DEFINITIONS)) {
for (const model of provider.models) {
if (model.capabilities.memory === false) {
models.push(model.id)
}
}
}
return models
}
/**
* Get the max output tokens for a specific model.
*


@@ -95,6 +95,8 @@ export interface ProviderResponse {
total: number
pricing: ModelPricing
}
/** Interaction ID returned by the Interactions API (used for multi-turn deep research) */
interactionId?: string
}
export type ToolUsageControl = 'auto' | 'force' | 'none'
@@ -169,6 +171,8 @@ export interface ProviderRequest {
verbosity?: string
thinkingLevel?: string
isDeployedContext?: boolean
/** Previous interaction ID for multi-turn Interactions API requests (deep research follow-ups) */
previousInteractionId?: string
}
export const providers: Record<string, ProviderConfig> = {}


@@ -12,6 +12,8 @@ import {
getMaxOutputTokensForModel as getMaxOutputTokensForModelFromDefinitions,
getMaxTemperature as getMaxTempFromDefinitions,
getModelPricing as getModelPricingFromDefinitions,
getModelsWithDeepResearch,
getModelsWithoutMemory,
getModelsWithReasoningEffort,
getModelsWithTemperatureSupport,
getModelsWithTempRange01,
@@ -953,6 +955,8 @@ export const MODELS_WITH_TEMPERATURE_SUPPORT = getModelsWithTemperatureSupport()
export const MODELS_WITH_REASONING_EFFORT = getModelsWithReasoningEffort()
export const MODELS_WITH_VERBOSITY = getModelsWithVerbosity()
export const MODELS_WITH_THINKING = getModelsWithThinking()
export const MODELS_WITH_DEEP_RESEARCH = getModelsWithDeepResearch()
export const MODELS_WITHOUT_MEMORY = getModelsWithoutMemory()
export const PROVIDERS_WITH_TOOL_USAGE_CONTROL = getProvidersWithToolUsageControl()
export function supportsTemperature(model: string): boolean {
@@ -971,6 +975,10 @@ export function supportsThinking(model: string): boolean {
return MODELS_WITH_THINKING.includes(model.toLowerCase())
}
export function isDeepResearchModel(model: string): boolean {
return MODELS_WITH_DEEP_RESEARCH.includes(model.toLowerCase())
}
/**
* Get the maximum temperature value for a model
* @returns Maximum temperature value (1 or 2) or undefined if temperature not supported
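Taken together, these helpers reduce to simple membership checks; a short sketch of the intended semantics (the import path is an assumption, not from this diff):

// Memory support defaults to true and is only disabled by an explicit
// `memory: false` capability, mirroring getModelsWithoutMemory above.
import { isDeepResearchModel, MODELS_WITHOUT_MEMORY } from '@/providers/models' // assumed path

function supportsMemory(model: string): boolean {
  return !MODELS_WITHOUT_MEMORY.includes(model.toLowerCase())
}

console.log(isDeepResearchModel('deep-research-pro-preview-12-2025')) // true
console.log(supportsMemory('deep-research-pro-preview-12-2025')) // false (memory: false above)
console.log(supportsMemory('gemini-2.5-pro')) // true (defaults on when capability omitted)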


@@ -137,6 +137,37 @@ function handleSecurityFiltering(request: NextRequest): NextResponse | null {
return null
}
const UTM_KEYS = ['utm_source', 'utm_medium', 'utm_campaign', 'utm_content'] as const
const UTM_COOKIE_NAME = 'sim_utm'
const UTM_COOKIE_MAX_AGE = 3600
/**
* Sets a `sim_utm` cookie when UTM params are present on auth pages.
* Captures UTM values, the HTTP Referer, landing page, and a timestamp
* used by the attribution API to verify the user signed up after visiting the link.
*/
function setUtmCookie(request: NextRequest, response: NextResponse): void {
const { searchParams, pathname } = request.nextUrl
const hasUtm = UTM_KEYS.some((key) => searchParams.get(key))
if (!hasUtm) return
const utmData: Record<string, string> = {}
for (const key of UTM_KEYS) {
const value = searchParams.get(key)
if (value) utmData[key] = value
}
utmData.referrer_url = request.headers.get('referer') || ''
utmData.landing_page = pathname
utmData.created_at = Date.now().toString()
response.cookies.set(UTM_COOKIE_NAME, JSON.stringify(utmData), {
path: '/',
maxAge: UTM_COOKIE_MAX_AGE,
sameSite: 'lax',
httpOnly: false, // Client-side hook needs to detect cookie presence
})
}
export async function proxy(request: NextRequest) {
const url = request.nextUrl
@@ -152,6 +183,7 @@ export async function proxy(request: NextRequest) {
}
const response = NextResponse.next()
response.headers.set('Content-Security-Policy', generateRuntimeCSP())
setUtmCookie(request, response)
return response
}
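Since the cookie is deliberately not httpOnly, the client can read it back. A sketch of what the client-side hook's cookie parsing might look like; the hook itself is outside this diff, so the names here are illustrative:

interface UtmCookieData {
  utm_source?: string
  utm_medium?: string
  utm_campaign?: string
  utm_content?: string
  referrer_url?: string
  landing_page?: string
  created_at?: string // Date.now() serialized as a string by setUtmCookie
}

// Parses the sim_utm cookie written by the middleware above; returns null when
// it is absent or unparseable (e.g. after the 1-hour maxAge expires).
function readUtmCookie(): UtmCookieData | null {
  const pair = document.cookie.split('; ').find((part) => part.startsWith('sim_utm='))
  if (!pair) return null
  try {
    return JSON.parse(decodeURIComponent(pair.slice('sim_utm='.length)))
  } catch {
    return null
  }
}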

Binary image file changed (45 KiB → 78 KiB).
Binary image file changed (58 KiB → 58 KiB).

@@ -129,6 +129,18 @@ export const useExecutionStore = create<ExecutionState & ExecutionActions>()((se
})
},
setCurrentExecutionId: (workflowId, executionId) => {
set({
workflowExecutions: updatedMap(get().workflowExecutions, workflowId, {
currentExecutionId: executionId,
}),
})
},
getCurrentExecutionId: (workflowId) => {
return getOrCreate(get().workflowExecutions, workflowId).currentExecutionId
},
clearRunPath: (workflowId) => {
set({
workflowExecutions: updatedMap(get().workflowExecutions, workflowId, {


@@ -35,6 +35,8 @@ export interface WorkflowExecutionState {
lastRunPath: Map<string, BlockRunStatus>
/** Maps edge IDs to their run result from the last execution */
lastRunEdges: Map<string, EdgeRunStatus>
/** The execution ID of the currently running execution */
currentExecutionId: string | null
}
/**
@@ -54,6 +56,7 @@ export const defaultWorkflowExecutionState: WorkflowExecutionState = {
debugContext: null,
lastRunPath: new Map(),
lastRunEdges: new Map(),
currentExecutionId: null,
}
/**
@@ -96,6 +99,10 @@ export interface ExecutionActions {
setEdgeRunStatus: (workflowId: string, edgeId: string, status: EdgeRunStatus) => void
/** Clears the run path and run edges for a workflow */
clearRunPath: (workflowId: string) => void
/** Stores the current execution ID for a workflow */
setCurrentExecutionId: (workflowId: string, executionId: string | null) => void
/** Returns the current execution ID for a workflow */
getCurrentExecutionId: (workflowId: string) => string | null
/** Resets the entire store to its initial empty state */
reset: () => void
/** Stores a serializable execution snapshot for a workflow */
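Illustrative call sites for the new execution-id actions; the store path and the IDs below are assumptions, not from this diff:

import { useExecutionStore } from '@/stores/execution' // assumed path

const workflowId = 'wf_123'

// Record the ID when an execution starts...
useExecutionStore.getState().setCurrentExecutionId(workflowId, 'exec_abc')

// ...so that after a page refresh the client can look it back up and resubscribe:
const runningId = useExecutionStore.getState().getCurrentExecutionId(workflowId)

// Clear it once the execution finishes or is cancelled:
useExecutionStore.getState().setCurrentExecutionId(workflowId, null)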


@@ -310,6 +310,50 @@ function parseModelKey(compositeKey: string): { provider: string; modelId: strin
return { provider: compositeKey.slice(0, slashIdx), modelId: compositeKey.slice(slashIdx + 1) }
}
/**
* Convert legacy/variant Claude IDs into the canonical ID shape used by the model catalog.
*
* Examples:
* - claude-4.5-opus -> claude-opus-4-5
* - claude-opus-4.6 -> claude-opus-4-6
* - anthropic.claude-opus-4-5-20251101-v1:0 -> claude-opus-4-5 (match key only)
*/
function canonicalizeModelMatchKey(modelId: string): string {
if (!modelId) return modelId
const normalized = modelId.trim().toLowerCase()
const toCanonicalClaude = (tier: string, version: string): string => {
const normalizedVersion = version.replace(/\./g, '-')
return `claude-${tier}-${normalizedVersion}`
}
const tierFirstExact = normalized.match(/^claude-(opus|sonnet|haiku)-(\d+(?:[.-]\d+)?)$/)
if (tierFirstExact) {
const [, tier, version] = tierFirstExact
return toCanonicalClaude(tier, version)
}
const versionFirstExact = normalized.match(/^claude-(\d+(?:[.-]\d+)?)-(opus|sonnet|haiku)$/)
if (versionFirstExact) {
const [, version, tier] = versionFirstExact
return toCanonicalClaude(tier, version)
}
const tierFirstEmbedded = normalized.match(/claude-(opus|sonnet|haiku)-(\d+(?:[.-]\d+)?)/)
if (tierFirstEmbedded) {
const [, tier, version] = tierFirstEmbedded
return toCanonicalClaude(tier, version)
}
const versionFirstEmbedded = normalized.match(/claude-(\d+(?:[.-]\d+)?)-(opus|sonnet|haiku)/)
if (versionFirstEmbedded) {
const [, version, tier] = versionFirstEmbedded
return toCanonicalClaude(tier, version)
}
return normalized
}
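// The doc comment's examples, spelled out against the regexes above:
//   canonicalizeModelMatchKey('claude-4.5-opus')                         -> 'claude-opus-4-5'
//   canonicalizeModelMatchKey('claude-opus-4.6')                         -> 'claude-opus-4-6'
//   canonicalizeModelMatchKey('anthropic.claude-opus-4-5-20251101-v1:0') -> 'claude-opus-4-5'
// Non-Claude IDs fall through and are returned lowercased, e.g. 'GPT-4o' -> 'gpt-4o'.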
const MODEL_PROVIDER_PRIORITY = [
'anthropic',
'bedrock',
@@ -350,12 +394,23 @@ function normalizeSelectedModelKey(selectedModel: string, models: AvailableModel
const { provider, modelId } = parseModelKey(selectedModel)
const targetModelId = modelId || selectedModel
const targetMatchKey = canonicalizeModelMatchKey(targetModelId)
const matches = models.filter((m) => {
const candidateModelId = parseModelKey(m.id).modelId || m.id
const candidateMatchKey = canonicalizeModelMatchKey(candidateModelId)
return (
candidateModelId === targetModelId ||
m.id.endsWith(`/${targetModelId}`) ||
candidateMatchKey === targetMatchKey
)
})
if (matches.length === 0) return selectedModel
if (provider) {
const sameProvider = matches.find(
(m) => m.provider === provider || m.id.startsWith(`${provider}/`)
)
if (sameProvider) return sameProvider.id
}
@@ -1093,11 +1148,12 @@ export const useCopilotStore = create<CopilotStore>()(
const chatConfig = chat.config ?? {}
const chatMode = chatConfig.mode || get().mode
const chatModel = chatConfig.model || get().selectedModel
const normalizedChatModel = normalizeSelectedModelKey(chatModel, get().availableModels)
logger.debug('[Chat] Restoring chat config', {
chatId: chat.id,
mode: chatMode,
model: normalizedChatModel,
hasPlanArtifact: !!planArtifact,
})
@@ -1119,7 +1175,7 @@ export const useCopilotStore = create<CopilotStore>()(
showPlanTodos: false,
streamingPlanContent: planArtifact,
mode: chatMode,
selectedModel: normalizedChatModel as CopilotStore['selectedModel'],
suppressAutoSelect: false,
})
@@ -1292,6 +1348,10 @@ export const useCopilotStore = create<CopilotStore>()(
const refreshedConfig = updatedCurrentChat.config ?? {}
const refreshedMode = refreshedConfig.mode || get().mode
const refreshedModel = refreshedConfig.model || get().selectedModel
const normalizedRefreshedModel = normalizeSelectedModelKey(
refreshedModel,
get().availableModels
)
const toolCallsById = buildToolCallsById(normalizedMessages)
set({
@@ -1300,7 +1360,7 @@
toolCallsById,
streamingPlanContent: refreshedPlanArtifact,
mode: refreshedMode,
selectedModel: normalizedRefreshedModel as CopilotStore['selectedModel'],
})
}
try {
@@ -1320,11 +1380,15 @@ export const useCopilotStore = create<CopilotStore>()(
const chatConfig = mostRecentChat.config ?? {}
const chatMode = chatConfig.mode || get().mode
const chatModel = chatConfig.model || get().selectedModel
const normalizedChatModel = normalizeSelectedModelKey(
chatModel,
get().availableModels
)
logger.info('[Chat] Auto-selecting most recent chat with config', {
chatId: mostRecentChat.id,
mode: chatMode,
model: normalizedChatModel,
hasPlanArtifact: !!planArtifact,
})
@@ -1336,7 +1400,7 @@
toolCallsById,
streamingPlanContent: planArtifact,
mode: chatMode,
selectedModel: normalizedChatModel as CopilotStore['selectedModel'],
})
try {
await get().loadMessageCheckpoints(mostRecentChat.id)
@@ -2268,7 +2332,8 @@
},
setSelectedModel: async (model) => {
const normalizedModel = normalizeSelectedModelKey(model, get().availableModels)
set({ selectedModel: normalizedModel as CopilotStore['selectedModel'] })
},
setAgentPrefetch: (prefetch) => set({ agentPrefetch: prefetch }),
loadAvailableModels: async () => {


@@ -224,7 +224,7 @@ export const useTerminalConsoleStore = create<ConsoleStore>()(
const newEntry = get().entries[0]
if (newEntry?.error && newEntry.blockType !== 'cancelled') {
notifyBlockError({
error: newEntry.error,
blockName: newEntry.blockName || 'Unknown Block',
@@ -243,6 +243,11 @@ export const useTerminalConsoleStore = create<ConsoleStore>()(
useExecutionStore.getState().clearRunPath(workflowId)
},
clearExecutionEntries: (executionId: string) =>
set((state) => ({
entries: state.entries.filter((e) => e.executionId !== executionId),
})),
exportConsoleCSV: (workflowId: string) => {
const entries = get().entries.filter((entry) => entry.workflowId === workflowId)
@@ -470,12 +475,24 @@ export const useTerminalConsoleStore = create<ConsoleStore>()(
},
merge: (persistedState, currentState) => {
const persisted = persistedState as Partial<ConsoleStore> | undefined
const rawEntries = persisted?.entries ?? currentState.entries
const oneHourAgo = Date.now() - 60 * 60 * 1000
const entries = rawEntries.map((entry, index) => {
let updated = entry
if (entry.executionOrder === undefined) {
updated = { ...updated, executionOrder: index + 1 }
}
if (
entry.isRunning &&
entry.startedAt &&
new Date(entry.startedAt).getTime() < oneHourAgo
) {
updated = { ...updated, isRunning: false }
}
return updated
})
return {
...currentState,
entries,


@@ -51,6 +51,7 @@ export interface ConsoleStore {
isOpen: boolean
addConsole: (entry: Omit<ConsoleEntry, 'id' | 'timestamp'>) => ConsoleEntry
clearWorkflowConsole: (workflowId: string) => void
clearExecutionEntries: (executionId: string) => void
exportConsoleCSV: (workflowId: string) => void
getWorkflowEntries: (workflowId: string) => ConsoleEntry[]
toggleConsole: () => void


@@ -0,0 +1,114 @@
import { TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
export interface ConfluenceDeleteLabelParams {
accessToken: string
domain: string
pageId: string
labelName: string
cloudId?: string
}
export interface ConfluenceDeleteLabelResponse {
success: boolean
output: {
ts: string
pageId: string
labelName: string
deleted: boolean
}
}
export const confluenceDeleteLabelTool: ToolConfig<
ConfluenceDeleteLabelParams,
ConfluenceDeleteLabelResponse
> = {
id: 'confluence_delete_label',
name: 'Confluence Delete Label',
description: 'Remove a label from a Confluence page.',
version: '1.0.0',
oauth: {
required: true,
provider: 'confluence',
},
params: {
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token for Confluence',
},
domain: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
},
pageId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'Confluence page ID to remove the label from',
},
labelName: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'Name of the label to remove',
},
cloudId: {
type: 'string',
required: false,
visibility: 'user-only',
description:
'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
},
},
request: {
url: () => '/api/tools/confluence/labels',
method: 'DELETE',
headers: (params: ConfluenceDeleteLabelParams) => ({
Accept: 'application/json',
Authorization: `Bearer ${params.accessToken}`,
}),
body: (params: ConfluenceDeleteLabelParams) => ({
domain: params.domain,
accessToken: params.accessToken,
pageId: params.pageId?.trim(),
labelName: params.labelName?.trim(),
cloudId: params.cloudId,
}),
},
transformResponse: async (response: Response) => {
const data = await response.json()
return {
success: true,
output: {
ts: new Date().toISOString(),
pageId: data.pageId ?? '',
labelName: data.labelName ?? '',
deleted: true,
},
}
},
outputs: {
ts: TIMESTAMP_OUTPUT,
pageId: {
type: 'string',
description: 'Page ID the label was removed from',
},
labelName: {
type: 'string',
description: 'Name of the removed label',
},
deleted: {
type: 'boolean',
description: 'Deletion status',
},
},
}
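These ToolConfig objects are declarative; the runner that executes them is not part of this diff, but a minimal sketch of the wiring they imply looks like this (everything except the tool export and its types is an assumption):

// Drives a ToolConfig: resolve the URL, issue the HTTP request, transform the result.
async function runConfluenceTool(
  tool: typeof confluenceDeleteLabelTool,
  params: ConfluenceDeleteLabelParams
): Promise<ConfluenceDeleteLabelResponse> {
  const url = typeof tool.request.url === 'function' ? tool.request.url(params) : tool.request.url
  const response = await fetch(url, {
    method: tool.request.method,
    headers: tool.request.headers(params),
    body: tool.request.body ? JSON.stringify(tool.request.body(params)) : undefined,
  })
  return tool.transformResponse(response)
}

// e.g. await runConfluenceTool(confluenceDeleteLabelTool, { accessToken, domain, pageId, labelName })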


@@ -0,0 +1,105 @@
import { TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
export interface ConfluenceDeletePagePropertyParams {
accessToken: string
domain: string
pageId: string
propertyId: string
cloudId?: string
}
export interface ConfluenceDeletePagePropertyResponse {
success: boolean
output: {
ts: string
pageId: string
propertyId: string
deleted: boolean
}
}
export const confluenceDeletePagePropertyTool: ToolConfig<
ConfluenceDeletePagePropertyParams,
ConfluenceDeletePagePropertyResponse
> = {
id: 'confluence_delete_page_property',
name: 'Confluence Delete Page Property',
description: 'Delete a content property from a Confluence page by its property ID.',
version: '1.0.0',
oauth: {
required: true,
provider: 'confluence',
},
params: {
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token for Confluence',
},
domain: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
},
pageId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'The ID of the page containing the property',
},
propertyId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'The ID of the property to delete',
},
cloudId: {
type: 'string',
required: false,
visibility: 'user-only',
description:
'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
},
},
request: {
url: () => '/api/tools/confluence/page-properties',
method: 'DELETE',
headers: (params: ConfluenceDeletePagePropertyParams) => ({
Accept: 'application/json',
Authorization: `Bearer ${params.accessToken}`,
}),
body: (params: ConfluenceDeletePagePropertyParams) => ({
domain: params.domain,
accessToken: params.accessToken,
pageId: params.pageId?.trim(),
propertyId: params.propertyId?.trim(),
cloudId: params.cloudId,
}),
},
transformResponse: async (response: Response) => {
const data = await response.json()
return {
success: true,
output: {
ts: new Date().toISOString(),
pageId: data.pageId ?? '',
propertyId: data.propertyId ?? '',
deleted: true,
},
}
},
outputs: {
ts: TIMESTAMP_OUTPUT,
pageId: { type: 'string', description: 'ID of the page' },
propertyId: { type: 'string', description: 'ID of the deleted property' },
deleted: { type: 'boolean', description: 'Deletion status' },
},
}


@@ -0,0 +1,143 @@
import { PAGE_ITEM_PROPERTIES, TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
export interface ConfluenceGetPagesByLabelParams {
accessToken: string
domain: string
labelId: string
limit?: number
cursor?: string
cloudId?: string
}
export interface ConfluenceGetPagesByLabelResponse {
success: boolean
output: {
ts: string
labelId: string
pages: Array<{
id: string
title: string
status: string | null
spaceId: string | null
parentId: string | null
authorId: string | null
createdAt: string | null
version: {
number: number
message?: string
createdAt?: string
} | null
}>
nextCursor: string | null
}
}
export const confluenceGetPagesByLabelTool: ToolConfig<
ConfluenceGetPagesByLabelParams,
ConfluenceGetPagesByLabelResponse
> = {
id: 'confluence_get_pages_by_label',
name: 'Confluence Get Pages by Label',
description: 'Retrieve all pages that have a specific label applied.',
version: '1.0.0',
oauth: {
required: true,
provider: 'confluence',
},
params: {
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token for Confluence',
},
domain: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
},
labelId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'The ID of the label to get pages for',
},
limit: {
type: 'number',
required: false,
visibility: 'user-or-llm',
description: 'Maximum number of pages to return (default: 50, max: 250)',
},
cursor: {
type: 'string',
required: false,
visibility: 'user-or-llm',
description: 'Pagination cursor from previous response',
},
cloudId: {
type: 'string',
required: false,
visibility: 'user-only',
description:
'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
},
},
request: {
url: (params: ConfluenceGetPagesByLabelParams) => {
const query = new URLSearchParams({
domain: params.domain,
accessToken: params.accessToken,
labelId: params.labelId,
limit: String(params.limit || 50),
})
if (params.cursor) {
query.set('cursor', params.cursor)
}
if (params.cloudId) {
query.set('cloudId', params.cloudId)
}
return `/api/tools/confluence/pages-by-label?${query.toString()}`
},
method: 'GET',
headers: (params: ConfluenceGetPagesByLabelParams) => ({
Accept: 'application/json',
Authorization: `Bearer ${params.accessToken}`,
}),
},
transformResponse: async (response: Response) => {
const data = await response.json()
return {
success: true,
output: {
ts: new Date().toISOString(),
labelId: data.labelId ?? '',
pages: data.pages ?? [],
nextCursor: data.nextCursor ?? null,
},
}
},
outputs: {
ts: TIMESTAMP_OUTPUT,
labelId: { type: 'string', description: 'ID of the label' },
pages: {
type: 'array',
description: 'Array of pages with this label',
items: {
type: 'object',
properties: PAGE_ITEM_PROPERTIES,
},
},
nextCursor: {
type: 'string',
description: 'Cursor for fetching the next page of results',
optional: true,
},
},
}


@@ -5,11 +5,14 @@ import { confluenceCreatePageTool } from '@/tools/confluence/create_page'
import { confluenceCreatePagePropertyTool } from '@/tools/confluence/create_page_property'
import { confluenceDeleteAttachmentTool } from '@/tools/confluence/delete_attachment'
import { confluenceDeleteCommentTool } from '@/tools/confluence/delete_comment'
import { confluenceDeleteLabelTool } from '@/tools/confluence/delete_label'
import { confluenceDeletePageTool } from '@/tools/confluence/delete_page'
import { confluenceDeletePagePropertyTool } from '@/tools/confluence/delete_page_property'
import { confluenceGetBlogPostTool } from '@/tools/confluence/get_blogpost'
import { confluenceGetPageAncestorsTool } from '@/tools/confluence/get_page_ancestors'
import { confluenceGetPageChildrenTool } from '@/tools/confluence/get_page_children'
import { confluenceGetPageVersionTool } from '@/tools/confluence/get_page_version'
import { confluenceGetPagesByLabelTool } from '@/tools/confluence/get_pages_by_label'
import { confluenceGetSpaceTool } from '@/tools/confluence/get_space'
import { confluenceListAttachmentsTool } from '@/tools/confluence/list_attachments'
import { confluenceListBlogPostsTool } from '@/tools/confluence/list_blogposts'
@@ -19,6 +22,7 @@ import { confluenceListLabelsTool } from '@/tools/confluence/list_labels'
import { confluenceListPagePropertiesTool } from '@/tools/confluence/list_page_properties'
import { confluenceListPageVersionsTool } from '@/tools/confluence/list_page_versions'
import { confluenceListPagesInSpaceTool } from '@/tools/confluence/list_pages_in_space'
import { confluenceListSpaceLabelsTool } from '@/tools/confluence/list_space_labels'
import { confluenceListSpacesTool } from '@/tools/confluence/list_spaces'
import { confluenceRetrieveTool } from '@/tools/confluence/retrieve'
import { confluenceSearchTool } from '@/tools/confluence/search'
@@ -78,6 +82,7 @@ export {
// Page Properties Tools
confluenceListPagePropertiesTool,
confluenceCreatePagePropertyTool,
confluenceDeletePagePropertyTool,
// Blog Post Tools
confluenceListBlogPostsTool,
confluenceGetBlogPostTool,
@@ -98,6 +103,9 @@ export {
// Label Tools
confluenceListLabelsTool,
confluenceAddLabelTool,
confluenceDeleteLabelTool,
confluenceGetPagesByLabelTool,
confluenceListSpaceLabelsTool,
// Space Tools
confluenceGetSpaceTool,
confluenceListSpacesTool,


@@ -0,0 +1,134 @@
import { LABEL_ITEM_PROPERTIES, TIMESTAMP_OUTPUT } from '@/tools/confluence/types'
import type { ToolConfig } from '@/tools/types'
export interface ConfluenceListSpaceLabelsParams {
accessToken: string
domain: string
spaceId: string
limit?: number
cursor?: string
cloudId?: string
}
export interface ConfluenceListSpaceLabelsResponse {
success: boolean
output: {
ts: string
spaceId: string
labels: Array<{
id: string
name: string
prefix: string
}>
nextCursor: string | null
}
}
export const confluenceListSpaceLabelsTool: ToolConfig<
ConfluenceListSpaceLabelsParams,
ConfluenceListSpaceLabelsResponse
> = {
id: 'confluence_list_space_labels',
name: 'Confluence List Space Labels',
description: 'List all labels associated with a Confluence space.',
version: '1.0.0',
oauth: {
required: true,
provider: 'confluence',
},
params: {
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token for Confluence',
},
domain: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'Your Confluence domain (e.g., yourcompany.atlassian.net)',
},
spaceId: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'The ID of the Confluence space to list labels from',
},
limit: {
type: 'number',
required: false,
visibility: 'user-or-llm',
description: 'Maximum number of labels to return (default: 25, max: 250)',
},
cursor: {
type: 'string',
required: false,
visibility: 'user-or-llm',
description: 'Pagination cursor from previous response',
},
cloudId: {
type: 'string',
required: false,
visibility: 'user-only',
description:
'Confluence Cloud ID for the instance. If not provided, it will be fetched using the domain.',
},
},
request: {
url: (params: ConfluenceListSpaceLabelsParams) => {
const query = new URLSearchParams({
domain: params.domain,
accessToken: params.accessToken,
spaceId: params.spaceId,
limit: String(params.limit || 25),
})
if (params.cursor) {
query.set('cursor', params.cursor)
}
if (params.cloudId) {
query.set('cloudId', params.cloudId)
}
return `/api/tools/confluence/space-labels?${query.toString()}`
},
method: 'GET',
headers: (params: ConfluenceListSpaceLabelsParams) => ({
Accept: 'application/json',
Authorization: `Bearer ${params.accessToken}`,
}),
},
transformResponse: async (response: Response) => {
const data = await response.json()
return {
success: true,
output: {
ts: new Date().toISOString(),
spaceId: data.spaceId ?? '',
labels: data.labels ?? [],
nextCursor: data.nextCursor ?? null,
},
}
},
outputs: {
ts: TIMESTAMP_OUTPUT,
spaceId: { type: 'string', description: 'ID of the space' },
labels: {
type: 'array',
description: 'Array of labels on the space',
items: {
type: 'object',
properties: LABEL_ITEM_PROPERTIES,
},
},
nextCursor: {
type: 'string',
description: 'Cursor for fetching the next page of results',
optional: true,
},
},
}


@@ -118,10 +118,13 @@ import {
confluenceCreatePageTool,
confluenceDeleteAttachmentTool,
confluenceDeleteCommentTool,
confluenceDeleteLabelTool,
confluenceDeletePagePropertyTool,
confluenceDeletePageTool,
confluenceGetBlogPostTool,
confluenceGetPageAncestorsTool,
confluenceGetPageChildrenTool,
confluenceGetPagesByLabelTool,
confluenceGetPageVersionTool,
confluenceGetSpaceTool,
confluenceListAttachmentsTool,
@@ -132,6 +135,7 @@ import {
confluenceListPagePropertiesTool,
confluenceListPagesInSpaceTool,
confluenceListPageVersionsTool,
confluenceListSpaceLabelsTool,
confluenceListSpacesTool,
confluenceRetrieveTool,
confluenceSearchInSpaceTool,
@@ -2667,6 +2671,10 @@ export const tools: Record<string, ToolConfig> = {
confluence_delete_attachment: confluenceDeleteAttachmentTool,
confluence_list_labels: confluenceListLabelsTool,
confluence_add_label: confluenceAddLabelTool,
confluence_get_pages_by_label: confluenceGetPagesByLabelTool,
confluence_list_space_labels: confluenceListSpaceLabelsTool,
confluence_delete_label: confluenceDeleteLabelTool,
confluence_delete_page_property: confluenceDeletePagePropertyTool,
confluence_get_space: confluenceGetSpaceTool,
confluence_list_spaces: confluenceListSpacesTool,
cursor_list_agents: cursorListAgentsTool,


@@ -23,7 +23,12 @@ export const SYSTEM_SUBBLOCK_IDS: string[] = [
* with default values from the trigger definition on load, which aren't present in
* the deployed state, causing false positive change detection.
*/
export const TRIGGER_RUNTIME_SUBBLOCK_IDS: string[] = [
'webhookId',
'triggerPath',
'triggerConfig',
'triggerId',
]
/**
* Maximum number of consecutive failures before a trigger (schedule/webhook) is auto-disabled.
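A sketch of how the expanded list would be applied during change detection; the comparison helper itself is not in this diff, so this shape is an assumption:

// Strips runtime-populated trigger subblocks before comparing the current
// editor state against the deployed state, so values hydrated on load
// (webhookId, triggerPath, triggerConfig, triggerId) cannot register as edits.
function stripTriggerRuntimeSubblocks(
  subblocks: Record<string, unknown>
): Record<string, unknown> {
  return Object.fromEntries(
    Object.entries(subblocks).filter(([id]) => !TRIGGER_RUNTIME_SUBBLOCK_IDS.includes(id))
  )
}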


@@ -0,0 +1,41 @@
CREATE TABLE "referral_attribution" (
"id" text PRIMARY KEY NOT NULL,
"user_id" text NOT NULL,
"organization_id" text,
"campaign_id" text,
"utm_source" text,
"utm_medium" text,
"utm_campaign" text,
"utm_content" text,
"referrer_url" text,
"landing_page" text,
"bonus_credit_amount" numeric DEFAULT '0' NOT NULL,
"created_at" timestamp DEFAULT now() NOT NULL,
CONSTRAINT "referral_attribution_user_id_unique" UNIQUE("user_id")
);
--> statement-breakpoint
CREATE TABLE "referral_campaigns" (
"id" text PRIMARY KEY NOT NULL,
"name" text NOT NULL,
"code" text,
"utm_source" text,
"utm_medium" text,
"utm_campaign" text,
"utm_content" text,
"bonus_credit_amount" numeric NOT NULL,
"is_active" boolean DEFAULT true NOT NULL,
"created_at" timestamp DEFAULT now() NOT NULL,
"updated_at" timestamp DEFAULT now() NOT NULL,
CONSTRAINT "referral_campaigns_code_unique" UNIQUE("code")
);
--> statement-breakpoint
ALTER TABLE "referral_attribution" ADD CONSTRAINT "referral_attribution_user_id_user_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."user"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "referral_attribution" ADD CONSTRAINT "referral_attribution_organization_id_organization_id_fk" FOREIGN KEY ("organization_id") REFERENCES "public"."organization"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "referral_attribution" ADD CONSTRAINT "referral_attribution_campaign_id_referral_campaigns_id_fk" FOREIGN KEY ("campaign_id") REFERENCES "public"."referral_campaigns"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
CREATE INDEX "referral_attribution_user_id_idx" ON "referral_attribution" USING btree ("user_id");--> statement-breakpoint
CREATE UNIQUE INDEX "referral_attribution_org_unique_idx" ON "referral_attribution" USING btree ("organization_id") WHERE "referral_attribution"."organization_id" IS NOT NULL;--> statement-breakpoint
CREATE INDEX "referral_attribution_campaign_id_idx" ON "referral_attribution" USING btree ("campaign_id");--> statement-breakpoint
CREATE INDEX "referral_attribution_utm_campaign_idx" ON "referral_attribution" USING btree ("utm_campaign");--> statement-breakpoint
CREATE INDEX "referral_attribution_utm_content_idx" ON "referral_attribution" USING btree ("utm_content");--> statement-breakpoint
CREATE INDEX "referral_attribution_created_at_idx" ON "referral_attribution" USING btree ("created_at");--> statement-breakpoint
CREATE INDEX "referral_campaigns_active_idx" ON "referral_campaigns" USING btree ("is_active");

File diff suppressed because it is too large.


@@ -1072,6 +1072,13 @@
"when": 1770410282842, "when": 1770410282842,
"tag": "0153_complete_arclight", "tag": "0153_complete_arclight",
"breakpoints": true "breakpoints": true
},
{
"idx": 154,
"version": "7",
"when": 1770869658697,
"tag": "0154_bumpy_living_mummy",
"breakpoints": true
} }
] ]
} }


@@ -726,6 +726,61 @@ export const userStats = pgTable('user_stats', {
billingBlockedReason: billingBlockedReasonEnum('billing_blocked_reason'),
})
export const referralCampaigns = pgTable(
'referral_campaigns',
{
id: text('id').primaryKey(),
name: text('name').notNull(),
code: text('code').unique(),
utmSource: text('utm_source'),
utmMedium: text('utm_medium'),
utmCampaign: text('utm_campaign'),
utmContent: text('utm_content'),
bonusCreditAmount: decimal('bonus_credit_amount').notNull(),
isActive: boolean('is_active').notNull().default(true),
createdAt: timestamp('created_at').notNull().defaultNow(),
updatedAt: timestamp('updated_at').notNull().defaultNow(),
},
(table) => ({
activeIdx: index('referral_campaigns_active_idx').on(table.isActive),
})
)
export const referralAttribution = pgTable(
'referral_attribution',
{
id: text('id').primaryKey(),
userId: text('user_id')
.notNull()
.references(() => user.id, { onDelete: 'cascade' })
.unique(),
organizationId: text('organization_id').references(() => organization.id, {
onDelete: 'set null',
}),
campaignId: text('campaign_id').references(() => referralCampaigns.id, {
onDelete: 'set null',
}),
utmSource: text('utm_source'),
utmMedium: text('utm_medium'),
utmCampaign: text('utm_campaign'),
utmContent: text('utm_content'),
referrerUrl: text('referrer_url'),
landingPage: text('landing_page'),
bonusCreditAmount: decimal('bonus_credit_amount').notNull().default('0'),
createdAt: timestamp('created_at').notNull().defaultNow(),
},
(table) => ({
userIdIdx: index('referral_attribution_user_id_idx').on(table.userId),
orgUniqueIdx: uniqueIndex('referral_attribution_org_unique_idx')
.on(table.organizationId)
.where(sql`${table.organizationId} IS NOT NULL`),
campaignIdIdx: index('referral_attribution_campaign_id_idx').on(table.campaignId),
utmCampaignIdx: index('referral_attribution_utm_campaign_idx').on(table.utmCampaign),
utmContentIdx: index('referral_attribution_utm_content_idx').on(table.utmContent),
createdAtIdx: index('referral_attribution_created_at_idx').on(table.createdAt),
})
)
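// Illustrative only (not part of this schema change): a per-campaign signup
// rollup over the tables above, assuming a drizzle-orm `db` instance exists.
//
//   import { count, eq } from 'drizzle-orm'
//   const signupsByCampaign = await db
//     .select({ campaign: referralCampaigns.name, signups: count(referralAttribution.id) })
//     .from(referralAttribution)
//     .leftJoin(referralCampaigns, eq(referralAttribution.campaignId, referralCampaigns.id))
//     .groupBy(referralCampaigns.name)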
export const customTools = pgTable(
'custom_tools',
{