Compare commits

..

10 Commits

Author SHA1 Message Date
Waleed
c0f5ba75f1 v0.4.11: webhook, knowledgebase, billing fixes & redis for sessions 2025-10-10 17:46:01 -07:00
Waleed
5a943bca32 fix(ci): pin all workflows and Dockerfiles to Bun v1.2.22 (#1598) 2025-10-10 17:36:06 -07:00
Waleed
923595f57e fix(webhooks): use next public app url instead of request origin for webhook registration (#1596)
* fix(webhooks): use next public app url instead of request origin for webhook registration

* ack PR comments

* ci: pin Bun to v1.2.22 to avoid Bun 1.3 breaking changes
2025-10-10 17:19:51 -07:00
Waleed
241d9fd12d improvement(kb): encode non-ASCII headers for kb uploads (#1595)
* improvement(kb): encode non-ASCII headers for kb uploads

* cleanup

* increase timeouts to match trigger
2025-10-10 17:19:51 -07:00
Vikhyath Mondreti
97a8778449 fix test webhook url (#1594) 2025-10-10 17:19:51 -07:00
Waleed
833e700b58 feat(sessions): add redis as priority option for session data (#1592)
* feat(sessions): add redis as priority option for session data

* update chat client otp
2025-10-10 17:19:51 -07:00
Waleed
2d49892aaa feat(deployed-chat): added file upload to workflow execute API, added to deployed chat, updated chat panel (#1588)
* feat(deployed-chat): updated chat panel UI, deployed chat and API can now accept files

* added nested tag dropdown for files

* added duplicate file validation to chat panel

* update docs & SDKs

* fixed build

* rm extraneous comments

* ack PR comments, cut multiple DB roundtrips for permissions & api key checks in api/workflows

* allow read-only users to access deployment info, but not take actions

* add downloadable file to logs for files passed in via API

* protect files/serve route that is only used client-side

---------

Co-authored-by: waleed <waleed>
2025-10-10 17:19:51 -07:00
Vikhyath Mondreti
8ce5a1b7c0 feat(billing): bill by threshold to prevent cancellation edge case (#1583)
* feat(billing): bill by threshold to prevent cancellation edge case

* fix org billing

* fix idempotency key issue

* small optimization for team checks

* remove console log

* remove unused type

* fix error handling
2025-10-10 17:19:51 -07:00
Waleed
88d2e7b97b fix(env-vars): remove regex parsing from table subblock, add formatDisplayText to various subblocks that didn't have it (#1582) 2025-10-10 17:19:51 -07:00
Waleed
c04eb01aed fix(db): revert to dedicated sockets db connection establishment (#1581) 2025-10-08 08:37:58 -07:00
87 changed files with 9935 additions and 1160 deletions

View File

@@ -17,7 +17,7 @@ jobs:
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: latest
bun-version: 1.2.22
- name: Setup Node
uses: actions/setup-node@v4

View File

@@ -27,7 +27,7 @@ jobs:
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: latest
bun-version: 1.2.22
- name: Run Lingo.dev translations
env:
@@ -116,7 +116,7 @@ jobs:
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: latest
bun-version: 1.2.22
- name: Install dependencies
run: |

View File

@@ -16,7 +16,7 @@ jobs:
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: latest
bun-version: 1.2.22
- name: Install dependencies
run: bun install

View File

@@ -16,7 +16,7 @@ jobs:
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: latest
bun-version: 1.2.22
- name: Setup Node.js for npm publishing
uses: actions/setup-node@v4

View File

@@ -16,7 +16,7 @@ jobs:
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: latest
bun-version: 1.2.22
- name: Setup Node.js for npm publishing
uses: actions/setup-node@v4

View File

@@ -16,7 +16,7 @@ jobs:
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: latest
bun-version: 1.2.22
- name: Setup Node
uses: actions/setup-node@v4

View File

@@ -27,7 +27,7 @@ jobs:
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: latest
bun-version: 1.2.22
- name: Install dependencies
run: bun install

View File

@@ -166,6 +166,38 @@ Different subscription plans have different usage limits:
| **Team** | $500 (pooled) | 50 sync, 100 async |
| **Enterprise** | Custom | Custom |
## Billing Model
Sim uses a **base subscription + overage** billing model:
### How It Works
**Pro Plan ($20/month):**
- Monthly subscription includes $20 of usage
- Usage under $20 → No additional charges
- Usage over $20 → Pay the overage at month end
- Example: $35 usage = $20 (subscription) + $15 (overage)
**Team Plan ($40/seat/month):**
- Pooled usage across all team members
- Overage calculated from total team usage
- Organization owner receives one bill
**Enterprise Plans:**
- Fixed monthly price, no overages
- Custom usage limits per agreement
### Threshold Billing
When unbilled overage reaches $50, Sim automatically bills the full unbilled amount.
**Example:**
- Day 10: $70 overage → Bill $70 immediately
- Day 15: Additional $35 usage ($105 total, $35 unbilled) → Below the $50 threshold, no action
- Day 20: Another $50 usage ($155 total, $85 unbilled) → Bill $85 immediately
This spreads large overage charges throughout the month instead of one large bill at period end.
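In pseudocode, the check is roughly the following (a minimal sketch; `getUnbilledOverage` and `billOverage` are hypothetical stand-ins for Sim's billing internals, not its actual API):
```typescript
const OVERAGE_THRESHOLD_USD = 50

// Hypothetical helpers standing in for Sim's billing internals.
declare function getUnbilledOverage(userId: string): Promise<number>
declare function billOverage(userId: string, amountUsd: number): Promise<void>

// Once unbilled overage reaches the threshold, bill the full unbilled amount.
async function checkOverageThreshold(userId: string): Promise<void> {
  const unbilled = await getUnbilledOverage(userId)
  if (unbilled >= OVERAGE_THRESHOLD_USD) {
    await billOverage(userId, unbilled)
  }
}
```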
## Cost Management Best Practices
1. **Monitor Regularly**: Check your usage dashboard frequently to avoid surprises

View File

@@ -593,14 +593,91 @@ async function executeClientSideWorkflow() {
});
console.log('Workflow result:', result);
// Update UI with result
document.getElementById('result')!.textContent =
document.getElementById('result')!.textContent =
JSON.stringify(result.output, null, 2);
} catch (error) {
console.error('Error:', error);
}
}
```
### File Upload
File objects are automatically detected and converted to base64 format. Include them in your input under the field name matching your workflow's API trigger input format.
The SDK converts File objects to this format:
```typescript
{
type: 'file',
data: 'data:mime/type;base64,base64data',
name: 'filename',
mime: 'mime/type'
}
```
Alternatively, you can manually provide files using the URL format:
```typescript
{
type: 'url',
data: 'https://example.com/file.pdf',
name: 'file.pdf',
mime: 'application/pdf'
}
```
<Tabs items={['Browser', 'Node.js']}>
<Tab value="Browser">
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
apiKey: process.env.NEXT_PUBLIC_SIM_API_KEY!
});
// From file input
async function handleFileUpload(event: Event) {
const input = event.target as HTMLInputElement;
const files = Array.from(input.files || []);
// Include files under the field name from your API trigger's input format
const result = await client.executeWorkflow('workflow-id', {
input: {
documents: files, // Must match the files-type field name in your workflow's Input Format
instructions: 'Analyze these documents'
}
});
console.log('Result:', result);
}
```
</Tab>
<Tab value="Node.js">
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
import fs from 'fs';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
// Read file and create File object
const fileBuffer = fs.readFileSync('./document.pdf');
const file = new File([fileBuffer], 'document.pdf', {
type: 'application/pdf'
});
// Include files under the field name from your API trigger's input format
const result = await client.executeWorkflow('workflow-id', {
input: {
documents: [file], // Must match the files-type field name in your workflow's Input Format
query: 'Summarize this document'
}
});
```
</Tab>
</Tabs>
// Attach to button click
document.getElementById('executeBtn')?.addEventListener('click', executeClientSideWorkflow);

View File

@@ -22,9 +22,17 @@ The API trigger exposes your workflow as a secure HTTP endpoint. Send JSON data
/>
</div>
Add an **Input Format** field for each parameter. Runtime output keys mirror the schema and are also available under `<api.input>`.
Add an **Input Format** field for each parameter. Supported types:
Manual runs in the editor use the `value` column so you can test without sending a request. During execution the resolver populates both `<api.userId>` and `<api.input.userId>`.
- **string** - Text values
- **number** - Numeric values
- **boolean** - True/false values
- **json** - JSON objects
- **files** - File uploads (access via `<api.fieldName[0].url>`, `<api.fieldName[0].name>`, etc.)
Runtime output keys mirror the schema and are available under `<api.input>`.
Manual runs in the editor use the `value` column so you can test without sending a request. During execution the resolver populates both `<api.fieldName>` and `<api.input.fieldName>`.
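For illustration, an Input Format with a files-type field might look like this (field names here are hypothetical):
```typescript
// Hypothetical Input Format definition for an API trigger
const inputFormat = [
  { name: 'documents', type: 'files' }, // referenced as <api.documents> at runtime
  { name: 'instructions', type: 'string' }, // referenced as <api.instructions>
]
// Both fields are also mirrored under <api.input>.
```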
## Request Example
@@ -123,6 +131,53 @@ data: {"blockId":"agent1-uuid","chunk":" complete"}
| `<api.field>` | Field defined in the Input Format |
| `<api.input>` | Entire structured request body |
### File Upload Format
The API accepts files in two formats:
**1. Base64-encoded files** (recommended for SDKs):
```json
{
"documents": [{
"type": "file",
"data": "data:application/pdf;base64,JVBERi0xLjQK...",
"name": "document.pdf",
"mime": "application/pdf"
}]
}
```
- Maximum file size: 20MB per file
- Files are uploaded to cloud storage and converted to UserFile objects with the properties listed below
**2. Direct URL references**:
```json
{
"documents": [{
"type": "url",
"data": "https://example.com/document.pdf",
"name": "document.pdf",
"mime": "application/pdf"
}]
}
```
- The file is not uploaded; the URL is passed through directly
- Useful for referencing existing files
### File Properties
Access individual file properties using array indexing:
| Property | Description | Type |
|----------|-------------|------|
| `<api.fieldName[0].url>` | Signed download URL | string |
| `<api.fieldName[0].name>` | Original filename | string |
| `<api.fieldName[0].size>` | File size in bytes | number |
| `<api.fieldName[0].type>` | MIME type | string |
| `<api.fieldName[0].uploadedAt>` | Upload timestamp (ISO 8601) | string |
| `<api.fieldName[0].expiresAt>` | URL expiry timestamp (ISO 8601) | string |
For URL-referenced files, the same properties are available except `uploadedAt` and `expiresAt` since the file is not uploaded to our storage.
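For reference, a sketch of the file object exposed at `<api.fieldName[0]>`, based on the table above (illustrative only, not the exact `UserFile` type):
```typescript
interface ApiTriggerFile {
  url: string // signed download URL
  name: string // original filename
  size: number // file size in bytes
  type: string // MIME type
  uploadedAt?: string // ISO 8601; absent for URL-referenced files
  expiresAt?: string // ISO 8601; absent for URL-referenced files
}
```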
If no Input Format is defined, the executor exposes the raw JSON at `<api.input>` only.
<Callout type="warning">

View File

@@ -24,13 +24,24 @@ The Chat trigger creates a conversational interface for your workflow. Deploy yo
The trigger writes three fields that downstream blocks can reference:
| Reference | Description |
|-----------|-------------|
| `<chat.input>` | Latest user message |
| `<chat.conversationId>` | Conversation thread ID |
| `<chat.files>` | Optional uploaded files |
| Reference | Description | Type |
|-----------|-------------|------|
| `<chat.input>` | Latest user message | string |
| `<chat.conversationId>` | Conversation thread ID | string |
| `<chat.files>` | Optional uploaded files | files array |
Files include `name`, `mimeType`, and a signed download `url`.
### File Properties
Access individual file properties using array indexing:
| Property | Description | Type |
|----------|-------------|------|
| `<chat.files[0].url>` | Signed download URL | string |
| `<chat.files[0].name>` | Original filename | string |
| `<chat.files[0].size>` | File size in bytes | number |
| `<chat.files[0].type>` | MIME type | string |
| `<chat.files[0].uploadedAt>` | Upload timestamp (ISO 8601) | string |
| `<chat.files[0].expiresAt>` | URL expiry timestamp (ISO 8601) | string |
## Usage Notes

View File

@@ -1116,12 +1116,20 @@ export function createMockDatabase(options: MockDatabaseOptions = {}) {
const createUpdateChain = () => ({
set: vi.fn().mockImplementation(() => ({
where: vi.fn().mockImplementation(() => {
if (updateOptions.throwError) {
return Promise.reject(createDbError('update', updateOptions.errorMessage))
}
return Promise.resolve(updateOptions.results)
}),
where: vi.fn().mockImplementation(() => ({
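// Support both awaiting the update chain directly (thenable) and chaining .returning()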
returning: vi.fn().mockImplementation(() => {
if (updateOptions.throwError) {
return Promise.reject(createDbError('update', updateOptions.errorMessage))
}
return Promise.resolve(updateOptions.results)
}),
then: vi.fn().mockImplementation((resolve) => {
if (updateOptions.throwError) {
return Promise.reject(createDbError('update', updateOptions.errorMessage))
}
return Promise.resolve(updateOptions.results).then(resolve)
}),
})),
})),
})

View File

@@ -3,6 +3,7 @@ import { userStats } from '@sim/db/schema'
import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing'
import { checkInternalApiKey } from '@/lib/copilot/utils'
import { isBillingEnabled } from '@/lib/environment'
import { createLogger } from '@/lib/logs/console/logger'
@@ -148,6 +149,9 @@ export async function POST(req: NextRequest) {
addedTokens: totalTokens,
})
// Check if user has hit overage threshold and bill incrementally
await checkAndBillOverageThreshold(userId)
const duration = Date.now() - startTime
logger.info(`[${requestId}] Cost update completed successfully`, {

View File

@@ -6,7 +6,7 @@ import { z } from 'zod'
import { renderOTPEmail } from '@/components/emails/render-email'
import { sendEmail } from '@/lib/email/mailer'
import { createLogger } from '@/lib/logs/console/logger'
import { getRedisClient, markMessageAsProcessed, releaseLock } from '@/lib/redis'
import { getRedisClient } from '@/lib/redis'
import { generateRequestId } from '@/lib/utils'
import { addCorsHeaders, setChatAuthCookie } from '@/app/api/chat/utils'
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
@@ -21,83 +21,52 @@ function generateOTP() {
// We use 15 minutes (900 seconds) expiry for OTPs
const OTP_EXPIRY = 15 * 60
// Store OTP in Redis
async function storeOTP(email: string, chatId: string, otp: string): Promise<void> {
async function storeOTP(email: string, chatId: string, otp: string): Promise<boolean> {
const key = `otp:${email}:${chatId}`
const redis = getRedisClient()
if (redis) {
// Use Redis if available
await redis.set(key, otp, 'EX', OTP_EXPIRY)
} else {
// Use the existing function as fallback to mark that an OTP exists
await markMessageAsProcessed(key, OTP_EXPIRY)
if (!redis) {
logger.warn('Redis not available, OTP functionality requires Redis')
return false
}
// For the fallback case, we need to handle storing the OTP value separately
// since markMessageAsProcessed only stores "1"
const valueKey = `${key}:value`
try {
// Access the in-memory cache directly - hacky but works for fallback
const inMemoryCache = (global as any).inMemoryCache
if (inMemoryCache) {
const fullKey = `processed:${valueKey}`
const expiry = OTP_EXPIRY ? Date.now() + OTP_EXPIRY * 1000 : null
inMemoryCache.set(fullKey, { value: otp, expiry })
}
} catch (error) {
logger.error('Error storing OTP in fallback cache:', error)
}
try {
await redis.set(key, otp, 'EX', OTP_EXPIRY)
return true
} catch (error) {
logger.error('Error storing OTP in Redis:', error)
return false
}
}
// Get OTP from Redis
async function getOTP(email: string, chatId: string): Promise<string | null> {
const key = `otp:${email}:${chatId}`
const redis = getRedisClient()
if (redis) {
// Use Redis if available
return await redis.get(key)
if (!redis) {
return null
}
// Use the existing function as fallback - check if it exists
const exists = await new Promise((resolve) => {
try {
// Check the in-memory cache directly - hacky but works for fallback
const inMemoryCache = (global as any).inMemoryCache
const fullKey = `processed:${key}`
const cacheEntry = inMemoryCache?.get(fullKey)
resolve(!!cacheEntry)
} catch {
resolve(false)
}
})
if (!exists) return null
// Try to get the value key
const valueKey = `${key}:value`
try {
const inMemoryCache = (global as any).inMemoryCache
const fullKey = `processed:${valueKey}`
const cacheEntry = inMemoryCache?.get(fullKey)
return cacheEntry?.value || null
} catch {
return await redis.get(key)
} catch (error) {
logger.error('Error getting OTP from Redis:', error)
return null
}
}
// Delete OTP from Redis
async function deleteOTP(email: string, chatId: string): Promise<void> {
const key = `otp:${email}:${chatId}`
const redis = getRedisClient()
if (redis) {
// Use Redis if available
if (!redis) {
return
}
try {
await redis.del(key)
} else {
// Use the existing function as fallback
await releaseLock(`processed:${key}`)
await releaseLock(`processed:${key}:value`)
} catch (error) {
logger.error('Error deleting OTP from Redis:', error)
}
}
@@ -177,7 +146,17 @@ export async function POST(
const otp = generateOTP()
await storeOTP(email, deployment.id, otp)
const stored = await storeOTP(email, deployment.id, otp)
if (!stored) {
logger.error(`[${requestId}] Failed to store OTP - Redis unavailable`)
return addCorsHeaders(
createErrorResponse(
'Email verification temporarily unavailable, please try again later',
503
),
request
)
}
const emailHtml = await renderOTPEmail(
otp,

View File

@@ -6,6 +6,7 @@ import { createLogger } from '@/lib/logs/console/logger'
import { generateRequestId } from '@/lib/utils'
import {
addCorsHeaders,
processChatFiles,
setChatAuthCookie,
validateAuthToken,
validateChatAuth,
@@ -75,7 +76,7 @@ export async function POST(
}
// Use the already parsed body
const { input, password, email, conversationId } = parsedBody
const { input, password, email, conversationId, files } = parsedBody
// If this is an authentication request (has password or email but no input),
// set auth cookie and return success
@@ -88,8 +89,8 @@ export async function POST(
return response
}
// For chat messages, create regular response
if (!input) {
// For chat messages, create regular response (allow empty input if files are present)
if (!input && (!files || files.length === 0)) {
return addCorsHeaders(createErrorResponse('No input provided', 400), request)
}
@@ -108,7 +109,6 @@ export async function POST(
}
try {
// Transform outputConfigs to selectedOutputs format (blockId_attribute format)
const selectedOutputs: string[] = []
if (deployment.outputConfigs && Array.isArray(deployment.outputConfigs)) {
for (const config of deployment.outputConfigs) {
@@ -123,11 +123,30 @@ export async function POST(
const { SSE_HEADERS } = await import('@/lib/utils')
const { createFilteredResult } = await import('@/app/api/workflows/[id]/execute/route')
const workflowInput: any = { input, conversationId }
if (files && Array.isArray(files) && files.length > 0) {
logger.debug(`[${requestId}] Processing ${files.length} attached files`)
const executionId = crypto.randomUUID()
const executionContext = {
workspaceId: deployment.userId,
workflowId: deployment.workflowId,
executionId,
}
const uploadedFiles = await processChatFiles(files, executionContext, requestId)
if (uploadedFiles.length > 0) {
workflowInput.files = uploadedFiles
logger.info(`[${requestId}] Successfully processed ${uploadedFiles.length} files`)
}
}
const stream = await createStreamingResponse({
requestId,
workflow: { id: deployment.workflowId, userId: deployment.userId, isDeployed: true },
input: { input, conversationId }, // Format for chat_trigger
executingUserId: deployment.userId, // Use workflow owner's ID for chat deployments
input: workflowInput,
executingUserId: deployment.userId,
streamConfig: {
selectedOutputs,
isSecureMode: true,

View File

@@ -6,6 +6,8 @@ import { isDev } from '@/lib/environment'
import { createLogger } from '@/lib/logs/console/logger'
import { hasAdminPermission } from '@/lib/permissions/utils'
import { decryptSecret } from '@/lib/utils'
import { uploadExecutionFile } from '@/lib/workflows/execution-file-storage'
import type { UserFile } from '@/executor/types'
const logger = createLogger('ChatAuthUtils')
@@ -263,3 +265,61 @@ export async function validateChatAuth(
// Unknown auth type
return { authorized: false, error: 'Unsupported authentication type' }
}
/**
* Process and upload chat files to execution storage
* Handles both base64 dataUrl format and direct URL pass-through
*/
export async function processChatFiles(
files: Array<{ dataUrl?: string; url?: string; name: string; type: string }>,
executionContext: { workspaceId: string; workflowId: string; executionId: string },
requestId: string
): Promise<UserFile[]> {
const uploadedFiles: UserFile[] = []
for (const file of files) {
try {
if (file.dataUrl) {
const dataUrlPrefix = 'data:'
const base64Prefix = ';base64,'
if (!file.dataUrl.startsWith(dataUrlPrefix)) {
logger.warn(`[${requestId}] Invalid dataUrl format for file: ${file.name}`)
continue
}
const base64Index = file.dataUrl.indexOf(base64Prefix)
if (base64Index === -1) {
logger.warn(
`[${requestId}] Invalid dataUrl format (no base64 marker) for file: ${file.name}`
)
continue
}
const mimeType = file.dataUrl.substring(dataUrlPrefix.length, base64Index)
const base64Data = file.dataUrl.substring(base64Index + base64Prefix.length)
const buffer = Buffer.from(base64Data, 'base64')
logger.debug(`[${requestId}] Uploading file to S3: ${file.name} (${buffer.length} bytes)`)
const userFile = await uploadExecutionFile(
executionContext,
buffer,
file.name,
mimeType || file.type
)
uploadedFiles.push(userFile)
logger.debug(`[${requestId}] Successfully uploaded ${file.name} with URL: ${userFile.url}`)
} else if (file.url) {
uploadedFiles.push(file as UserFile)
logger.debug(`[${requestId}] Using existing URL for file: ${file.name}`)
}
} catch (error) {
logger.error(`[${requestId}] Failed to process file ${file.name}:`, error)
throw new Error(`Failed to upload file: ${file.name}`)
}
}
return uploadedFiles
}

View File

@@ -1,10 +1,11 @@
import { readFile } from 'fs/promises'
import type { NextRequest, NextResponse } from 'next/server'
import type { NextRequest } from 'next/server'
import { NextResponse } from 'next/server'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { createLogger } from '@/lib/logs/console/logger'
import { downloadFile, getStorageProvider, isUsingCloudStorage } from '@/lib/uploads'
import { S3_KB_CONFIG } from '@/lib/uploads/setup'
import '@/lib/uploads/setup.server'
import {
createErrorResponse,
createFileResponse,
@@ -15,9 +16,6 @@ import {
const logger = createLogger('FilesServeAPI')
/**
* Main API route handler for serving files
*/
export async function GET(
request: NextRequest,
{ params }: { params: Promise<{ path: string[] }> }
@@ -31,27 +29,26 @@ export async function GET(
logger.info('File serve request:', { path })
// Join the path segments to get the filename or cloud key
const fullPath = path.join('/')
const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
// Check if this is a cloud file (path starts with 's3/' or 'blob/')
if (!authResult.success) {
logger.warn('Unauthorized file access attempt', { path, error: authResult.error })
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const userId = authResult.userId
const fullPath = path.join('/')
const isS3Path = path[0] === 's3'
const isBlobPath = path[0] === 'blob'
const isCloudPath = isS3Path || isBlobPath
const cloudKey = isCloudPath ? path.slice(1).join('/') : fullPath
// Use cloud handler if in production, path explicitly specifies cloud storage, or we're using cloud storage
if (isUsingCloudStorage() || isCloudPath) {
// Extract the actual key (remove 's3/' or 'blob/' prefix if present)
const cloudKey = isCloudPath ? path.slice(1).join('/') : fullPath
// Get bucket type from query parameter
const bucketType = request.nextUrl.searchParams.get('bucket')
return await handleCloudProxy(cloudKey, bucketType)
return await handleCloudProxy(cloudKey, bucketType, userId)
}
// Use local handler for local files
return await handleLocalFile(fullPath)
return await handleLocalFile(fullPath, userId)
} catch (error) {
logger.error('Error serving file:', error)
@@ -63,10 +60,7 @@ export async function GET(
}
}
/**
* Handle local file serving
*/
async function handleLocalFile(filename: string): Promise<NextResponse> {
async function handleLocalFile(filename: string, userId?: string): Promise<NextResponse> {
try {
const filePath = findLocalFile(filename)
@@ -77,6 +71,8 @@ async function handleLocalFile(filename: string): Promise<NextResponse> {
const fileBuffer = await readFile(filePath)
const contentType = getContentType(filename)
logger.info('Local file served', { userId, filename, size: fileBuffer.length })
return createFileResponse({
buffer: fileBuffer,
contentType,
@@ -112,12 +108,10 @@ async function downloadKBFile(cloudKey: string): Promise<Buffer> {
throw new Error(`Unsupported storage provider for KB files: ${storageProvider}`)
}
/**
* Proxy cloud file through our server
*/
async function handleCloudProxy(
cloudKey: string,
bucketType?: string | null
bucketType?: string | null,
userId?: string
): Promise<NextResponse> {
try {
// Check if this is a KB file (starts with 'kb/')
@@ -156,6 +150,13 @@ async function handleCloudProxy(
const originalFilename = cloudKey.split('/').pop() || 'download'
const contentType = getContentType(originalFilename)
logger.info('Cloud file served', {
userId,
key: cloudKey,
size: fileBuffer.length,
bucket: bucketType || 'default',
})
return createFileResponse({
buffer: fileBuffer,
contentType,

View File

@@ -123,8 +123,7 @@ export async function POST(request: NextRequest) {
}
}
// Create the serve path
const servePath = `/api/files/serve/${result.key}`
const servePath = result.path
const uploadResult = {
name: originalName,

View File

@@ -307,6 +307,22 @@ function getSecureFileHeaders(filename: string, originalContentType: string) {
}
}
/**
* Encode filename for Content-Disposition header to support non-ASCII characters
* Uses RFC 5987 encoding for international characters
*/
function encodeFilenameForHeader(filename: string): string {
const hasNonAscii = /[^\x00-\x7F]/.test(filename)
if (!hasNonAscii) {
return `filename="${filename}"`
}
const encodedFilename = encodeURIComponent(filename)
const asciiSafe = filename.replace(/[^\x00-\x7F]/g, '_')
return `filename="${asciiSafe}"; filename*=UTF-8''${encodedFilename}`
}
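// Illustrative example: encodeFilenameForHeader('résumé.pdf') returns
// `filename="r_sum_.pdf"; filename*=UTF-8''r%C3%A9sum%C3%A9.pdf`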
/**
* Create a file response with appropriate security headers
*/
@@ -317,7 +333,7 @@ export function createFileResponse(file: FileResponse): NextResponse {
status: 200,
headers: {
'Content-Type': contentType,
'Content-Disposition': `${disposition}; filename="${file.filename}"`,
'Content-Disposition': `${disposition}; ${encodeFilenameForHeader(file.filename)}`,
'Cache-Control': 'public, max-age=31536000', // Cache for 1 year
'X-Content-Type-Options': 'nosniff',
'Content-Security-Policy': "default-src 'none'; style-src 'unsafe-inline'; sandbox;",

View File

@@ -3,6 +3,7 @@ import { userStats, workflow } from '@sim/db/schema'
import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import OpenAI, { AzureOpenAI } from 'openai'
import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing'
import { env } from '@/lib/env'
import { getCostMultiplier, isBillingEnabled } from '@/lib/environment'
import { createLogger } from '@/lib/logs/console/logger'
@@ -133,6 +134,9 @@ async function updateUserStatsForWand(
tokensUsed: totalTokens,
costAdded: costToStore,
})
// Check if user has hit overage threshold and bill incrementally
await checkAndBillOverageThreshold(userId)
} catch (error) {
logger.error(`[${requestId}] Failed to update user stats for wand usage`, error)
}

View File

@@ -282,11 +282,13 @@ export async function DELETE(
if (!resolvedExternalId) {
try {
const requestOrigin = new URL(request.url).origin
const effectiveOrigin = requestOrigin.includes('localhost')
? env.NEXT_PUBLIC_APP_URL || requestOrigin
: requestOrigin
const expectedNotificationUrl = `${effectiveOrigin}/api/webhooks/trigger/${foundWebhook.path}`
if (!env.NEXT_PUBLIC_APP_URL) {
logger.error(
`[${requestId}] NEXT_PUBLIC_APP_URL not configured, cannot match Airtable webhook`
)
throw new Error('NEXT_PUBLIC_APP_URL must be configured')
}
const expectedNotificationUrl = `${env.NEXT_PUBLIC_APP_URL}/api/webhooks/trigger/${foundWebhook.path}`
const listUrl = `https://api.airtable.com/v0/bases/${baseId}/webhooks`
const listResp = await fetch(listUrl, {

View File

@@ -64,13 +64,13 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
return NextResponse.json({ error: 'Forbidden' }, { status: 403 })
}
const origin = new URL(request.url).origin
const effectiveOrigin = origin.includes('localhost')
? env.NEXT_PUBLIC_APP_URL || origin
: origin
if (!env.NEXT_PUBLIC_APP_URL) {
logger.error(`[${requestId}] NEXT_PUBLIC_APP_URL not configured`)
return NextResponse.json({ error: 'Server configuration error' }, { status: 500 })
}
const token = await signTestWebhookToken(id, ttlSeconds)
const url = `${effectiveOrigin}/api/webhooks/test/${id}?token=${encodeURIComponent(token)}`
const url = `${env.NEXT_PUBLIC_APP_URL}/api/webhooks/test/${id}?token=${encodeURIComponent(token)}`
logger.info(`[${requestId}] Minted test URL for webhook ${id}`)
return NextResponse.json({

View File

@@ -432,25 +432,20 @@ async function createAirtableWebhookSubscription(
logger.warn(
`[${requestId}] Could not retrieve Airtable access token for user ${userId}. Cannot create webhook in Airtable.`
)
// Instead of silently returning, throw an error with clear user guidance
throw new Error(
'Airtable account connection required. Please connect your Airtable account in the trigger configuration and try again.'
)
}
const requestOrigin = new URL(request.url).origin
// Ensure origin does not point to localhost for external API calls
const effectiveOrigin = requestOrigin.includes('localhost')
? env.NEXT_PUBLIC_APP_URL || requestOrigin // Use env var if available, fallback to original
: requestOrigin
const notificationUrl = `${effectiveOrigin}/api/webhooks/trigger/${path}`
if (effectiveOrigin !== requestOrigin) {
logger.debug(
`[${requestId}] Remapped localhost origin to ${effectiveOrigin} for notificationUrl`
if (!env.NEXT_PUBLIC_APP_URL) {
logger.error(
`[${requestId}] NEXT_PUBLIC_APP_URL not configured, cannot register Airtable webhook`
)
throw new Error('NEXT_PUBLIC_APP_URL must be configured for Airtable webhook registration')
}
const notificationUrl = `${env.NEXT_PUBLIC_APP_URL}/api/webhooks/trigger/${path}`
const airtableApiUrl = `https://api.airtable.com/v0/bases/${baseId}/webhooks`
const specification: any = {
@@ -549,19 +544,15 @@ async function createTelegramWebhookSubscription(
return // Cannot proceed without botToken
}
const requestOrigin = new URL(request.url).origin
// Ensure origin does not point to localhost for external API calls
const effectiveOrigin = requestOrigin.includes('localhost')
? env.NEXT_PUBLIC_APP_URL || requestOrigin // Use env var if available, fallback to original
: requestOrigin
const notificationUrl = `${effectiveOrigin}/api/webhooks/trigger/${path}`
if (effectiveOrigin !== requestOrigin) {
logger.debug(
`[${requestId}] Remapped localhost origin to ${effectiveOrigin} for notificationUrl`
if (!env.NEXT_PUBLIC_APP_URL) {
logger.error(
`[${requestId}] NEXT_PUBLIC_APP_URL not configured, cannot register Telegram webhook`
)
throw new Error('NEXT_PUBLIC_APP_URL must be configured for Telegram webhook registration')
}
const notificationUrl = `${env.NEXT_PUBLIC_APP_URL}/api/webhooks/trigger/${path}`
const telegramApiUrl = `https://api.telegram.org/bot${botToken}/setWebhook`
const requestBody: any = {

View File

@@ -2,6 +2,7 @@ import { db } from '@sim/db'
import { webhook } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console/logger'
import { generateRequestId } from '@/lib/utils'
@@ -13,7 +14,6 @@ export async function GET(request: NextRequest) {
const requestId = generateRequestId()
try {
// Get the webhook ID and provider from the query parameters
const { searchParams } = new URL(request.url)
const webhookId = searchParams.get('id')
@@ -24,7 +24,6 @@ export async function GET(request: NextRequest) {
logger.debug(`[${requestId}] Testing webhook with ID: ${webhookId}`)
// Find the webhook in the database
const webhooks = await db.select().from(webhook).where(eq(webhook.id, webhookId)).limit(1)
if (webhooks.length === 0) {
@@ -36,8 +35,14 @@ export async function GET(request: NextRequest) {
const provider = foundWebhook.provider || 'generic'
const providerConfig = (foundWebhook.providerConfig as Record<string, any>) || {}
// Construct the webhook URL
const baseUrl = new URL(request.url).origin
if (!env.NEXT_PUBLIC_APP_URL) {
logger.error(`[${requestId}] NEXT_PUBLIC_APP_URL not configured, cannot test webhook`)
return NextResponse.json(
{ success: false, error: 'NEXT_PUBLIC_APP_URL must be configured' },
{ status: 500 }
)
}
const baseUrl = env.NEXT_PUBLIC_APP_URL
const webhookUrl = `${baseUrl}/api/webhooks/trigger/${foundWebhook.path}`
logger.info(`[${requestId}] Testing webhook for provider: ${provider}`, {
@@ -46,7 +51,6 @@ export async function GET(request: NextRequest) {
isActive: foundWebhook.isActive,
})
// Provider-specific test logic
switch (provider) {
case 'whatsapp': {
const verificationToken = providerConfig.verificationToken
@@ -59,10 +63,8 @@ export async function GET(request: NextRequest) {
)
}
// Generate a test challenge
const challenge = `test_${Date.now()}`
// Construct the WhatsApp verification URL
const whatsappUrl = `${webhookUrl}?hub.mode=subscribe&hub.verify_token=${verificationToken}&hub.challenge=${challenge}`
logger.debug(`[${requestId}] Testing WhatsApp webhook verification`, {
@@ -70,19 +72,16 @@ export async function GET(request: NextRequest) {
challenge,
})
// Make a request to the webhook endpoint
const response = await fetch(whatsappUrl, {
headers: {
'User-Agent': 'facebookplatform/1.0',
},
})
// Get the response details
const status = response.status
const contentType = response.headers.get('content-type')
const responseText = await response.text()
// Check if the test was successful
const success = status === 200 && responseText === challenge
if (success) {
@@ -139,7 +138,6 @@ export async function GET(request: NextRequest) {
)
}
// Test the webhook endpoint with a simple message to check if it's reachable
const testMessage = {
update_id: 12345,
message: {
@@ -165,7 +163,6 @@ export async function GET(request: NextRequest) {
url: webhookUrl,
})
// Make a test request to the webhook endpoint
const response = await fetch(webhookUrl, {
method: 'POST',
headers: {
@@ -175,16 +172,12 @@ export async function GET(request: NextRequest) {
body: JSON.stringify(testMessage),
})
// Get the response details
const status = response.status
let responseText = ''
try {
responseText = await response.text()
} catch (_e) {
// Ignore if we can't get response text
}
} catch (_e) {}
// Consider success if we get a 2xx response
const success = status >= 200 && status < 300
if (success) {
@@ -196,7 +189,6 @@ export async function GET(request: NextRequest) {
})
}
// Get webhook info from Telegram API
let webhookInfo = null
try {
const webhookInfoUrl = `https://api.telegram.org/bot${botToken}/getWebhookInfo`
@@ -215,7 +207,6 @@ export async function GET(request: NextRequest) {
logger.warn(`[${requestId}] Failed to get Telegram webhook info`, e)
}
// Format the curl command for testing
const curlCommand = [
`curl -X POST "${webhookUrl}"`,
`-H "Content-Type: application/json"`,
@@ -288,16 +279,13 @@ export async function GET(request: NextRequest) {
}
case 'generic': {
// Get the general webhook configuration
const token = providerConfig.token
const secretHeaderName = providerConfig.secretHeaderName
const requireAuth = providerConfig.requireAuth
const allowedIps = providerConfig.allowedIps
// Generate sample curl command for testing
let curlCommand = `curl -X POST "${webhookUrl}" -H "Content-Type: application/json"`
// Add auth headers to the curl command if required
if (requireAuth && token) {
if (secretHeaderName) {
curlCommand += ` -H "${secretHeaderName}: ${token}"`
@@ -306,7 +294,6 @@ export async function GET(request: NextRequest) {
}
}
// Add a sample payload
curlCommand += ` -d '{"event":"test_event","timestamp":"${new Date().toISOString()}"}'`
logger.info(`[${requestId}] General webhook test successful: ${webhookId}`)
@@ -391,7 +378,6 @@ export async function GET(request: NextRequest) {
})
}
// Add the Airtable test case
case 'airtable': {
const baseId = providerConfig.baseId
const tableId = providerConfig.tableId
@@ -408,7 +394,6 @@ export async function GET(request: NextRequest) {
)
}
// Define a sample payload structure
const samplePayload = {
webhook: {
id: 'whiYOUR_WEBHOOK_ID',
@@ -418,16 +403,15 @@ export async function GET(request: NextRequest) {
},
payloadFormat: 'v0',
actionMetadata: {
source: 'tableOrViewChange', // Example source
source: 'tableOrViewChange',
sourceMetadata: {},
},
payloads: [
{
timestamp: new Date().toISOString(),
baseTransactionNumber: Date.now(), // Example transaction number
baseTransactionNumber: Date.now(),
changedTablesById: {
[tableId]: {
// Example changes - structure may vary based on actual event
changedRecordsById: {
recSAMPLEID1: {
current: { cellValuesByFieldId: { fldSAMPLEID: 'New Value' } },
@@ -442,7 +426,6 @@ export async function GET(request: NextRequest) {
],
}
// Generate sample curl command
let curlCommand = `curl -X POST "${webhookUrl}" -H "Content-Type: application/json"`
curlCommand += ` -d '${JSON.stringify(samplePayload, null, 2)}'`
@@ -519,7 +502,6 @@ export async function GET(request: NextRequest) {
}
default: {
// Generic webhook test
logger.info(`[${requestId}] Generic webhook test successful: ${webhookId}`)
return NextResponse.json({
success: true,

View File

@@ -1,17 +1,14 @@
import { db } from '@sim/db'
import { workflow as workflowTable } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console/logger'
import { getUserEntityPermissions } from '@/lib/permissions/utils'
import { generateRequestId } from '@/lib/utils'
import { applyAutoLayout } from '@/lib/workflows/autolayout'
import {
loadWorkflowFromNormalizedTables,
type NormalizedWorkflowData,
} from '@/lib/workflows/db-helpers'
import { getWorkflowAccessContext } from '@/lib/workflows/utils'
export const dynamic = 'force-dynamic'
@@ -77,11 +74,8 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
})
// Fetch the workflow to check ownership/access
const workflowData = await db
.select()
.from(workflowTable)
.where(eq(workflowTable.id, workflowId))
.then((rows) => rows[0])
const accessContext = await getWorkflowAccessContext(workflowId, userId)
const workflowData = accessContext?.workflow
if (!workflowData) {
logger.warn(`[${requestId}] Workflow ${workflowId} not found for autolayout`)
@@ -89,24 +83,12 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
}
// Check if user has permission to update this workflow
let canUpdate = false
// Case 1: User owns the workflow
if (workflowData.userId === userId) {
canUpdate = true
}
// Case 2: Workflow belongs to a workspace and user has write or admin permission
if (!canUpdate && workflowData.workspaceId) {
const userPermission = await getUserEntityPermissions(
userId,
'workspace',
workflowData.workspaceId
)
if (userPermission === 'write' || userPermission === 'admin') {
canUpdate = true
}
}
const canUpdate =
accessContext?.isOwner ||
(workflowData.workspaceId
? accessContext?.workspacePermission === 'write' ||
accessContext?.workspacePermission === 'admin'
: false)
if (!canUpdate) {
logger.warn(

View File

@@ -47,17 +47,17 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
// Duplicate workflow and all related data in a transaction
const result = await db.transaction(async (tx) => {
// First verify the source workflow exists
const sourceWorkflow = await tx
const sourceWorkflowRow = await tx
.select()
.from(workflow)
.where(eq(workflow.id, sourceWorkflowId))
.limit(1)
if (sourceWorkflow.length === 0) {
if (sourceWorkflowRow.length === 0) {
throw new Error('Source workflow not found')
}
const source = sourceWorkflow[0]
const source = sourceWorkflowRow[0]
// Check if user has permission to access the source workflow
let canAccessSource = false

View File

@@ -23,7 +23,11 @@ import {
workflowHasResponseBlock,
} from '@/lib/workflows/utils'
import { validateWorkflowAccess } from '@/app/api/workflows/middleware'
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
import {
createErrorResponse,
createSuccessResponse,
processApiWorkflowField,
} from '@/app/api/workflows/utils'
import { Executor } from '@/executor'
import type { ExecutionResult } from '@/executor/types'
import { Serializer } from '@/serializer'
@@ -74,16 +78,13 @@ function resolveOutputIds(
return selectedOutputs
}
// UUID regex to detect if it's already in blockId_attribute format
const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/i
return selectedOutputs.map((outputId) => {
// If it starts with a UUID, it's already in blockId_attribute format (from chat deployments)
if (UUID_REGEX.test(outputId)) {
return outputId
}
// Otherwise, it's in blockName.path format from the user/API
const dotIndex = outputId.indexOf('.')
if (dotIndex === -1) {
logger.warn(`Invalid output ID format (missing dot): ${outputId}`)
@@ -93,7 +94,6 @@ function resolveOutputIds(
const blockName = outputId.substring(0, dotIndex)
const path = outputId.substring(dotIndex + 1)
// Find the block by name (case-insensitive, ignoring spaces)
const normalizedBlockName = blockName.toLowerCase().replace(/\s+/g, '')
const block = Object.values(blocks).find((b: any) => {
const normalized = (b.name || '').toLowerCase().replace(/\s+/g, '')
@@ -137,9 +137,6 @@ export async function executeWorkflow(
const loggingSession = new LoggingSession(workflowId, executionId, 'api', requestId)
// Rate limiting is now handled before entering the sync queue
// Check if the actor has exceeded their usage limits
const usageCheck = await checkServerSideUsageLimits(actorUserId)
if (usageCheck.isExceeded) {
logger.warn(`[${requestId}] User ${workflow.userId} has exceeded usage limits`, {
@@ -151,13 +148,11 @@ export async function executeWorkflow(
)
}
// Log input to help debug
logger.info(
`[${requestId}] Executing workflow with input:`,
input ? JSON.stringify(input, null, 2) : 'No input provided'
)
// Use input directly for API workflows
const processedInput = input
logger.info(
`[${requestId}] Using input directly for workflow:`,
@@ -168,10 +163,7 @@ export async function executeWorkflow(
runningExecutions.add(executionKey)
logger.info(`[${requestId}] Starting workflow execution: ${workflowId}`)
// Load workflow data from deployed state for API executions
const deployedData = await loadDeployedWorkflowState(workflowId)
// Use deployed data as primary source for API executions
const { blocks, edges, loops, parallels } = deployedData
logger.info(`[${requestId}] Using deployed state for workflow execution: ${workflowId}`)
logger.debug(`[${requestId}] Deployed data loaded:`, {
@@ -181,10 +173,8 @@ export async function executeWorkflow(
parallelsCount: Object.keys(parallels || {}).length,
})
// Use the same execution flow as in scheduled executions
const mergedStates = mergeSubblockState(blocks)
// Load personal (for the executing user) and workspace env (workspace overrides personal)
const { personalEncrypted, workspaceEncrypted } = await getPersonalAndWorkspaceEnv(
actorUserId,
workflow.workspaceId || undefined
@@ -197,7 +187,6 @@ export async function executeWorkflow(
variables,
})
// Replace environment variables in the block states
const currentBlockStates = await Object.entries(mergedStates).reduce(
async (accPromise, [id, block]) => {
const acc = await accPromise
@@ -206,13 +195,11 @@ export async function executeWorkflow(
const subAcc = await subAccPromise
let value = subBlock.value
// If the value is a string and contains environment variable syntax
if (typeof value === 'string' && value.includes('{{') && value.includes('}}')) {
const matches = value.match(/{{([^}]+)}}/g)
if (matches) {
// Process all matches sequentially
for (const match of matches) {
const varName = match.slice(2, -2) // Remove {{ and }}
const varName = match.slice(2, -2)
const encryptedValue = variables[varName]
if (!encryptedValue) {
throw new Error(`Environment variable "${varName}" was not found`)
@@ -244,7 +231,6 @@ export async function executeWorkflow(
Promise.resolve({} as Record<string, Record<string, any>>)
)
// Create a map of decrypted environment variables
const decryptedEnvVars: Record<string, string> = {}
for (const [key, encryptedValue] of Object.entries(variables)) {
try {
@@ -256,22 +242,17 @@ export async function executeWorkflow(
}
}
// Process the block states to ensure response formats are properly parsed
const processedBlockStates = Object.entries(currentBlockStates).reduce(
(acc, [blockId, blockState]) => {
// Check if this block has a responseFormat that needs to be parsed
if (blockState.responseFormat && typeof blockState.responseFormat === 'string') {
const responseFormatValue = blockState.responseFormat.trim()
// Check for variable references like <start.input>
if (responseFormatValue.startsWith('<') && responseFormatValue.includes('>')) {
logger.debug(
`[${requestId}] Response format contains variable reference for block ${blockId}`
)
// Keep variable references as-is - they will be resolved during execution
acc[blockId] = blockState
} else if (responseFormatValue === '') {
// Empty string - remove response format
acc[blockId] = {
...blockState,
responseFormat: undefined,
@@ -279,7 +260,6 @@ export async function executeWorkflow(
} else {
try {
logger.debug(`[${requestId}] Parsing responseFormat for block ${blockId}`)
// Attempt to parse the responseFormat if it's a string
const parsedResponseFormat = JSON.parse(responseFormatValue)
acc[blockId] = {
@@ -291,7 +271,6 @@ export async function executeWorkflow(
`[${requestId}] Failed to parse responseFormat for block ${blockId}, using undefined`,
error
)
// Set to undefined instead of keeping malformed JSON - this allows execution to continue
acc[blockId] = {
...blockState,
responseFormat: undefined,
@@ -306,7 +285,6 @@ export async function executeWorkflow(
{} as Record<string, Record<string, any>>
)
// Get workflow variables - they are stored as JSON objects in the database
const workflowVariables = (workflow.variables as Record<string, any>) || {}
if (Object.keys(workflowVariables).length > 0) {
@@ -317,20 +295,15 @@ export async function executeWorkflow(
logger.debug(`[${requestId}] No workflow variables found for: ${workflowId}`)
}
// Serialize and execute the workflow
logger.debug(`[${requestId}] Serializing workflow: ${workflowId}`)
const serializedWorkflow = new Serializer().serializeWorkflow(
mergedStates,
edges,
loops,
parallels,
true // Enable validation during execution
true
)
// Determine trigger start block based on execution type
// - 'chat': For chat deployments (looks for chat_trigger block)
// - 'api': For direct API execution (looks for api_trigger block)
// streamConfig is passed from POST handler when using streaming/chat
const preferredTriggerType = streamConfig?.workflowTriggerType || 'api'
const startBlock = TriggerUtils.findStartBlock(mergedStates, preferredTriggerType, false)
@@ -346,8 +319,6 @@ export async function executeWorkflow(
const startBlockId = startBlock.blockId
const triggerBlock = startBlock.block
// Check if the API trigger has any outgoing connections (except for legacy starter blocks)
// Legacy starter blocks have their own validation in the executor
if (triggerBlock.type !== 'starter') {
const outgoingConnections = serializedWorkflow.connections.filter(
(conn) => conn.source === startBlockId
@@ -358,14 +329,12 @@ export async function executeWorkflow(
}
}
// Build context extensions
const contextExtensions: any = {
executionId,
workspaceId: workflow.workspaceId,
isDeployedContext: true,
}
// Add streaming configuration if enabled
if (streamConfig?.enabled) {
contextExtensions.stream = true
contextExtensions.selectedOutputs = streamConfig.selectedOutputs || []
@@ -386,10 +355,8 @@ export async function executeWorkflow(
contextExtensions,
})
// Set up logging on the executor
loggingSession.setupExecutor(executor)
// Execute workflow (will always return ExecutionResult since we don't use onStream)
const result = (await executor.execute(workflowId, startBlockId)) as ExecutionResult
logger.info(`[${requestId}] Workflow execution completed: ${workflowId}`, {
@@ -397,14 +364,11 @@ export async function executeWorkflow(
executionTime: result.metadata?.duration,
})
// Build trace spans from execution result (works for both success and failure)
const { traceSpans, totalDuration } = buildTraceSpans(result)
// Update workflow run counts if execution was successful
if (result.success) {
await updateWorkflowRunCounts(workflowId)
// Track API call in user stats
await db
.update(userStats)
.set({
@@ -419,9 +383,9 @@ export async function executeWorkflow(
totalDurationMs: totalDuration || 0,
finalOutput: result.output || {},
traceSpans: (traceSpans || []) as any,
workflowInput: processedInput,
})
// For non-streaming, return the execution result
return result
} catch (error: any) {
logger.error(`[${requestId}] Workflow execution failed: ${workflowId}`, error)
@@ -461,22 +425,16 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
return createErrorResponse(validation.error.message, validation.error.status)
}
// Determine trigger type based on authentication
let triggerType: TriggerType = 'manual'
const session = await getSession()
if (!session?.user?.id) {
// Check for API key
const apiKeyHeader = request.headers.get('X-API-Key')
if (apiKeyHeader) {
triggerType = 'api'
}
}
// Note: Async execution is now handled in the POST handler below
// Synchronous execution
try {
// Resolve actor user id
let actorUserId: string | null = null
if (triggerType === 'manual') {
actorUserId = session!.user!.id
@@ -491,7 +449,6 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
void updateApiKeyLastUsed(auth.keyId).catch(() => {})
}
// Check rate limits BEFORE entering execution for API requests
const userSubscription = await getHighestPrioritySubscription(actorUserId)
const rateLimiter = new RateLimiter()
const rateLimitCheck = await rateLimiter.checkRateLimitWithSubscription(
@@ -514,13 +471,11 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
actorUserId as string
)
// Check if the workflow execution contains a response block output
const hasResponseBlock = workflowHasResponseBlock(result)
if (hasResponseBlock) {
return createHttpResponseFromBlock(result)
}
// Filter out logs and workflowConnections from the API response
const filteredResult = createFilteredResult(result)
return createSuccessResponse(filteredResult)
} catch (error: any) {
@@ -536,12 +491,10 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
} catch (error: any) {
logger.error(`[${requestId}] Error executing workflow: ${id}`, error)
// Check if this is a rate limit error
if (error instanceof RateLimitError) {
return createErrorResponse(error.message, error.statusCode, 'RATE_LIMIT_EXCEEDED')
}
// Check if this is a usage limit error
if (error instanceof UsageLimitError) {
return createErrorResponse(error.message, error.statusCode, 'USAGE_LIMIT_EXCEEDED')
}
@@ -566,18 +519,15 @@ export async function POST(
const workflowId = id
try {
// Validate workflow access
const validation = await validateWorkflowAccess(request as NextRequest, id)
if (validation.error) {
logger.warn(`[${requestId}] Workflow access validation failed: ${validation.error.message}`)
return createErrorResponse(validation.error.message, validation.error.status)
}
// Check execution mode from header
const executionMode = request.headers.get('X-Execution-Mode')
const isAsync = executionMode === 'async'
// Parse request body first to check for internal parameters
const body = await request.text()
logger.info(`[${requestId}] ${body ? 'Request body provided' : 'No request body provided'}`)
@@ -616,17 +566,78 @@ export async function POST(
streamResponse,
selectedOutputs,
workflowTriggerType,
input,
input: rawInput,
} = extractExecutionParams(request as NextRequest, parsedBody)
// Get authenticated user and determine trigger type
let processedInput = rawInput
logger.info(`[${requestId}] Raw input received:`, JSON.stringify(rawInput, null, 2))
try {
const deployedData = await loadDeployedWorkflowState(workflowId)
const blocks = deployedData.blocks || {}
logger.info(`[${requestId}] Loaded ${Object.keys(blocks).length} blocks from workflow`)
const apiTriggerBlock = Object.values(blocks).find(
(block: any) => block.type === 'api_trigger'
) as any
logger.info(`[${requestId}] API trigger block found:`, !!apiTriggerBlock)
if (apiTriggerBlock?.subBlocks?.inputFormat?.value) {
const inputFormat = apiTriggerBlock.subBlocks.inputFormat.value as Array<{
name: string
type: 'string' | 'number' | 'boolean' | 'object' | 'array' | 'files'
}>
logger.info(
`[${requestId}] Input format fields:`,
inputFormat.map((f) => `${f.name}:${f.type}`).join(', ')
)
const fileFields = inputFormat.filter((field) => field.type === 'files')
logger.info(`[${requestId}] Found ${fileFields.length} file-type fields`)
if (fileFields.length > 0 && typeof rawInput === 'object' && rawInput !== null) {
const executionContext = {
workspaceId: validation.workflow.workspaceId,
workflowId,
}
for (const fileField of fileFields) {
const fieldValue = rawInput[fileField.name]
if (fieldValue && typeof fieldValue === 'object') {
const uploadedFiles = await processApiWorkflowField(
fieldValue,
executionContext,
requestId
)
if (uploadedFiles.length > 0) {
processedInput = {
...processedInput,
[fileField.name]: uploadedFiles,
}
logger.info(
`[${requestId}] Successfully processed ${uploadedFiles.length} file(s) for field: ${fileField.name}`
)
}
}
}
}
}
} catch (error) {
logger.error(`[${requestId}] Failed to process file uploads:`, error)
const errorMessage = error instanceof Error ? error.message : 'Failed to process file uploads'
return createErrorResponse(errorMessage, 400)
}
const input = processedInput
let authenticatedUserId: string
let triggerType: TriggerType = 'manual'
// For internal calls (chat deployments), use the workflow owner's ID
if (finalIsSecureMode) {
authenticatedUserId = validation.workflow.userId
triggerType = 'manual' // Chat deployments use manual trigger type (no rate limit)
triggerType = 'manual'
} else {
const session = await getSession()
const apiKeyHeader = request.headers.get('X-API-Key')
@@ -649,7 +660,6 @@ export async function POST(
}
}
// Get user subscription (checks both personal and org subscriptions)
const userSubscription = await getHighestPrioritySubscription(authenticatedUserId)
if (isAsync) {
@@ -659,7 +669,7 @@ export async function POST(
authenticatedUserId,
userSubscription,
'api',
true // isAsync = true
true
)
if (!rateLimitCheck.allowed) {
@@ -683,7 +693,6 @@ export async function POST(
)
}
// Rate limit passed - always use Trigger.dev for async executions
const handle = await tasks.trigger('workflow-execution', {
workflowId,
userId: authenticatedUserId,
@@ -723,7 +732,7 @@ export async function POST(
authenticatedUserId,
userSubscription,
triggerType,
false // isAsync = false for sync calls
false
)
if (!rateLimitCheck.allowed) {
@@ -732,15 +741,12 @@ export async function POST(
)
}
// Handle streaming response - wrap execution in SSE stream
if (streamResponse) {
// Load workflow blocks to resolve output IDs from blockName.attribute to blockId_attribute format
const deployedData = await loadDeployedWorkflowState(workflowId)
const resolvedSelectedOutputs = selectedOutputs
? resolveOutputIds(selectedOutputs, deployedData.blocks || {})
: selectedOutputs
// Use shared streaming response creator
const { createStreamingResponse } = await import('@/lib/workflows/streaming')
const { SSE_HEADERS } = await import('@/lib/utils')
@@ -763,7 +769,6 @@ export async function POST(
})
}
// Non-streaming execution
const result = await executeWorkflow(
validation.workflow,
requestId,
@@ -772,13 +777,11 @@ export async function POST(
undefined
)
// Non-streaming response
const hasResponseBlock = workflowHasResponseBlock(result)
if (hasResponseBlock) {
return createHttpResponseFromBlock(result)
}
// Filter out logs and workflowConnections from the API response
const filteredResult = createFilteredResult(result)
return createSuccessResponse(filteredResult)
} catch (error: any) {
@@ -794,17 +797,14 @@ export async function POST(
} catch (error: any) {
logger.error(`[${requestId}] Error executing workflow: ${workflowId}`, error)
// Check if this is a rate limit error
if (error instanceof RateLimitError) {
return createErrorResponse(error.message, error.statusCode, 'RATE_LIMIT_EXCEEDED')
}
// Check if this is a usage limit error
if (error instanceof UsageLimitError) {
return createErrorResponse(error.message, error.statusCode, 'USAGE_LIMIT_EXCEEDED')
}
// Check if this is a rate limit error (string match for backward compatibility)
if (error.message?.includes('Rate limit exceeded')) {
return createErrorResponse(error.message, 429, 'RATE_LIMIT_EXCEEDED')
}
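For API consumers, these branches surface as structured error codes. A minimal caller-side sketch, assuming the error body is JSON of the form { error, code } and that the execute endpoint lives at /api/workflows/{id}/execute; only the X-API-Key header and the RATE_LIMIT_EXCEEDED / USAGE_LIMIT_EXCEEDED codes are taken from the route above.

// Illustrative client sketch, not part of the diff.
async function callWorkflowExecuteApi(workflowId: string, apiKey: string, input: unknown) {
  const res = await fetch(`/api/workflows/${workflowId}/execute`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'X-API-Key': apiKey },
    body: JSON.stringify(input),
  })
  if (!res.ok) {
    const body = await res.json().catch(() => ({}))
    if (body.code === 'RATE_LIMIT_EXCEEDED' || res.status === 429) {
      // Transient: back off and retry later.
      throw new Error(body.error ?? 'Rate limit exceeded')
    }
    if (body.code === 'USAGE_LIMIT_EXCEEDED') {
      // Not transient: the account has hit its usage cap.
      throw new Error(body.error ?? 'Usage limit exceeded')
    }
    throw new Error(body.error ?? `Workflow execution failed with status ${res.status}`)
  }
  return res.json()
}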

View File

@@ -16,6 +16,9 @@ describe('Workflow By ID API Route', () => {
error: vi.fn(),
}
const mockGetWorkflowById = vi.fn()
const mockGetWorkflowAccessContext = vi.fn()
beforeEach(() => {
vi.resetModules()
@@ -30,6 +33,20 @@ describe('Workflow By ID API Route', () => {
vi.doMock('@/lib/workflows/db-helpers', () => ({
loadWorkflowFromNormalizedTables: vi.fn().mockResolvedValue(null),
}))
mockGetWorkflowById.mockReset()
mockGetWorkflowAccessContext.mockReset()
vi.doMock('@/lib/workflows/utils', async () => {
const actual =
await vi.importActual<typeof import('@/lib/workflows/utils')>('@/lib/workflows/utils')
return {
...actual,
getWorkflowById: mockGetWorkflowById,
getWorkflowAccessContext: mockGetWorkflowAccessContext,
}
})
})
afterEach(() => {
@@ -60,17 +77,14 @@ describe('Workflow By ID API Route', () => {
}),
}))
vi.doMock('@sim/db', () => ({
db: {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
then: vi.fn().mockResolvedValue(undefined),
}),
}),
}),
},
}))
mockGetWorkflowById.mockResolvedValueOnce(null)
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: null,
workspaceOwnerId: null,
workspacePermission: null,
isOwner: false,
isWorkspaceOwner: false,
})
const req = new NextRequest('http://localhost:3000/api/workflows/nonexistent')
const params = Promise.resolve({ id: 'nonexistent' })
@@ -105,22 +119,28 @@ describe('Workflow By ID API Route', () => {
}),
}))
vi.doMock('@sim/db', () => ({
db: {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
then: vi.fn().mockResolvedValue(mockWorkflow),
}),
}),
}),
},
}))
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: null,
workspacePermission: null,
isOwner: true,
isWorkspaceOwner: false,
})
vi.doMock('@/lib/workflows/db-helpers', () => ({
loadWorkflowFromNormalizedTables: vi.fn().mockResolvedValue(mockNormalizedData),
}))
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: null,
workspacePermission: null,
isOwner: true,
isWorkspaceOwner: false,
})
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123')
const params = Promise.resolve({ id: 'workflow-123' })
@@ -154,22 +174,28 @@ describe('Workflow By ID API Route', () => {
}),
}))
vi.doMock('@sim/db', () => ({
db: {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
then: vi.fn().mockResolvedValue(mockWorkflow),
}),
}),
}),
},
}))
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: 'workspace-456',
workspacePermission: 'admin',
isOwner: false,
isWorkspaceOwner: false,
})
vi.doMock('@/lib/workflows/db-helpers', () => ({
loadWorkflowFromNormalizedTables: vi.fn().mockResolvedValue(mockNormalizedData),
}))
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: 'workspace-456',
workspacePermission: 'read',
isOwner: false,
isWorkspaceOwner: false,
})
vi.doMock('@/lib/permissions/utils', () => ({
getUserEntityPermissions: vi.fn().mockResolvedValue('read'),
hasAdminPermission: vi.fn().mockResolvedValue(false),
@@ -200,22 +226,14 @@ describe('Workflow By ID API Route', () => {
}),
}))
vi.doMock('@sim/db', () => ({
db: {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
then: vi.fn().mockResolvedValue(mockWorkflow),
}),
}),
}),
},
}))
vi.doMock('@/lib/permissions/utils', () => ({
getUserEntityPermissions: vi.fn().mockResolvedValue(null),
hasAdminPermission: vi.fn().mockResolvedValue(false),
}))
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: 'workspace-456',
workspacePermission: null,
isOwner: false,
isWorkspaceOwner: false,
})
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123')
const params = Promise.resolve({ id: 'workflow-123' })
@@ -250,17 +268,14 @@ describe('Workflow By ID API Route', () => {
}),
}))
vi.doMock('@sim/db', () => ({
db: {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
then: vi.fn().mockResolvedValue(mockWorkflow),
}),
}),
}),
},
}))
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: null,
workspacePermission: null,
isOwner: true,
isWorkspaceOwner: false,
})
vi.doMock('@/lib/workflows/db-helpers', () => ({
loadWorkflowFromNormalizedTables: vi.fn().mockResolvedValue(mockNormalizedData),
@@ -294,19 +309,22 @@ describe('Workflow By ID API Route', () => {
}),
}))
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: null,
workspacePermission: null,
isOwner: true,
isWorkspaceOwner: false,
})
vi.doMock('@sim/db', () => ({
db: {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
then: vi.fn().mockResolvedValue(mockWorkflow),
}),
}),
}),
delete: vi.fn().mockReturnValue({
where: vi.fn().mockResolvedValue(undefined),
where: vi.fn().mockResolvedValue([{ id: 'workflow-123' }]),
}),
},
workflow: {},
}))
global.fetch = vi.fn().mockResolvedValue({
@@ -340,24 +358,22 @@ describe('Workflow By ID API Route', () => {
}),
}))
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: 'workspace-456',
workspacePermission: 'admin',
isOwner: false,
isWorkspaceOwner: false,
})
vi.doMock('@sim/db', () => ({
db: {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
then: vi.fn().mockResolvedValue(mockWorkflow),
}),
}),
}),
delete: vi.fn().mockReturnValue({
where: vi.fn().mockResolvedValue(undefined),
where: vi.fn().mockResolvedValue([{ id: 'workflow-123' }]),
}),
},
}))
vi.doMock('@/lib/permissions/utils', () => ({
getUserEntityPermissions: vi.fn().mockResolvedValue('admin'),
hasAdminPermission: vi.fn().mockResolvedValue(true),
workflow: {},
}))
global.fetch = vi.fn().mockResolvedValue({
@@ -391,22 +407,14 @@ describe('Workflow By ID API Route', () => {
}),
}))
vi.doMock('@sim/db', () => ({
db: {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
then: vi.fn().mockResolvedValue(mockWorkflow),
}),
}),
}),
},
}))
vi.doMock('@/lib/permissions/utils', () => ({
getUserEntityPermissions: vi.fn().mockResolvedValue('read'),
hasAdminPermission: vi.fn().mockResolvedValue(false),
}))
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: 'workspace-456',
workspacePermission: null,
isOwner: false,
isWorkspaceOwner: false,
})
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123', {
method: 'DELETE',
@@ -432,6 +440,7 @@ describe('Workflow By ID API Route', () => {
}
const updateData = { name: 'Updated Workflow' }
const updatedWorkflow = { ...mockWorkflow, ...updateData, updatedAt: new Date() }
vi.doMock('@/lib/auth', () => ({
getSession: vi.fn().mockResolvedValue({
@@ -439,23 +448,26 @@ describe('Workflow By ID API Route', () => {
}),
}))
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: null,
workspacePermission: null,
isOwner: true,
isWorkspaceOwner: false,
})
vi.doMock('@sim/db', () => ({
db: {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
then: vi.fn().mockResolvedValue(mockWorkflow),
}),
}),
}),
update: vi.fn().mockReturnValue({
set: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
returning: vi.fn().mockResolvedValue([{ ...mockWorkflow, ...updateData }]),
returning: vi.fn().mockResolvedValue([updatedWorkflow]),
}),
}),
}),
},
workflow: {},
}))
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123', {
@@ -481,6 +493,7 @@ describe('Workflow By ID API Route', () => {
}
const updateData = { name: 'Updated Workflow' }
const updatedWorkflow = { ...mockWorkflow, ...updateData, updatedAt: new Date() }
vi.doMock('@/lib/auth', () => ({
getSession: vi.fn().mockResolvedValue({
@@ -488,28 +501,26 @@ describe('Workflow By ID API Route', () => {
}),
}))
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: 'workspace-456',
workspacePermission: 'write',
isOwner: false,
isWorkspaceOwner: false,
})
vi.doMock('@sim/db', () => ({
db: {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
then: vi.fn().mockResolvedValue(mockWorkflow),
}),
}),
}),
update: vi.fn().mockReturnValue({
set: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
returning: vi.fn().mockResolvedValue([{ ...mockWorkflow, ...updateData }]),
returning: vi.fn().mockResolvedValue([updatedWorkflow]),
}),
}),
}),
},
}))
vi.doMock('@/lib/permissions/utils', () => ({
getUserEntityPermissions: vi.fn().mockResolvedValue('write'),
hasAdminPermission: vi.fn().mockResolvedValue(false),
workflow: {},
}))
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123', {
@@ -542,22 +553,14 @@ describe('Workflow By ID API Route', () => {
}),
}))
vi.doMock('@sim/db', () => ({
db: {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
then: vi.fn().mockResolvedValue(mockWorkflow),
}),
}),
}),
},
}))
vi.doMock('@/lib/permissions/utils', () => ({
getUserEntityPermissions: vi.fn().mockResolvedValue('read'),
hasAdminPermission: vi.fn().mockResolvedValue(false),
}))
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: 'workspace-456',
workspacePermission: 'read',
isOwner: false,
isWorkspaceOwner: false,
})
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123', {
method: 'PUT',
@@ -587,17 +590,14 @@ describe('Workflow By ID API Route', () => {
}),
}))
vi.doMock('@sim/db', () => ({
db: {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
then: vi.fn().mockResolvedValue(mockWorkflow),
}),
}),
}),
},
}))
mockGetWorkflowById.mockResolvedValueOnce(mockWorkflow)
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: null,
workspacePermission: null,
isOwner: true,
isWorkspaceOwner: false,
})
// Invalid data - empty name
const invalidData = { name: '' }
@@ -625,17 +625,7 @@ describe('Workflow By ID API Route', () => {
}),
}))
vi.doMock('@sim/db', () => ({
db: {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
then: vi.fn().mockRejectedValue(new Error('Database connection timeout')),
}),
}),
}),
},
}))
mockGetWorkflowById.mockRejectedValueOnce(new Error('Database connection timeout'))
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123')
const params = Promise.resolve({ id: 'workflow-123' })

View File

@@ -8,9 +8,9 @@ import { getSession } from '@/lib/auth'
import { verifyInternalToken } from '@/lib/auth/internal'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console/logger'
import { getUserEntityPermissions, hasAdminPermission } from '@/lib/permissions/utils'
import { generateRequestId } from '@/lib/utils'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers'
import { getWorkflowAccessContext, getWorkflowById } from '@/lib/workflows/utils'
const logger = createLogger('WorkflowByIdAPI')
@@ -74,12 +74,8 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
userId = authenticatedUserId
}
// Fetch the workflow
const workflowData = await db
.select()
.from(workflow)
.where(eq(workflow.id, workflowId))
.then((rows) => rows[0])
let accessContext = null
let workflowData = await getWorkflowById(workflowId)
if (!workflowData) {
logger.warn(`[${requestId}] Workflow ${workflowId} not found`)
@@ -94,18 +90,21 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
hasAccess = true
} else {
// Case 1: User owns the workflow
if (workflowData.userId === userId) {
hasAccess = true
}
if (workflowData) {
accessContext = await getWorkflowAccessContext(workflowId, userId ?? undefined)
// Case 2: Workflow belongs to a workspace the user has permissions for
if (!hasAccess && workflowData.workspaceId && userId) {
const userPermission = await getUserEntityPermissions(
userId,
'workspace',
workflowData.workspaceId
)
if (userPermission !== null) {
if (!accessContext) {
logger.warn(`[${requestId}] Workflow ${workflowId} not found`)
return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
}
workflowData = accessContext.workflow
if (accessContext.isOwner) {
hasAccess = true
}
if (!hasAccess && workflowData.workspaceId && accessContext.workspacePermission) {
hasAccess = true
}
}
@@ -179,11 +178,8 @@ export async function DELETE(
const userId = session.user.id
const workflowData = await db
.select()
.from(workflow)
.where(eq(workflow.id, workflowId))
.then((rows) => rows[0])
const accessContext = await getWorkflowAccessContext(workflowId, userId)
const workflowData = accessContext?.workflow || (await getWorkflowById(workflowId))
if (!workflowData) {
logger.warn(`[${requestId}] Workflow ${workflowId} not found for deletion`)
@@ -200,8 +196,8 @@ export async function DELETE(
// Case 2: Workflow belongs to a workspace and user has admin permission
if (!canDelete && workflowData.workspaceId) {
const hasAdmin = await hasAdminPermission(userId, workflowData.workspaceId)
if (hasAdmin) {
const context = accessContext || (await getWorkflowAccessContext(workflowId, userId))
if (context?.workspacePermission === 'admin') {
canDelete = true
}
}
@@ -320,11 +316,8 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
const updates = UpdateWorkflowSchema.parse(body)
// Fetch the workflow to check ownership/access
const workflowData = await db
.select()
.from(workflow)
.where(eq(workflow.id, workflowId))
.then((rows) => rows[0])
const accessContext = await getWorkflowAccessContext(workflowId, userId)
const workflowData = accessContext?.workflow || (await getWorkflowById(workflowId))
if (!workflowData) {
logger.warn(`[${requestId}] Workflow ${workflowId} not found for update`)
@@ -341,12 +334,8 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
// Case 2: Workflow belongs to a workspace and user has write or admin permission
if (!canUpdate && workflowData.workspaceId) {
const userPermission = await getUserEntityPermissions(
userId,
'workspace',
workflowData.workspaceId
)
if (userPermission === 'write' || userPermission === 'admin') {
const context = accessContext || (await getWorkflowAccessContext(workflowId, userId))
if (context?.workspacePermission === 'write' || context?.workspacePermission === 'admin') {
canUpdate = true
}
}
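The route now resolves access through getWorkflowAccessContext instead of a raw db.select plus separate permission lookups. Below is a sketch of the return shape implied by the test mocks above; the permission union and the row type are assumptions, and the real declaration lives in @/lib/workflows/utils.

// Shape inferred from the mocks in the route tests above; illustrative only.
type WorkspacePermission = 'admin' | 'write' | 'read'

interface WorkflowAccessContext {
  workflow: Record<string, unknown> | null // the workflow row, when found
  workspaceOwnerId: string | null
  workspacePermission: WorkspacePermission | null
  isOwner: boolean // requesting user owns the workflow
  isWorkspaceOwner: boolean // requesting user owns the containing workspace
}

A single lookup then answers existence, ownership, and workspace permission, which is why the per-route getUserEntityPermissions and hasAdminPermission calls could be dropped.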

View File

@@ -5,10 +5,10 @@ import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console/logger'
import { getUserEntityPermissions } from '@/lib/permissions/utils'
import { generateRequestId } from '@/lib/utils'
import { extractAndPersistCustomTools } from '@/lib/workflows/custom-tools-persistence'
import { saveWorkflowToNormalizedTables } from '@/lib/workflows/db-helpers'
import { getWorkflowAccessContext } from '@/lib/workflows/utils'
import { sanitizeAgentToolsInBlocks } from '@/lib/workflows/validation'
const logger = createLogger('WorkflowStateAPI')
@@ -124,11 +124,8 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
const state = WorkflowStateSchema.parse(body)
// Fetch the workflow to check ownership/access
const workflowData = await db
.select()
.from(workflow)
.where(eq(workflow.id, workflowId))
.then((rows) => rows[0])
const accessContext = await getWorkflowAccessContext(workflowId, userId)
const workflowData = accessContext?.workflow
if (!workflowData) {
logger.warn(`[${requestId}] Workflow ${workflowId} not found for state update`)
@@ -136,24 +133,12 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
}
// Check if user has permission to update this workflow
let canUpdate = false
// Case 1: User owns the workflow
if (workflowData.userId === userId) {
canUpdate = true
}
// Case 2: Workflow belongs to a workspace and user has write or admin permission
if (!canUpdate && workflowData.workspaceId) {
const userPermission = await getUserEntityPermissions(
userId,
'workspace',
workflowData.workspaceId
)
if (userPermission === 'write' || userPermission === 'admin') {
canUpdate = true
}
}
const canUpdate =
accessContext?.isOwner ||
(workflowData.workspaceId
? accessContext?.workspacePermission === 'write' ||
accessContext?.workspacePermission === 'admin'
: false)
if (!canUpdate) {
logger.warn(

View File

@@ -18,12 +18,18 @@ import {
describe('Workflow Variables API Route', () => {
let authMocks: ReturnType<typeof mockAuth>
let databaseMocks: ReturnType<typeof createMockDatabase>
const mockGetWorkflowAccessContext = vi.fn()
beforeEach(() => {
vi.resetModules()
setupCommonApiMocks()
mockCryptoUuid('mock-request-id-12345678')
authMocks = mockAuth(mockUser)
mockGetWorkflowAccessContext.mockReset()
vi.doMock('@/lib/workflows/utils', () => ({
getWorkflowAccessContext: mockGetWorkflowAccessContext,
}))
})
afterEach(() => {
@@ -47,9 +53,7 @@ describe('Workflow Variables API Route', () => {
it('should return 404 when workflow does not exist', async () => {
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
databaseMocks = createMockDatabase({
select: { results: [[]] }, // No workflow found
})
mockGetWorkflowAccessContext.mockResolvedValueOnce(null)
const req = new NextRequest('http://localhost:3000/api/workflows/nonexistent/variables')
const params = Promise.resolve({ id: 'nonexistent' })
@@ -73,8 +77,12 @@ describe('Workflow Variables API Route', () => {
}
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
databaseMocks = createMockDatabase({
select: { results: [[mockWorkflow]] },
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: null,
workspacePermission: null,
isOwner: true,
isWorkspaceOwner: false,
})
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123/variables')
@@ -99,14 +107,14 @@ describe('Workflow Variables API Route', () => {
}
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
databaseMocks = createMockDatabase({
select: { results: [[mockWorkflow]] },
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: 'workspace-owner',
workspacePermission: 'read',
isOwner: false,
isWorkspaceOwner: false,
})
vi.doMock('@/lib/permissions/utils', () => ({
getUserEntityPermissions: vi.fn().mockResolvedValue('read'),
}))
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123/variables')
const params = Promise.resolve({ id: 'workflow-123' })
@@ -116,14 +124,6 @@ describe('Workflow Variables API Route', () => {
expect(response.status).toBe(200)
const data = await response.json()
expect(data.data).toEqual(mockWorkflow.variables)
// Verify permissions check was called
const { getUserEntityPermissions } = await import('@/lib/permissions/utils')
expect(getUserEntityPermissions).toHaveBeenCalledWith(
'user-123',
'workspace',
'workspace-456'
)
})
it('should deny access when user has no workspace permissions', async () => {
@@ -135,14 +135,14 @@ describe('Workflow Variables API Route', () => {
}
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
databaseMocks = createMockDatabase({
select: { results: [[mockWorkflow]] },
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: 'workspace-owner',
workspacePermission: null,
isOwner: false,
isWorkspaceOwner: false,
})
vi.doMock('@/lib/permissions/utils', () => ({
getUserEntityPermissions: vi.fn().mockResolvedValue(null),
}))
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123/variables')
const params = Promise.resolve({ id: 'workflow-123' })
@@ -165,8 +165,12 @@ describe('Workflow Variables API Route', () => {
}
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
databaseMocks = createMockDatabase({
select: { results: [[mockWorkflow]] },
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: null,
workspacePermission: null,
isOwner: true,
isWorkspaceOwner: false,
})
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123/variables')
@@ -191,8 +195,15 @@ describe('Workflow Variables API Route', () => {
}
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: null,
workspacePermission: null,
isOwner: true,
isWorkspaceOwner: false,
})
databaseMocks = createMockDatabase({
select: { results: [[mockWorkflow]] },
update: { results: [{}] },
})
@@ -223,14 +234,14 @@ describe('Workflow Variables API Route', () => {
}
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
databaseMocks = createMockDatabase({
select: { results: [[mockWorkflow]] },
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: 'workspace-owner',
workspacePermission: null,
isOwner: false,
isWorkspaceOwner: false,
})
vi.doMock('@/lib/permissions/utils', () => ({
getUserEntityPermissions: vi.fn().mockResolvedValue(null),
}))
const variables = [
{ id: 'var-1', workflowId: 'workflow-123', name: 'test', type: 'string', value: 'hello' },
]
@@ -258,8 +269,12 @@ describe('Workflow Variables API Route', () => {
}
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
databaseMocks = createMockDatabase({
select: { results: [[mockWorkflow]] },
mockGetWorkflowAccessContext.mockResolvedValueOnce({
workflow: mockWorkflow,
workspaceOwnerId: null,
workspacePermission: null,
isOwner: true,
isWorkspaceOwner: false,
})
// Invalid data - missing required fields
@@ -283,9 +298,7 @@ describe('Workflow Variables API Route', () => {
describe('Error handling', () => {
it.concurrent('should handle database errors gracefully', async () => {
authMocks.setAuthenticated({ id: 'user-123', email: 'test@example.com' })
databaseMocks = createMockDatabase({
select: { throwError: true, errorMessage: 'Database connection failed' },
})
mockGetWorkflowAccessContext.mockRejectedValueOnce(new Error('Database connection failed'))
const req = new NextRequest('http://localhost:3000/api/workflows/workflow-123/variables')
const params = Promise.resolve({ id: 'workflow-123' })

View File

@@ -5,8 +5,8 @@ import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console/logger'
import { getUserEntityPermissions } from '@/lib/permissions/utils'
import { generateRequestId } from '@/lib/utils'
import { getWorkflowAccessContext } from '@/lib/workflows/utils'
import type { Variable } from '@/stores/panel/variables/types'
const logger = createLogger('WorkflowVariablesAPI')
@@ -35,32 +35,18 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
}
// Get the workflow record
const workflowRecord = await db
.select()
.from(workflow)
.where(eq(workflow.id, workflowId))
.limit(1)
const accessContext = await getWorkflowAccessContext(workflowId, session.user.id)
const workflowData = accessContext?.workflow
if (!workflowRecord.length) {
if (!workflowData) {
logger.warn(`[${requestId}] Workflow not found: ${workflowId}`)
return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
}
const workflowData = workflowRecord[0]
const workspaceId = workflowData.workspaceId
// Check authorization - either the user owns the workflow or has workspace permissions
let isAuthorized = workflowData.userId === session.user.id
// If not authorized by ownership and the workflow belongs to a workspace, check workspace permissions
if (!isAuthorized && workspaceId) {
const userPermission = await getUserEntityPermissions(
session.user.id,
'workspace',
workspaceId
)
isAuthorized = userPermission !== null
}
const isAuthorized =
accessContext?.isOwner || (workspaceId ? accessContext?.workspacePermission !== null : false)
if (!isAuthorized) {
logger.warn(
@@ -125,32 +111,18 @@ export async function GET(req: NextRequest, { params }: { params: Promise<{ id:
}
// Get the workflow record
const workflowRecord = await db
.select()
.from(workflow)
.where(eq(workflow.id, workflowId))
.limit(1)
const accessContext = await getWorkflowAccessContext(workflowId, session.user.id)
const workflowData = accessContext?.workflow
if (!workflowRecord.length) {
if (!workflowData) {
logger.warn(`[${requestId}] Workflow not found: ${workflowId}`)
return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
}
const workflowData = workflowRecord[0]
const workspaceId = workflowData.workspaceId
// Check authorization - either the user owns the workflow or has workspace permissions
let isAuthorized = workflowData.userId === session.user.id
// If not authorized by ownership and the workflow belongs to a workspace, check workspace permissions
if (!isAuthorized && workspaceId) {
const userPermission = await getUserEntityPermissions(
session.user.id,
'workspace',
workspaceId
)
isAuthorized = userPermission !== null
}
const isAuthorized =
accessContext?.isOwner || (workspaceId ? accessContext?.workspacePermission !== null : false)
if (!isAuthorized) {
logger.warn(

View File

@@ -1,9 +1,14 @@
import { NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { createLogger } from '@/lib/logs/console/logger'
import { getUserEntityPermissions } from '@/lib/permissions/utils'
import { uploadExecutionFile } from '@/lib/workflows/execution-file-storage'
import type { UserFile } from '@/executor/types'
const logger = createLogger('WorkflowUtils')
const MAX_FILE_SIZE = 20 * 1024 * 1024 // 20MB
export function createErrorResponse(error: string, status: number, code?: string) {
return NextResponse.json(
{
@@ -37,3 +42,99 @@ export async function verifyWorkspaceMembership(
return null
}
}
/**
 * Processes a single API workflow file entry - handles both base64 ('file' type) and URL pass-through ('url' type)
*/
export async function processApiWorkflowFiles(
file: { type: string; data: string; name: string; mime?: string },
executionContext: { workspaceId: string; workflowId: string; executionId: string },
requestId: string
): Promise<UserFile | null> {
if (file.type === 'file' && file.data && file.name) {
const dataUrlPrefix = 'data:'
const base64Prefix = ';base64,'
if (!file.data.startsWith(dataUrlPrefix)) {
logger.warn(`[${requestId}] Invalid data format for file: ${file.name}`)
return null
}
const base64Index = file.data.indexOf(base64Prefix)
if (base64Index === -1) {
logger.warn(`[${requestId}] Invalid data format (no base64 marker) for file: ${file.name}`)
return null
}
const mimeType = file.data.substring(dataUrlPrefix.length, base64Index)
const base64Data = file.data.substring(base64Index + base64Prefix.length)
const buffer = Buffer.from(base64Data, 'base64')
if (buffer.length > MAX_FILE_SIZE) {
const fileSizeMB = (buffer.length / (1024 * 1024)).toFixed(2)
throw new Error(
`File "${file.name}" exceeds the maximum size limit of 20MB (actual size: ${fileSizeMB}MB)`
)
}
logger.debug(`[${requestId}] Uploading file: ${file.name} (${buffer.length} bytes)`)
const userFile = await uploadExecutionFile(
executionContext,
buffer,
file.name,
mimeType || file.mime || 'application/octet-stream'
)
logger.debug(`[${requestId}] Successfully uploaded ${file.name}`)
return userFile
}
if (file.type === 'url' && file.data) {
return {
id: uuidv4(),
url: file.data,
name: file.name,
size: 0,
type: file.mime || 'application/octet-stream',
key: `url/${file.name}`,
uploadedAt: new Date().toISOString(),
expiresAt: new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString(),
}
}
return null
}
/**
* Process all files for a given field in the API workflow input
*/
export async function processApiWorkflowField(
fieldValue: any,
executionContext: { workspaceId: string; workflowId: string },
requestId: string
): Promise<UserFile[]> {
if (!fieldValue || typeof fieldValue !== 'object') {
return []
}
const files = Array.isArray(fieldValue) ? fieldValue : [fieldValue]
const uploadedFiles: UserFile[] = []
const executionId = uuidv4()
const fullContext = { ...executionContext, executionId }
for (const file of files) {
try {
const userFile = await processApiWorkflowFiles(file, fullContext, requestId)
if (userFile) {
uploadedFiles.push(userFile)
}
} catch (error) {
logger.error(`[${requestId}] Failed to process file ${file.name}:`, error)
throw new Error(`Failed to upload file: ${file.name}`)
}
}
return uploadedFiles
}
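A hedged sketch of the request body a caller would send for a file-typed input field, matching the { type, data, name, mime } entries these helpers accept; the field name, endpoint path, and credentials are illustrative placeholders.

// Illustrative request, not part of the diff. 'file' entries carry a base64 data
// URL (decoded and uploaded, 20MB cap enforced above); 'url' entries are passed
// through as references with size 0 and a 7-day expiry.
declare const workflowId: string
declare const apiKey: string

const body = {
  documents: [
    {
      type: 'file',
      name: 'report.pdf',
      mime: 'application/pdf',
      data: 'data:application/pdf;base64,JVBERi0xLjcK...',
    },
    {
      type: 'url',
      name: 'spec.pdf',
      data: 'https://example.com/spec.pdf',
    },
  ],
}

await fetch(`/api/workflows/${workflowId}/execute`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json', 'X-API-Key': apiKey },
  body: JSON.stringify(body),
})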

View File

@@ -45,6 +45,18 @@ const DEFAULT_VOICE_SETTINGS = {
voiceId: 'EXAVITQu4vr4xnSDxMaL', // Default ElevenLabs voice (Bella)
}
/**
* Converts a File object to a base64 data URL
*/
function fileToBase64(file: File): Promise<string> {
return new Promise((resolve, reject) => {
const reader = new FileReader()
reader.onload = () => resolve(reader.result as string)
reader.onerror = reject
reader.readAsDataURL(file)
})
}
/**
* Creates an audio stream handler for text-to-speech conversion
* @param streamTextToAudio - Function to stream text to audio
@@ -265,20 +277,43 @@ export default function ChatClient({ identifier }: { identifier: string }) {
}
// Handle sending a message
const handleSendMessage = async (messageParam?: string, isVoiceInput = false) => {
const handleSendMessage = async (
messageParam?: string,
isVoiceInput = false,
files?: Array<{
id: string
name: string
size: number
type: string
file: File
dataUrl?: string
}>
) => {
const messageToSend = messageParam ?? inputValue
if (!messageToSend.trim() || isLoading) return
if ((!messageToSend.trim() && (!files || files.length === 0)) || isLoading) return
logger.info('Sending message:', { messageToSend, isVoiceInput, conversationId })
logger.info('Sending message:', {
messageToSend,
isVoiceInput,
conversationId,
filesCount: files?.length,
})
// Reset userHasScrolled when sending a new message
setUserHasScrolled(false)
const userMessage: ChatMessage = {
id: crypto.randomUUID(),
content: messageToSend,
content: messageToSend || (files && files.length > 0 ? `Sent ${files.length} file(s)` : ''),
type: 'user',
timestamp: new Date(),
attachments: files?.map((file) => ({
id: file.id,
name: file.name,
type: file.type,
size: file.size,
dataUrl: file.dataUrl || '',
})),
}
// Add the user's message to the chat
@@ -299,7 +334,7 @@ export default function ChatClient({ identifier }: { identifier: string }) {
try {
// Send structured payload to maintain chat context
const payload = {
const payload: any = {
input:
typeof userMessage.content === 'string'
? userMessage.content
@@ -307,7 +342,22 @@ export default function ChatClient({ identifier }: { identifier: string }) {
conversationId,
}
logger.info('API payload:', payload)
// Add files if present (convert to base64 for JSON transmission)
if (files && files.length > 0) {
payload.files = await Promise.all(
files.map(async (file) => ({
name: file.name,
size: file.size,
type: file.type,
dataUrl: file.dataUrl || (await fileToBase64(file.file)),
}))
)
}
logger.info('API payload:', {
...payload,
files: payload.files ? `${payload.files.length} files` : undefined,
})
const response = await fetch(`/api/chat/${identifier}`, {
method: 'POST',
@@ -499,8 +549,8 @@ export default function ChatClient({ identifier }: { identifier: string }) {
<div className='relative p-3 pb-4 md:p-4 md:pb-6'>
<div className='relative mx-auto max-w-3xl md:max-w-[748px]'>
<ChatInput
onSubmit={(value, isVoiceInput) => {
void handleSendMessage(value, isVoiceInput)
onSubmit={(value, isVoiceInput, files) => {
void handleSendMessage(value, isVoiceInput, files)
}}
isStreaming={isStreamingResponse}
onStopStreaming={() => stopStreaming(setMessages)}
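For reference, the JSON the deployed chat client now posts to /api/chat/{identifier}, as assembled in the handler above; the type names are introduced here only for illustration.

// Shapes inferred from the payload construction above; names are illustrative.
interface ChatFileAttachmentPayload {
  name: string
  size: number
  type: string
  dataUrl: string // base64 data URL; non-images are converted at send time via fileToBase64
}

interface ChatRequestPayload {
  input: string
  conversationId: string
  files?: ChatFileAttachmentPayload[]
}

Only images get a precomputed dataUrl in the input component; other attachments fall back to fileToBase64 when the message is sent, so previews stay cheap while every file still reaches the workflow.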

View File

@@ -3,7 +3,7 @@
import type React from 'react'
import { useEffect, useRef, useState } from 'react'
import { motion } from 'framer-motion'
import { Send, Square } from 'lucide-react'
import { AlertCircle, Paperclip, Send, Square, X } from 'lucide-react'
import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/tooltip'
import { VoiceInput } from '@/app/chat/components/input/voice-input'
@@ -12,8 +12,17 @@ const PLACEHOLDER_DESKTOP = 'Enter a message or click the mic to speak'
const MAX_TEXTAREA_HEIGHT = 120 // Max height in pixels (e.g., for about 3-4 lines)
const MAX_TEXTAREA_HEIGHT_MOBILE = 100 // Smaller for mobile
interface AttachedFile {
id: string
name: string
size: number
type: string
file: File
dataUrl?: string
}
export const ChatInput: React.FC<{
onSubmit?: (value: string, isVoiceInput?: boolean) => void
onSubmit?: (value: string, isVoiceInput?: boolean, files?: AttachedFile[]) => void
isStreaming?: boolean
onStopStreaming?: () => void
onVoiceStart?: () => void
@@ -21,8 +30,11 @@ export const ChatInput: React.FC<{
}> = ({ onSubmit, isStreaming = false, onStopStreaming, onVoiceStart, voiceOnly = false }) => {
const wrapperRef = useRef<HTMLDivElement>(null)
const textareaRef = useRef<HTMLTextAreaElement>(null) // Ref for the textarea
const fileInputRef = useRef<HTMLInputElement>(null)
const [isActive, setIsActive] = useState(false)
const [inputValue, setInputValue] = useState('')
const [attachedFiles, setAttachedFiles] = useState<AttachedFile[]>([])
const [uploadErrors, setUploadErrors] = useState<string[]>([])
// Check if speech-to-text is available in the browser
const isSttAvailable =
@@ -85,10 +97,75 @@ export const ChatInput: React.FC<{
// Focus is now handled by the useEffect above
}
// Handle file selection
const handleFileSelect = async (selectedFiles: FileList | null) => {
if (!selectedFiles) return
const newFiles: AttachedFile[] = []
const maxSize = 10 * 1024 * 1024 // 10MB limit
const maxFiles = 5
for (let i = 0; i < selectedFiles.length; i++) {
if (attachedFiles.length + newFiles.length >= maxFiles) break
const file = selectedFiles[i]
// Check file size
if (file.size > maxSize) {
setUploadErrors((prev) => [...prev, `${file.name} is too large (max 10MB)`])
continue
}
// Check for duplicates
const isDuplicate = attachedFiles.some(
(existingFile) => existingFile.name === file.name && existingFile.size === file.size
)
if (isDuplicate) {
setUploadErrors((prev) => [...prev, `${file.name} already added`])
continue
}
// Read file as data URL if it's an image
let dataUrl: string | undefined
if (file.type.startsWith('image/')) {
try {
dataUrl = await new Promise<string>((resolve, reject) => {
const reader = new FileReader()
reader.onload = () => resolve(reader.result as string)
reader.onerror = reject
reader.readAsDataURL(file)
})
} catch (error) {
console.error('Error reading file:', error)
}
}
newFiles.push({
id: crypto.randomUUID(),
name: file.name,
size: file.size,
type: file.type,
file,
dataUrl,
})
}
if (newFiles.length > 0) {
setAttachedFiles([...attachedFiles, ...newFiles])
setUploadErrors([]) // Clear errors when files are successfully added
}
}
const handleRemoveFile = (fileId: string) => {
setAttachedFiles(attachedFiles.filter((f) => f.id !== fileId))
}
const handleSubmit = () => {
if (!inputValue.trim()) return
onSubmit?.(inputValue.trim(), false) // false = not voice input
if (!inputValue.trim() && attachedFiles.length === 0) return
onSubmit?.(inputValue.trim(), false, attachedFiles) // false = not voice input
setInputValue('')
setAttachedFiles([])
setUploadErrors([]) // Clear errors when sending message
if (textareaRef.current) {
textareaRef.current.style.height = 'auto' // Reset height after submit
textareaRef.current.style.overflowY = 'hidden' // Ensure overflow is hidden
@@ -132,6 +209,29 @@ export const ChatInput: React.FC<{
<>
<div className='fixed right-0 bottom-0 left-0 flex w-full items-center justify-center bg-gradient-to-t from-white to-transparent px-4 pb-4 text-black md:px-0 md:pb-4'>
<div ref={wrapperRef} className='w-full max-w-3xl md:max-w-[748px]'>
{/* Error Messages */}
{uploadErrors.length > 0 && (
<div className='mb-3'>
<div className='rounded-lg border border-red-200 bg-red-50 p-3 dark:border-red-800/50 dark:bg-red-950/20'>
<div className='flex items-start gap-2'>
<AlertCircle className='mt-0.5 h-4 w-4 shrink-0 text-red-600 dark:text-red-400' />
<div className='flex-1'>
<div className='mb-1 font-medium text-red-800 text-sm dark:text-red-300'>
File upload error
</div>
<div className='space-y-1'>
{uploadErrors.map((error, idx) => (
<div key={idx} className='text-red-700 text-sm dark:text-red-400'>
{error}
</div>
))}
</div>
</div>
</div>
</div>
</div>
)}
{/* Text Input Area with Controls */}
<motion.div
className='rounded-2xl border border-gray-200 bg-white shadow-sm md:rounded-3xl'
@@ -140,27 +240,99 @@ export const ChatInput: React.FC<{
animate={{ opacity: 1, y: 0 }}
transition={{ duration: 0.2 }}
>
<div className='flex items-center gap-2 p-3 md:p-4'>
{/* Voice Input */}
{isSttAvailable && (
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<div>
<VoiceInput
onVoiceStart={handleVoiceStart}
disabled={isStreaming}
minimal
{/* File Previews */}
{attachedFiles.length > 0 && (
<div className='mb-2 flex flex-wrap gap-2 px-3 pt-3 md:px-4'>
{attachedFiles.map((file) => {
const formatFileSize = (bytes: number) => {
if (bytes === 0) return '0 B'
const k = 1024
const sizes = ['B', 'KB', 'MB', 'GB']
const i = Math.floor(Math.log(bytes) / Math.log(k))
return `${Math.round((bytes / k ** i) * 10) / 10} ${sizes[i]}`
}
return (
<div
key={file.id}
className={`group relative overflow-hidden rounded-lg border border-gray-200 bg-white dark:border-gray-700 dark:bg-gray-800 ${
file.dataUrl
? 'h-16 w-16 md:h-20 md:w-20'
: 'flex h-16 min-w-[120px] max-w-[200px] items-center gap-2 px-2 md:h-20 md:min-w-[140px] md:max-w-[220px] md:px-3'
}`}
title=''
>
{file.dataUrl ? (
<img
src={file.dataUrl}
alt={file.name}
className='h-full w-full object-cover'
/>
</div>
</TooltipTrigger>
<TooltipContent side='top'>
<p>Start voice conversation</p>
<span className='text-gray-500 text-xs'>Click to enter voice mode</span>
</TooltipContent>
</Tooltip>
</TooltipProvider>
)}
) : (
<>
<div className='flex h-8 w-8 flex-shrink-0 items-center justify-center rounded bg-gray-100 md:h-10 md:w-10 dark:bg-gray-700'>
<Paperclip
size={16}
className='text-gray-500 md:h-5 md:w-5 dark:text-gray-400'
/>
</div>
<div className='min-w-0 flex-1'>
<div className='truncate font-medium text-gray-800 text-xs dark:text-gray-200'>
{file.name}
</div>
<div className='text-[10px] text-gray-500 dark:text-gray-400'>
{formatFileSize(file.size)}
</div>
</div>
</>
)}
<button
type='button'
onClick={() => handleRemoveFile(file.id)}
className='absolute top-1 right-1 rounded-full bg-gray-800/80 p-1 text-white opacity-0 transition-opacity hover:bg-gray-800/80 hover:text-white group-hover:opacity-100 dark:bg-black/70 dark:hover:bg-black/70 dark:hover:text-white'
>
<X size={12} />
</button>
</div>
)
})}
</div>
)}
<div className='flex items-center gap-2 p-3 md:p-4'>
{/* Paperclip Button */}
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<button
type='button'
onClick={() => fileInputRef.current?.click()}
disabled={isStreaming || attachedFiles.length >= 5}
className='flex items-center justify-center rounded-full p-1.5 text-gray-600 transition-colors hover:bg-gray-100 disabled:cursor-not-allowed disabled:opacity-50 md:p-2'
>
<Paperclip size={16} className='md:h-5 md:w-5' />
</button>
</TooltipTrigger>
<TooltipContent side='top'>
<p>Attach files</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
{/* Hidden file input */}
<input
ref={fileInputRef}
type='file'
multiple
onChange={(e) => {
handleFileSelect(e.target.files)
if (fileInputRef.current) {
fileInputRef.current.value = ''
}
}}
className='hidden'
disabled={isStreaming}
/>
{/* Text Input Container */}
<div className='relative flex-1'>
@@ -208,10 +380,30 @@ export const ChatInput: React.FC<{
</div>
</div>
{/* Voice Input */}
{isSttAvailable && (
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<div>
<VoiceInput
onVoiceStart={handleVoiceStart}
disabled={isStreaming}
minimal
/>
</div>
</TooltipTrigger>
<TooltipContent side='top'>
<p>Start voice conversation</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
)}
{/* Send Button */}
<button
className={`flex items-center justify-center rounded-full p-1.5 text-white transition-colors md:p-2 ${
inputValue.trim()
inputValue.trim() || attachedFiles.length > 0
? 'bg-black hover:bg-zinc-700'
: 'cursor-default bg-gray-300 hover:bg-gray-400'
}`}

View File

@@ -72,19 +72,17 @@ export function VoiceInput({
if (minimal) {
return (
<motion.button
<button
type='button'
onClick={handleVoiceClick}
disabled={disabled}
className={`flex items-center justify-center p-1 transition-colors duration-200 ${
disabled ? 'cursor-not-allowed opacity-50' : 'cursor-pointer hover:text-gray-600'
className={`flex items-center justify-center rounded-full p-1.5 text-gray-600 transition-colors duration-200 hover:bg-gray-100 md:p-2 ${
disabled ? 'cursor-not-allowed opacity-50' : 'cursor-pointer'
}`}
whileHover={{ scale: 1.05 }}
whileTap={{ scale: 0.95 }}
title='Start voice conversation'
>
<Mic size={18} className='text-gray-500' />
</motion.button>
<Mic size={16} className='md:h-5 md:w-5' />
</button>
)
}

View File

@@ -1,10 +1,18 @@
'use client'
import { memo, useMemo, useState } from 'react'
import { Check, Copy } from 'lucide-react'
import { Check, Copy, File as FileIcon, FileText, Image as ImageIcon } from 'lucide-react'
import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/tooltip'
import MarkdownRenderer from './components/markdown-renderer'
export interface ChatAttachment {
id: string
name: string
type: string
dataUrl: string
size?: number
}
export interface ChatMessage {
id: string
content: string | Record<string, unknown>
@@ -12,6 +20,7 @@ export interface ChatMessage {
timestamp: Date
isInitialMessage?: boolean
isStreaming?: boolean
attachments?: ChatAttachment[]
}
function EnhancedMarkdownRenderer({ content }: { content: string }) {
@@ -39,15 +48,96 @@ export const ClientChatMessage = memo(
return (
<div className='px-4 py-5' data-message-id={message.id}>
<div className='mx-auto max-w-3xl'>
{/* File attachments displayed above the message */}
{message.attachments && message.attachments.length > 0 && (
<div className='mb-2 flex justify-end'>
<div className='flex flex-wrap gap-2'>
{message.attachments.map((attachment) => {
const isImage = attachment.type.startsWith('image/')
const getFileIcon = (type: string) => {
if (type.includes('pdf'))
return (
<FileText className='h-5 w-5 text-gray-500 md:h-6 md:w-6 dark:text-gray-400' />
)
if (type.startsWith('image/'))
return (
<ImageIcon className='h-5 w-5 text-gray-500 md:h-6 md:w-6 dark:text-gray-400' />
)
if (type.includes('text') || type.includes('json'))
return (
<FileText className='h-5 w-5 text-gray-500 md:h-6 md:w-6 dark:text-gray-400' />
)
return (
<FileIcon className='h-5 w-5 text-gray-500 md:h-6 md:w-6 dark:text-gray-400' />
)
}
const formatFileSize = (bytes?: number) => {
if (!bytes || bytes === 0) return ''
const k = 1024
const sizes = ['B', 'KB', 'MB', 'GB']
const i = Math.floor(Math.log(bytes) / Math.log(k))
return `${Math.round((bytes / k ** i) * 10) / 10} ${sizes[i]}`
}
return (
<div
key={attachment.id}
className={`relative overflow-hidden rounded-2xl border border-gray-200 bg-gray-50 dark:border-gray-700 dark:bg-gray-800 ${
attachment.dataUrl?.trim() ? 'cursor-pointer' : ''
} ${
isImage
? 'h-16 w-16 md:h-20 md:w-20'
: 'flex h-16 min-w-[140px] max-w-[220px] items-center gap-2 px-3 md:h-20 md:min-w-[160px] md:max-w-[240px]'
}`}
onClick={(e) => {
if (attachment.dataUrl?.trim()) {
e.preventDefault()
window.open(attachment.dataUrl, '_blank')
}
}}
>
{isImage ? (
<img
src={attachment.dataUrl}
alt={attachment.name}
className='h-full w-full object-cover'
/>
) : (
<>
<div className='flex h-10 w-10 flex-shrink-0 items-center justify-center rounded bg-gray-100 md:h-12 md:w-12 dark:bg-gray-700'>
{getFileIcon(attachment.type)}
</div>
<div className='min-w-0 flex-1'>
<div className='truncate font-medium text-gray-800 text-xs md:text-sm dark:text-gray-200'>
{attachment.name}
</div>
{attachment.size && (
<div className='text-[10px] text-gray-500 md:text-xs dark:text-gray-400'>
{formatFileSize(attachment.size)}
</div>
)}
</div>
</>
)}
</div>
)
})}
</div>
</div>
)}
<div className='flex justify-end'>
<div className='max-w-[80%] rounded-3xl bg-[#F4F4F4] px-4 py-3 dark:bg-gray-600'>
<div className='whitespace-pre-wrap break-words text-base text-gray-800 leading-relaxed dark:text-gray-100'>
{isJsonObject ? (
<pre>{JSON.stringify(message.content, null, 2)}</pre>
) : (
<span>{message.content as string}</span>
)}
</div>
{/* Render text content if present and not just file count message */}
{message.content && !String(message.content).startsWith('Sent') && (
<div className='whitespace-pre-wrap break-words text-base text-gray-800 leading-relaxed dark:text-gray-100'>
{isJsonObject ? (
<pre>{JSON.stringify(message.content, null, 2)}</pre>
) : (
<span>{message.content as string}</span>
)}
</div>
)}
</div>
</div>
</div>

View File

@@ -236,8 +236,8 @@ export function ExampleCommand({
</div>
)}
<div className='group relative rounded-md border bg-background transition-colors hover:bg-muted/50'>
<pre className='whitespace-pre-wrap p-3 font-mono text-xs'>{getDisplayCommand()}</pre>
<div className='group relative overflow-x-auto rounded-md border bg-background transition-colors hover:bg-muted/50'>
<pre className='whitespace-pre p-3 font-mono text-xs'>{getDisplayCommand()}</pre>
<CopyButton text={getActualCommand()} />
</div>
</div>

View File

@@ -139,6 +139,16 @@ export function DeployModal({
case 'array':
exampleData[field.name] = [1, 2, 3]
break
case 'files':
exampleData[field.name] = [
{
data: 'data:application/pdf;base64,...',
type: 'file',
name: 'document.pdf',
mime: 'application/pdf',
},
]
break
}
}
})

View File

@@ -1,10 +1,9 @@
'use client'
import { type KeyboardEvent, useCallback, useEffect, useMemo, useRef, useState } from 'react'
import { ArrowDown, ArrowUp } from 'lucide-react'
import { AlertCircle, ArrowDown, ArrowUp, File, FileText, Image, Paperclip, X } from 'lucide-react'
import { Button } from '@/components/ui/button'
import { Input } from '@/components/ui/input'
import { Notice } from '@/components/ui/notice'
import { ScrollArea } from '@/components/ui/scroll-area'
import { createLogger } from '@/lib/logs/console/logger'
import {
@@ -13,7 +12,6 @@ import {
parseOutputContentSafely,
} from '@/lib/response-format'
import {
ChatFileUpload,
ChatMessage,
OutputSelect,
} from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/chat/components'
@@ -261,12 +259,40 @@ export function Chat({ chatMessage, setChatMessage }: ChatProps) {
let result: any = null
try {
// Add user message
// Read files as data URLs for display in chat (only images to avoid localStorage quota issues)
const attachmentsWithData = await Promise.all(
chatFiles.map(async (file) => {
let dataUrl = ''
// Only read images as data URLs to avoid storing large files in localStorage
if (file.type.startsWith('image/')) {
try {
dataUrl = await new Promise<string>((resolve, reject) => {
const reader = new FileReader()
reader.onload = () => resolve(reader.result as string)
reader.onerror = reject
reader.readAsDataURL(file.file)
})
} catch (error) {
logger.error('Error reading file as data URL:', error)
}
}
return {
id: file.id,
name: file.name,
type: file.type,
size: file.size,
dataUrl,
}
})
)
// Add user message with attachments (include all files, even non-images without dataUrl)
addMessage({
content:
sentMessage || (chatFiles.length > 0 ? `Uploaded ${chatFiles.length} file(s)` : ''),
workflowId: activeWorkflowId,
type: 'user',
attachments: attachmentsWithData,
})
// Prepare workflow input
@@ -626,66 +652,212 @@ export function Chat({ chatMessage, setChatMessage }: ChatProps) {
if (validNewFiles.length > 0) {
setChatFiles([...chatFiles, ...validNewFiles])
setUploadErrors([]) // Clear errors when files are successfully added
}
}
}
}}
>
{/* File upload section */}
<div className='mb-2'>
{uploadErrors.length > 0 && (
<div className='mb-2'>
<Notice variant='error' title='File upload error'>
<ul className='list-disc pl-5'>
{uploadErrors.map((err, idx) => (
<li key={idx}>{err}</li>
))}
</ul>
</Notice>
{/* Error messages */}
{uploadErrors.length > 0 && (
<div className='mb-2'>
<div className='rounded-lg border border-red-200 bg-red-50 p-3 dark:border-red-800/50 dark:bg-red-950/20'>
<div className='flex items-start gap-2'>
<AlertCircle className='mt-0.5 h-4 w-4 shrink-0 text-red-600 dark:text-red-400' />
<div className='flex-1'>
<div className='mb-1 font-medium text-red-800 text-sm dark:text-red-300'>
File upload error
</div>
<div className='space-y-1'>
{uploadErrors.map((err, idx) => (
<div key={idx} className='text-red-700 text-sm dark:text-red-400'>
{err}
</div>
))}
</div>
</div>
</div>
</div>
</div>
)}
{/* Combined input container matching copilot style */}
<div
className={`rounded-[8px] border border-[#E5E5E5] bg-[#FFFFFF] p-2 shadow-xs transition-all duration-200 dark:border-[#414141] dark:bg-[var(--surface-elevated)] ${
isDragOver
? 'border-[var(--brand-primary-hover-hex)] bg-purple-50/50 dark:border-[var(--brand-primary-hover-hex)] dark:bg-purple-950/20'
: ''
}`}
>
{/* File thumbnails */}
{chatFiles.length > 0 && (
<div className='mb-2 flex flex-wrap gap-1.5'>
{chatFiles.map((file) => {
const isImage = file.type.startsWith('image/')
const previewUrl = isImage ? URL.createObjectURL(file.file) : null
const getFileIcon = (type: string) => {
if (type.includes('pdf'))
return <FileText className='h-5 w-5 text-muted-foreground' />
if (type.startsWith('image/'))
return <Image className='h-5 w-5 text-muted-foreground' />
if (type.includes('text') || type.includes('json'))
return <FileText className='h-5 w-5 text-muted-foreground' />
return <File className='h-5 w-5 text-muted-foreground' />
}
const formatFileSize = (bytes: number) => {
if (bytes === 0) return '0 B'
const k = 1024
const sizes = ['B', 'KB', 'MB', 'GB']
const i = Math.floor(Math.log(bytes) / Math.log(k))
return `${Math.round((bytes / k ** i) * 10) / 10} ${sizes[i]}`
}
return (
<div
key={file.id}
className={`group relative overflow-hidden rounded-md border border-border/50 bg-muted/20 ${
previewUrl
? 'h-16 w-16'
: 'flex h-16 min-w-[120px] max-w-[200px] items-center gap-2 px-2'
}`}
>
{previewUrl ? (
<img
src={previewUrl}
alt={file.name}
className='h-full w-full object-cover'
/>
) : (
<>
<div className='flex h-8 w-8 flex-shrink-0 items-center justify-center rounded bg-background/50'>
{getFileIcon(file.type)}
</div>
<div className='min-w-0 flex-1'>
<div className='truncate font-medium text-foreground text-xs'>
{file.name}
</div>
<div className='text-[10px] text-muted-foreground'>
{formatFileSize(file.size)}
</div>
</div>
</>
)}
{/* Remove button */}
<Button
variant='ghost'
size='icon'
onClick={(e) => {
e.stopPropagation()
if (previewUrl) URL.revokeObjectURL(previewUrl)
setChatFiles(chatFiles.filter((f) => f.id !== file.id))
}}
className='absolute top-0.5 right-0.5 h-5 w-5 bg-gray-800/80 p-0 text-white opacity-0 transition-opacity hover:bg-gray-800/80 hover:text-white group-hover:opacity-100 dark:bg-black/70 dark:hover:bg-black/70 dark:hover:text-white'
>
<X className='h-3 w-3' />
</Button>
</div>
)
})}
</div>
)}
<ChatFileUpload
files={chatFiles}
onFilesChange={(files) => {
setChatFiles(files)
}}
maxFiles={5}
maxSize={10}
disabled={!activeWorkflowId || isExecuting || isUploadingFiles}
onError={(errors) => setUploadErrors(errors)}
/>
</div>
<div className='flex gap-2'>
<Input
ref={inputRef}
value={chatMessage}
onChange={(e) => {
setChatMessage(e.target.value)
setHistoryIndex(-1) // Reset history index when typing
}}
onKeyDown={handleKeyPress}
placeholder={isDragOver ? 'Drop files here...' : 'Type a message...'}
className={`h-9 flex-1 rounded-lg border-[#E5E5E5] bg-[#FFFFFF] text-muted-foreground shadow-xs focus-visible:ring-0 focus-visible:ring-offset-0 dark:border-[#414141] dark:bg-[var(--surface-elevated)] ${
isDragOver
? 'border-[var(--brand-primary-hover-hex)] bg-purple-50/50 dark:border-[var(--brand-primary-hover-hex)] dark:bg-purple-950/20'
: ''
}`}
disabled={!activeWorkflowId || isExecuting || isUploadingFiles}
/>
<Button
onClick={handleSendMessage}
size='icon'
disabled={
(!chatMessage.trim() && chatFiles.length === 0) ||
!activeWorkflowId ||
isExecuting ||
isUploadingFiles
}
className='h-9 w-9 rounded-lg bg-[var(--brand-primary-hover-hex)] text-white shadow-[0_0_0_0_var(--brand-primary-hover-hex)] transition-all duration-200 hover:bg-[var(--brand-primary-hover-hex)] hover:shadow-[0_0_0_4px_rgba(127,47,255,0.15)]'
>
<ArrowUp className='h-4 w-4' />
</Button>
{/* Input row */}
<div className='flex items-center gap-1'>
{/* Attach button */}
<Button
variant='ghost'
size='icon'
onClick={() => document.getElementById('chat-file-input')?.click()}
disabled={
!activeWorkflowId || isExecuting || isUploadingFiles || chatFiles.length >= 5
}
className='h-6 w-6 shrink-0 text-muted-foreground hover:text-foreground'
title='Attach files'
>
<Paperclip className='h-3 w-3' />
</Button>
{/* Hidden file input */}
<input
id='chat-file-input'
type='file'
multiple
onChange={(e) => {
const files = e.target.files
if (!files) return
const newFiles: ChatFile[] = []
const errors: string[] = []
for (let i = 0; i < files.length; i++) {
if (chatFiles.length + newFiles.length >= 5) {
errors.push('Maximum 5 files allowed')
break
}
const file = files[i]
if (file.size > 10 * 1024 * 1024) {
errors.push(`${file.name} is too large (max 10MB)`)
continue
}
// Check for duplicates
const isDuplicate = chatFiles.some(
(existingFile) =>
existingFile.name === file.name && existingFile.size === file.size
)
if (isDuplicate) {
errors.push(`${file.name} already added`)
continue
}
newFiles.push({
id: crypto.randomUUID(),
name: file.name,
size: file.size,
type: file.type,
file,
})
}
if (errors.length > 0) setUploadErrors(errors)
if (newFiles.length > 0) {
setChatFiles([...chatFiles, ...newFiles])
setUploadErrors([]) // Clear errors when files are successfully added
}
e.target.value = ''
}}
className='hidden'
disabled={!activeWorkflowId || isExecuting || isUploadingFiles}
/>
{/* Text input */}
<Input
ref={inputRef}
value={chatMessage}
onChange={(e) => {
setChatMessage(e.target.value)
setHistoryIndex(-1)
}}
onKeyDown={handleKeyPress}
placeholder={isDragOver ? 'Drop files here...' : 'Type a message...'}
className='h-8 flex-1 border-0 bg-transparent font-sans text-foreground text-sm shadow-none placeholder:text-muted-foreground focus-visible:ring-0 focus-visible:ring-offset-0'
disabled={!activeWorkflowId || isExecuting || isUploadingFiles}
/>
{/* Send button */}
<Button
onClick={handleSendMessage}
size='icon'
disabled={
(!chatMessage.trim() && chatFiles.length === 0) ||
!activeWorkflowId ||
isExecuting ||
isUploadingFiles
}
className='h-6 w-6 shrink-0 rounded-full bg-[var(--brand-primary-hover-hex)] text-white shadow-[0_0_0_0_var(--brand-primary-hover-hex)] transition-all duration-200 hover:bg-[var(--brand-primary-hover-hex)] hover:shadow-[0_0_0_4px_rgba(127,47,255,0.15)]'
>
<ArrowUp className='h-3 w-3' />
</Button>
</div>
</div>
</div>
</div>

View File

@@ -1,4 +1,13 @@
import { useMemo } from 'react'
import { File, FileText, Image as ImageIcon } from 'lucide-react'
interface ChatAttachment {
id: string
name: string
type: string
dataUrl: string
size?: number
}
interface ChatMessageProps {
message: {
@@ -7,6 +16,7 @@ interface ChatMessageProps {
timestamp: string | Date
type: 'user' | 'workflow'
isStreaming?: boolean
attachments?: ChatAttachment[]
}
}
@@ -58,12 +68,81 @@ export function ChatMessage({ message }: ChatMessageProps) {
if (message.type === 'user') {
return (
<div className='w-full py-2'>
{/* File attachments displayed above the message, completely separate from message box */}
{message.attachments && message.attachments.length > 0 && (
<div className='mb-1 flex justify-end'>
<div className='flex flex-wrap gap-1.5'>
{message.attachments.map((attachment) => {
const isImage = attachment.type.startsWith('image/')
const getFileIcon = (type: string) => {
if (type.includes('pdf'))
return <FileText className='h-5 w-5 text-muted-foreground' />
if (type.startsWith('image/'))
return <ImageIcon className='h-5 w-5 text-muted-foreground' />
if (type.includes('text') || type.includes('json'))
return <FileText className='h-5 w-5 text-muted-foreground' />
return <File className='h-5 w-5 text-muted-foreground' />
}
const formatFileSize = (bytes?: number) => {
if (!bytes || bytes === 0) return ''
const k = 1024
const sizes = ['B', 'KB', 'MB', 'GB']
const i = Math.floor(Math.log(bytes) / Math.log(k))
return `${Math.round((bytes / k ** i) * 10) / 10} ${sizes[i]}`
}
return (
<div
key={attachment.id}
className={`relative overflow-hidden rounded-md border border-border/50 bg-muted/20 ${
attachment.dataUrl?.trim() ? 'cursor-pointer' : ''
} ${isImage ? 'h-16 w-16' : 'flex h-16 min-w-[120px] max-w-[200px] items-center gap-2 px-2'}`}
onClick={(e) => {
if (attachment.dataUrl?.trim()) {
e.preventDefault()
window.open(attachment.dataUrl, '_blank')
}
}}
>
{isImage && attachment.dataUrl ? (
<img
src={attachment.dataUrl}
alt={attachment.name}
className='h-full w-full object-cover'
/>
) : (
<>
<div className='flex h-8 w-8 flex-shrink-0 items-center justify-center rounded bg-background/50'>
{getFileIcon(attachment.type)}
</div>
<div className='min-w-0 flex-1'>
<div className='truncate font-medium text-foreground text-xs'>
{attachment.name}
</div>
{attachment.size && (
<div className='text-[10px] text-muted-foreground'>
{formatFileSize(attachment.size)}
</div>
)}
</div>
</>
)}
</div>
)
})}
</div>
</div>
)}
<div className='flex justify-end'>
<div className='max-w-[80%]'>
<div className='rounded-[10px] bg-secondary px-3 py-2'>
<div className='whitespace-pre-wrap break-words font-normal text-foreground text-sm leading-normal'>
<WordWrap text={formattedContent} />
</div>
{/* Render text content if present and not just file count message */}
{formattedContent && !formattedContent.startsWith('Uploaded') && (
<div className='whitespace-pre-wrap break-words font-normal text-foreground text-sm leading-normal'>
<WordWrap text={formattedContent} />
</div>
)}
</div>
</div>
</div>
@@ -78,7 +157,7 @@ export function ChatMessage({ message }: ChatMessageProps) {
<div className='whitespace-pre-wrap break-words text-foreground'>
<WordWrap text={formattedContent} />
{message.isStreaming && (
<span className='ml-1 inline-block h-4 w-2 animate-pulse bg-primary' />
<span className='ml-1 inline-block h-4 w-2 animate-pulse bg-gray-400 dark:bg-gray-300' />
)}
</div>
</div>
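For reference, the formatFileSize helper above rounds to one decimal place in the nearest binary unit; a few illustrative inputs and outputs (not part of the diff):

// formatFileSize(512)       -> "512 B"
// formatFileSize(1536)      -> "1.5 KB"
// formatFileSize(2_621_440) -> "2.5 MB"
// formatFileSize(undefined) -> ""   (missing or zero sizes render nothing)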

View File

@@ -9,6 +9,7 @@ import { checkTagTrigger, TagDropdown } from '@/components/ui/tag-dropdown'
import { MAX_TAG_SLOTS } from '@/lib/knowledge/consts'
import { cn } from '@/lib/utils'
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
import type { SubBlockConfig } from '@/blocks/types'
import { useKnowledgeBaseTagDefinitions } from '@/hooks/use-knowledge-base-tag-definitions'
import { useTagSelection } from '@/hooks/use-tag-selection'
@@ -40,6 +41,7 @@ export function DocumentTagEntry({
isConnecting = false,
}: DocumentTagEntryProps) {
const [storeValue, setStoreValue] = useSubBlockValue<string>(blockId, subBlock.id)
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
// Get the knowledge base ID from other sub-blocks
const [knowledgeBaseIdValue] = useSubBlockValue(blockId, 'knowledgeBaseId')
@@ -301,7 +303,12 @@ export function DocumentTagEntry({
)}
/>
<div className='pointer-events-none absolute inset-0 flex items-center overflow-hidden bg-transparent px-3 text-sm'>
<div className='whitespace-pre'>{formatDisplayText(cellValue)}</div>
<div className='whitespace-pre'>
{formatDisplayText(cellValue, {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>
{showDropdown && availableTagDefinitions.length > 0 && (
<div className='absolute top-full left-0 z-[100] mt-1 w-full'>
@@ -389,7 +396,10 @@ export function DocumentTagEntry({
/>
<div className='pointer-events-none absolute inset-0 flex items-center overflow-hidden bg-transparent px-3 text-sm'>
<div className='whitespace-pre text-muted-foreground'>
{formatDisplayText(cellValue)}
{formatDisplayText(cellValue, {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>
{showTypeDropdown && !isReadOnly && (
@@ -469,7 +479,12 @@ export function DocumentTagEntry({
className='w-full border-0 text-transparent caret-foreground placeholder:text-muted-foreground/50 focus-visible:ring-0 focus-visible:ring-offset-0'
/>
<div className='pointer-events-none absolute inset-0 flex items-center overflow-hidden bg-transparent px-3 text-sm'>
<div className='whitespace-pre'>{formatDisplayText(cellValue)}</div>
<div className='whitespace-pre'>
{formatDisplayText(cellValue, {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>
</div>
</td>
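The same overlay pattern recurs in the sub-blocks below (knowledge tag filters, MCP args, table cells, trigger config): the real input renders transparent text with a visible caret, and a pointer-events-none div paints the highlighted value on top. A minimal sketch of the call, with option names taken from this diff and the types assumed rather than confirmed:

// accessiblePrefixes is assumed to be Set<string> | undefined, from useAccessibleReferencePrefixes(blockId).
// While the hook has no data yet, highlightAll falls back to highlighting every reference.
const overlayText = formatDisplayText(cellValue, {
  accessiblePrefixes,
  highlightAll: !accessiblePrefixes,
})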

View File

@@ -239,7 +239,12 @@ export function KnowledgeTagFilters({
onBlur={handleBlur}
/>
<div className='pointer-events-none absolute inset-0 flex items-center overflow-hidden bg-transparent px-3 text-sm'>
<div className='whitespace-pre'>{formatDisplayText(cellValue || 'Select tag')}</div>
<div className='whitespace-pre'>
{formatDisplayText(cellValue || 'Select tag', {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>
{showDropdown && tagDefinitions.length > 0 && (
<div className='absolute top-full left-0 z-[100] mt-1 w-full'>

View File

@@ -1,5 +1,6 @@
import { useCallback } from 'react'
import { useParams } from 'next/navigation'
import { formatDisplayText } from '@/components/ui/formatted-text'
import { Input } from '@/components/ui/input'
import { Label } from '@/components/ui/label'
import {
@@ -14,6 +15,7 @@ import { Switch } from '@/components/ui/switch'
import { Textarea } from '@/components/ui/textarea'
import { cn } from '@/lib/utils'
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
import { useMcpTools } from '@/hooks/use-mcp-tools'
import { formatParameterLabel } from '@/tools/params'
@@ -37,6 +39,7 @@ export function McpDynamicArgs({
const { mcpTools } = useMcpTools(workspaceId)
const [selectedTool] = useSubBlockValue(blockId, 'tool')
const [toolArgs, setToolArgs] = useSubBlockValue(blockId, subBlockId)
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
const selectedToolConfig = mcpTools.find((tool) => tool.id === selectedTool)
const toolSchema = selectedToolConfig?.inputSchema
@@ -180,7 +183,7 @@ export function McpDynamicArgs({
case 'long-input':
return (
<div key={`${paramName}-long`}>
<div key={`${paramName}-long`} className='relative'>
<Textarea
value={value || ''}
onChange={(e) => updateParameter(paramName, e.target.value, paramSchema)}
@@ -192,8 +195,14 @@ export function McpDynamicArgs({
}
disabled={disabled}
rows={4}
className='min-h-[80px] resize-none'
className='min-h-[80px] resize-none text-transparent caret-foreground'
/>
<div className='pointer-events-none absolute inset-0 overflow-auto whitespace-pre-wrap break-words p-3 text-sm'>
{formatDisplayText(value || '', {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>
)
@@ -203,9 +212,10 @@ export function McpDynamicArgs({
paramName.toLowerCase().includes('password') ||
paramName.toLowerCase().includes('token')
const isNumeric = paramSchema.type === 'number' || paramSchema.type === 'integer'
const isTextInput = !isPassword && !isNumeric
return (
<div key={`${paramName}-short`}>
<div key={`${paramName}-short`} className={isTextInput ? 'relative' : ''}>
<Input
type={isPassword ? 'password' : isNumeric ? 'number' : 'text'}
value={value || ''}
@@ -231,7 +241,18 @@ export function McpDynamicArgs({
`Enter ${formatParameterLabel(paramName).toLowerCase()}`
}
disabled={disabled}
className={isTextInput ? 'text-transparent caret-foreground' : ''}
/>
{isTextInput && (
<div className='pointer-events-none absolute inset-0 flex items-center overflow-hidden bg-transparent px-3 text-sm'>
<div className='whitespace-pre'>
{formatDisplayText(value?.toString() || '', {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>
)}
</div>
)
}

View File

@@ -3,7 +3,7 @@ import { ResponseFormat as SharedResponseFormat } from '@/app/workspace/[workspa
export interface JSONProperty {
id: string
key: string
type: 'string' | 'number' | 'boolean' | 'object' | 'array'
type: 'string' | 'number' | 'boolean' | 'object' | 'array' | 'file'
value?: any
collapsed?: boolean
}

View File

@@ -1,5 +1,5 @@
import { useEffect, useRef, useState } from 'react'
import { ChevronDown, Plus, Trash } from 'lucide-react'
import { ChevronDown, Paperclip, Plus, Trash } from 'lucide-react'
import { Badge } from '@/components/ui/badge'
import { Button } from '@/components/ui/button'
import {
@@ -27,7 +27,7 @@ import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/
interface Field {
id: string
name: string
type?: 'string' | 'number' | 'boolean' | 'object' | 'array'
type?: 'string' | 'number' | 'boolean' | 'object' | 'array' | 'files'
value?: string
collapsed?: boolean
}
@@ -339,37 +339,46 @@ export function FieldFormat({
onClick={() => updateField(field.id, 'type', 'string')}
className='cursor-pointer'
>
<span className='mr-2 font-mono'>Aa</span>
<span className='mr-2 w-6 text-center font-mono'>Aa</span>
<span>String</span>
</DropdownMenuItem>
<DropdownMenuItem
onClick={() => updateField(field.id, 'type', 'number')}
className='cursor-pointer'
>
<span className='mr-2 font-mono'>123</span>
<span className='mr-2 w-6 text-center font-mono'>123</span>
<span>Number</span>
</DropdownMenuItem>
<DropdownMenuItem
onClick={() => updateField(field.id, 'type', 'boolean')}
className='cursor-pointer'
>
<span className='mr-2 font-mono'>0/1</span>
<span className='mr-2 w-6 text-center font-mono'>0/1</span>
<span>Boolean</span>
</DropdownMenuItem>
<DropdownMenuItem
onClick={() => updateField(field.id, 'type', 'object')}
className='cursor-pointer'
>
<span className='mr-2 font-mono'>{'{}'}</span>
<span className='mr-2 w-6 text-center font-mono'>{'{}'}</span>
<span>Object</span>
</DropdownMenuItem>
<DropdownMenuItem
onClick={() => updateField(field.id, 'type', 'array')}
className='cursor-pointer'
>
<span className='mr-2 font-mono'>[]</span>
<span className='mr-2 w-6 text-center font-mono'>[]</span>
<span>Array</span>
</DropdownMenuItem>
<DropdownMenuItem
onClick={() => updateField(field.id, 'type', 'files')}
className='cursor-pointer'
>
<div className='mr-2 flex w-6 justify-center'>
<Paperclip className='h-4 w-4' />
</div>
<span>Files</span>
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</div>

View File

@@ -8,6 +8,7 @@ import { Input } from '@/components/ui/input'
import { checkTagTrigger, TagDropdown } from '@/components/ui/tag-dropdown'
import { cn } from '@/lib/utils'
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
interface TableProps {
blockId: string
@@ -34,6 +35,7 @@ export function Table({
const params = useParams()
const workspaceId = params.workspaceId as string
const [storeValue, setStoreValue] = useSubBlockValue<TableRow[]>(blockId, subBlockId)
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
// Use preview value when in preview mode, otherwise use store value
const value = isPreview ? previewValue : storeValue
@@ -240,7 +242,12 @@ export function Table({
data-overlay={cellKey}
className='pointer-events-none absolute inset-0 flex items-center overflow-hidden bg-transparent px-3 text-sm'
>
<div className='whitespace-pre'>{formatDisplayText(cellValue)}</div>
<div className='whitespace-pre'>
{formatDisplayText(cellValue, {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>
</div>
</td>

View File

@@ -10,6 +10,7 @@ import {
CommandItem,
CommandList,
} from '@/components/ui/command'
import { formatDisplayText } from '@/components/ui/formatted-text'
import { Input } from '@/components/ui/input'
import { Label } from '@/components/ui/label'
import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover'
@@ -23,9 +24,11 @@ import {
import { Switch } from '@/components/ui/switch'
import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/tooltip'
import { cn } from '@/lib/utils'
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
import type { TriggerConfig } from '@/triggers/types'
interface TriggerConfigSectionProps {
blockId: string
triggerDef: TriggerConfig
config: Record<string, any>
onChange: (fieldId: string, value: any) => void
@@ -34,6 +37,7 @@ interface TriggerConfigSectionProps {
}
export function TriggerConfigSection({
blockId,
triggerDef,
config,
onChange,
@@ -42,6 +46,7 @@ export function TriggerConfigSection({
}: TriggerConfigSectionProps) {
const [showSecrets, setShowSecrets] = useState<Record<string, boolean>>({})
const [copied, setCopied] = useState<string | null>(null)
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
const copyToClipboard = (text: string, type: string) => {
navigator.clipboard.writeText(text)
@@ -258,9 +263,20 @@ export function TriggerConfigSection({
className={cn(
'h-9 rounded-[8px]',
isSecret ? 'pr-32' : '',
'focus-visible:ring-2 focus-visible:ring-primary/20'
'focus-visible:ring-2 focus-visible:ring-primary/20',
!isSecret && 'text-transparent caret-foreground'
)}
/>
{!isSecret && (
<div className='pointer-events-none absolute inset-0 flex items-center overflow-hidden bg-transparent px-3 text-sm'>
<div className='whitespace-pre'>
{formatDisplayText(value?.toString() || '', {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>
)}
{isSecret && (
<div className='absolute top-0.5 right-0.5 flex h-8 items-center gap-1 pr-1'>
<Button

View File

@@ -467,6 +467,7 @@ export function TriggerModal({
)}
<TriggerConfigSection
blockId={blockId}
triggerDef={triggerDef}
config={config}
onChange={handleConfigChange}

View File

@@ -26,7 +26,7 @@ export type DocumentProcessingPayload = {
export const processDocument = task({
id: 'knowledge-process-document',
maxDuration: env.KB_CONFIG_MAX_DURATION || 300,
maxDuration: env.KB_CONFIG_MAX_DURATION || 600,
retry: {
maxAttempts: env.KB_CONFIG_MAX_ATTEMPTS || 3,
factor: env.KB_CONFIG_RETRY_FACTOR || 2,

View File

@@ -25,7 +25,7 @@ export const ChatTriggerBlock: BlockConfig = {
outputs: {
input: { type: 'string', description: 'User message' },
conversationId: { type: 'string', description: 'Conversation ID' },
files: { type: 'array', description: 'Uploaded files' },
files: { type: 'files', description: 'Uploaded files' },
},
triggers: {
enabled: true,

View File

@@ -3,7 +3,14 @@ import type { ToolResponse } from '@/tools/types'
export type BlockIcon = (props: SVGProps<SVGSVGElement>) => JSX.Element
export type ParamType = 'string' | 'number' | 'boolean' | 'json'
export type PrimitiveValueType = 'string' | 'number' | 'boolean' | 'json' | 'array' | 'any'
export type PrimitiveValueType =
| 'string'
| 'number'
| 'boolean'
| 'json'
| 'array'
| 'files'
| 'any'
export type BlockCategory = 'blocks' | 'tools' | 'triggers'

View File

@@ -155,7 +155,7 @@ const getOutputTypeForPath = (
const chatModeTypes: Record<string, string> = {
input: 'string',
conversationId: 'string',
files: 'array',
files: 'files',
}
return chatModeTypes[outputPath] || 'any'
}
@@ -336,10 +336,8 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
const combinedAccessiblePrefixes = useMemo(() => {
if (!rawAccessiblePrefixes) return new Set<string>()
const normalized = new Set<string>(rawAccessiblePrefixes)
normalized.add(normalizeBlockName(blockId))
return normalized
}, [rawAccessiblePrefixes, blockId])
return new Set<string>(rawAccessiblePrefixes)
}, [rawAccessiblePrefixes])
// Subscribe to live subblock values for the active workflow to react to input format changes
const workflowSubBlockValues = useSubBlockStore((state) =>
@@ -997,6 +995,33 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
let processedTag = tag
// Check if this is a file property and add [0] automatically
const fileProperties = ['url', 'name', 'size', 'type', 'key', 'uploadedAt', 'expiresAt']
const parts = tag.split('.')
if (parts.length >= 2 && fileProperties.includes(parts[parts.length - 1])) {
const fieldName = parts[parts.length - 2]
if (blockGroup) {
const block = useWorkflowStore.getState().blocks[blockGroup.blockId]
const blockConfig = block ? (getBlock(block.type) ?? null) : null
const mergedSubBlocks = getMergedSubBlocks(blockGroup.blockId)
const fieldType = getOutputTypeForPath(
block,
blockConfig,
blockGroup.blockId,
fieldName,
mergedSubBlocks
)
if (fieldType === 'files') {
const blockAndField = parts.slice(0, -1).join('.')
const property = parts[parts.length - 1]
processedTag = `${blockAndField}[0].${property}`
}
}
}
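// Worked example of the auto-indexing above (block and field names illustrative):
//   tag           = 'chat.files.url'    // picked from the dropdown
//   fieldType     = 'files'             // resolved via getOutputTypeForPath
//   blockAndField = 'chat.files'
//   property      = 'url'
//   processedTag  = 'chat.files[0].url' // first uploaded file is referenced by default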
if (tag.startsWith(TAG_PREFIXES.VARIABLE)) {
const variableName = tag.substring(TAG_PREFIXES.VARIABLE.length)
const variableObj = workflowVariables.find(

View File

@@ -165,7 +165,7 @@ describe('ConditionBlockHandler', () => {
mockContext,
mockBlock
)
expect(mockResolver.resolveEnvVariables).toHaveBeenCalledWith('context.value > 5', true)
expect(mockResolver.resolveEnvVariables).toHaveBeenCalledWith('context.value > 5')
expect(result).toEqual(expectedOutput)
expect(mockContext.decisions.condition.get(mockBlock.id)).toBe('cond1')
})
@@ -205,7 +205,7 @@ describe('ConditionBlockHandler', () => {
mockContext,
mockBlock
)
expect(mockResolver.resolveEnvVariables).toHaveBeenCalledWith('context.value < 0', true)
expect(mockResolver.resolveEnvVariables).toHaveBeenCalledWith('context.value < 0')
expect(result).toEqual(expectedOutput)
expect(mockContext.decisions.condition.get(mockBlock.id)).toBe('else1')
})
@@ -241,7 +241,7 @@ describe('ConditionBlockHandler', () => {
mockContext,
mockBlock
)
expect(mockResolver.resolveEnvVariables).toHaveBeenCalledWith('10 > 5', true)
expect(mockResolver.resolveEnvVariables).toHaveBeenCalledWith('10 > 5')
expect(mockContext.decisions.condition.get(mockBlock.id)).toBe('cond1')
})
@@ -268,7 +268,7 @@ describe('ConditionBlockHandler', () => {
mockContext,
mockBlock
)
expect(mockResolver.resolveEnvVariables).toHaveBeenCalledWith('"john" !== null', true)
expect(mockResolver.resolveEnvVariables).toHaveBeenCalledWith('"john" !== null')
expect(mockContext.decisions.condition.get(mockBlock.id)).toBe('cond1')
})
@@ -295,7 +295,7 @@ describe('ConditionBlockHandler', () => {
mockContext,
mockBlock
)
expect(mockResolver.resolveEnvVariables).toHaveBeenCalledWith('{{POOP}} === "hi"', true)
expect(mockResolver.resolveEnvVariables).toHaveBeenCalledWith('{{POOP}} === "hi"')
expect(mockContext.decisions.condition.get(mockBlock.id)).toBe('cond1')
})

View File

@@ -109,7 +109,7 @@ export class ConditionBlockHandler implements BlockHandler {
// Use full resolution pipeline: variables -> block references -> env vars
const resolvedVars = this.resolver.resolveVariableReferences(conditionValueString, block)
const resolvedRefs = this.resolver.resolveBlockReferences(resolvedVars, context, block)
resolvedConditionValue = this.resolver.resolveEnvVariables(resolvedRefs, true)
resolvedConditionValue = this.resolver.resolveEnvVariables(resolvedRefs)
logger.info(
`Resolved condition "${condition.title}" (${condition.id}): from "${conditionValueString}" to "${resolvedConditionValue}"`
)

View File

@@ -9,7 +9,7 @@ const logger = createLogger('ResponseBlockHandler')
interface JSONProperty {
id: string
name: string
type: 'string' | 'number' | 'boolean' | 'object' | 'array'
type: 'string' | 'number' | 'boolean' | 'object' | 'array' | 'files'
value: any
collapsed?: boolean
}
@@ -140,6 +140,9 @@ export class ResponseBlockHandler implements BlockHandler {
return this.convertNumberValue(prop.value)
case 'boolean':
return this.convertBooleanValue(prop.value)
case 'files':
// File values should be passed through as-is (UserFile objects)
return prop.value
default:
return prop.value
}

View File

@@ -432,7 +432,7 @@ describe('InputResolver', () => {
expect(result.apiKey).toBe('test-api-key')
expect(result.url).toBe('https://example.com?key=test-api-key')
expect(result.regularParam).toBe('Base URL is: {{BASE_URL}}')
expect(result.regularParam).toBe('Base URL is: https://api.example.com')
})
it('should resolve explicit environment variables', () => {
@@ -458,7 +458,7 @@ describe('InputResolver', () => {
expect(result.explicitEnv).toBe('https://api.example.com')
})
it('should not resolve environment variables in regular contexts', () => {
it('should resolve environment variables in all contexts', () => {
const block: SerializedBlock = {
id: 'test-block',
metadata: { id: 'generic', name: 'Test Block' },
@@ -478,7 +478,7 @@ describe('InputResolver', () => {
const result = resolver.resolveInputs(block, mockContext)
expect(result.regularParam).toBe('Value with {{API_KEY}} embedded')
expect(result.regularParam).toBe('Value with test-api-key embedded')
})
})

View File

@@ -313,7 +313,7 @@ export class InputResolver {
}
// Handle objects and arrays recursively
else if (typeof value === 'object') {
result[key] = this.processObjectValue(value, key, context, block)
result[key] = this.processObjectValue(value, context, block)
}
// Pass through other value types
else {
@@ -684,7 +684,7 @@ export class InputResolver {
}
// Standard block reference resolution with connection validation
const validation = this.validateBlockReference(blockRef, currentBlock.id, context)
const validation = this.validateBlockReference(blockRef, currentBlock.id)
if (!validation.isValid) {
throw new Error(validation.errorMessage!)
@@ -845,142 +845,42 @@ export class InputResolver {
return resolvedValue
}
/**
* Validates if a match with < and > is actually a variable reference.
* Valid variable references must:
* - Have no space after the opening <
* - Contain a dot (.)
* - Have no spaces until the closing >
* - Not be comparison operators or HTML tags
*
* @param match - The matched string including < and >
* @returns Whether this is a valid variable reference
*/
private isValidVariableReference(match: string): boolean {
const innerContent = match.slice(1, -1)
if (innerContent.startsWith(' ')) {
return false
}
if (innerContent.match(/^\s*[<>=!]+\s*$/) || innerContent.match(/\s[<>=!]+\s/)) {
return false
}
if (innerContent.match(/^[<>=!]+\s/)) {
return false
}
if (innerContent.includes('.')) {
const dotIndex = innerContent.indexOf('.')
const beforeDot = innerContent.substring(0, dotIndex)
const afterDot = innerContent.substring(dotIndex + 1)
if (afterDot.includes(' ')) {
return false
}
if (beforeDot.match(/[+*/=<>!]/) || afterDot.match(/[+\-*/=<>!]/)) {
return false
}
} else {
if (
innerContent.match(/[+\-*/=<>!]/) ||
innerContent.match(/^\d/) ||
innerContent.match(/\s\d/)
) {
return false
}
}
return true
}
/**
* Determines if a string contains a properly formatted environment variable reference.
* Valid references are either:
* 1. A standalone env var (entire string is just {{ENV_VAR}})
* 2. An explicit env var with clear boundaries (usually within a URL or similar)
*
* @param value - The string to check
* @returns Whether this contains a properly formatted env var reference
*/
private containsProperEnvVarReference(value: string): boolean {
if (!value || typeof value !== 'string') return false
// Case 1: String is just a single environment variable
if (value.trim().match(/^\{\{[^{}]+\}\}$/)) {
return true
}
// Case 2: Check for environment variables in specific contexts
// For example, in URLs, bearer tokens, etc.
const properContextPatterns = [
// Auth header patterns
/Bearer\s+\{\{[^{}]+\}\}/i,
/Authorization:\s+Bearer\s+\{\{[^{}]+\}\}/i,
/Authorization:\s+\{\{[^{}]+\}\}/i,
// API key in URL patterns
/[?&]api[_-]?key=\{\{[^{}]+\}\}/i,
/[?&]key=\{\{[^{}]+\}\}/i,
/[?&]token=\{\{[^{}]+\}\}/i,
// API key in header patterns
/X-API-Key:\s+\{\{[^{}]+\}\}/i,
/api[_-]?key:\s+\{\{[^{}]+\}\}/i,
]
return properContextPatterns.some((pattern) => pattern.test(value))
}
/**
* Resolves environment variables in any value ({{ENV_VAR}}).
* Only processes environment variables in apiKey fields or when explicitly needed.
*
* @param value - Value that may contain environment variable references
* @param isApiKey - Whether this is an API key field (requires special env var handling)
* @returns Value with environment variables resolved
* @throws Error if referenced environment variable is not found
*/
resolveEnvVariables(value: any, isApiKey = false): any {
resolveEnvVariables(value: any): any {
if (typeof value === 'string') {
// Only process environment variables if:
// 1. This is an API key field
// 2. String is a complete environment variable reference ({{ENV_VAR}})
// 3. String contains environment variable references in proper contexts (auth headers, URLs)
const isExplicitEnvVar = value.trim().startsWith('{{') && value.trim().endsWith('}}')
const hasProperEnvVarReferences = this.containsProperEnvVarReference(value)
const envMatches = value.match(/\{\{([^}]+)\}\}/g)
if (envMatches) {
let resolvedValue = value
for (const match of envMatches) {
const envKey = match.slice(2, -2)
const envValue = this.environmentVariables[envKey]
if (isApiKey || isExplicitEnvVar || hasProperEnvVarReferences) {
const envMatches = value.match(/\{\{([^}]+)\}\}/g)
if (envMatches) {
let resolvedValue = value
for (const match of envMatches) {
const envKey = match.slice(2, -2)
const envValue = this.environmentVariables[envKey]
if (envValue === undefined) {
throw new Error(`Environment variable "${envKey}" was not found.`)
}
resolvedValue = resolvedValue.replace(match, envValue)
if (envValue === undefined) {
throw new Error(`Environment variable "${envKey}" was not found.`)
}
return resolvedValue
resolvedValue = resolvedValue.replace(match, envValue)
}
return resolvedValue
}
return value
}
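// Post-change behavior (illustrative), given environmentVariables = { API_KEY: 'test-api-key' }:
//   resolveEnvVariables('Value with {{API_KEY}} embedded') -> 'Value with test-api-key embedded'
//   resolveEnvVariables('{{MISSING}}')                     -> throws: Environment variable "MISSING" was not found.
// Inline {{ENV_VAR}} references now resolve in every string, not only in API-key fields
// or recognized auth-header/URL contexts as before.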
if (Array.isArray(value)) {
return value.map((item) => this.resolveEnvVariables(item, isApiKey))
return value.map((item) => this.resolveEnvVariables(item))
}
if (value && typeof value === 'object') {
return Object.entries(value).reduce(
(acc, [k, v]) => ({
...acc,
[k]: this.resolveEnvVariables(v, k.toLowerCase() === 'apikey'),
[k]: this.resolveEnvVariables(v),
}),
{}
)
@@ -1016,11 +916,8 @@ export class InputResolver {
// Then resolve block references
const resolvedReferences = this.resolveBlockReferences(resolvedVars, context, currentBlock)
// Check if this is an API key field
const isApiKey = this.isApiKeyField(currentBlock, value)
// Then resolve environment variables with the API key flag
return this.resolveEnvVariables(resolvedReferences, isApiKey)
// Then resolve environment variables
return this.resolveEnvVariables(resolvedReferences)
}
// Handle arrays
@@ -1032,7 +929,6 @@ export class InputResolver {
if (typeof value === 'object') {
const result: Record<string, any> = {}
for (const [k, v] of Object.entries(value)) {
const _isApiKey = k.toLowerCase() === 'apikey'
result[k] = this.resolveNestedStructure(v, context, currentBlock)
}
return result
@@ -1042,38 +938,6 @@ export class InputResolver {
return value
}
/**
* Determines if a given field in a block is an API key field.
*
* @param block - Block containing the field
* @param value - Value to check
* @returns Whether this appears to be an API key field
*/
private isApiKeyField(block: SerializedBlock, value: string): boolean {
// Check if the block is an API or agent block (which typically have API keys)
const blockType = block.metadata?.id
if (blockType !== 'api' && blockType !== 'agent') {
return false
}
// Look for the value in the block params
for (const [key, paramValue] of Object.entries(block.config.params)) {
if (paramValue === value) {
// Check if key name suggests it's an API key
const normalizedKey = key.toLowerCase().replace(/[_\-\s]/g, '')
return (
normalizedKey === 'apikey' ||
normalizedKey.includes('apikey') ||
normalizedKey.includes('secretkey') ||
normalizedKey.includes('accesskey') ||
normalizedKey.includes('token')
)
}
}
return false
}
/**
* Formats a value for use in condition blocks.
* Handles strings, null, undefined, and objects appropriately.
@@ -1291,41 +1155,17 @@ export class InputResolver {
return [...new Set(names)] // Remove duplicates
}
/**
* Checks if a block reference could potentially be valid without throwing errors.
* Used to filter out non-block patterns like <test> from block reference resolution.
*
* @param blockRef - The block reference to check
* @param currentBlockId - ID of the current block
* @returns Whether this could be a valid block reference
*/
private isAccessibleBlockReference(blockRef: string, currentBlockId: string): boolean {
// Special cases that are always allowed
const specialRefs = ['start', 'loop', 'parallel']
if (specialRefs.includes(blockRef.toLowerCase())) {
return true
}
// Get all accessible block names for this block
const accessibleNames = this.getAccessibleBlockNames(currentBlockId)
// Check if the reference matches any accessible block name
return accessibleNames.includes(blockRef) || accessibleNames.includes(blockRef.toLowerCase())
}
/**
* Validates if a block reference is accessible from the current block.
* Checks existence and connection-based access rules.
*
* @param blockRef - Name or ID of the referenced block
* @param currentBlockId - ID of the block making the reference
* @param context - Current execution context
* @returns Validation result with success status and resolved block ID or error message
*/
private validateBlockReference(
blockRef: string,
currentBlockId: string,
context: ExecutionContext
currentBlockId: string
): { isValid: boolean; resolvedBlockId?: string; errorMessage?: string } {
// Special case: 'start' is always allowed
if (blockRef.toLowerCase() === 'start') {
@@ -1709,7 +1549,7 @@ export class InputResolver {
} else if (parallel.nodes.includes(currentBlock.id)) {
// Fallback: if we're inside a parallel execution but don't have currentVirtualBlockId
// This shouldn't happen in normal execution but provides backward compatibility
for (const [virtualId, mapping] of context.parallelBlockMapping || new Map()) {
for (const [_, mapping] of context.parallelBlockMapping || new Map()) {
if (mapping.originalBlockId === currentBlock.id && mapping.parallelId === parallelId) {
const iterationKey = `${parallelId}_iteration_${mapping.iterationIndex}`
const iterationItem = context.loopItems.get(iterationKey)
@@ -1899,11 +1739,8 @@ export class InputResolver {
// Then resolve block references
const resolvedReferences = this.resolveBlockReferences(resolvedVars, context, block)
// Check if this is an API key field
const isApiKey = this.isApiKeyField(block, value)
// Then resolve environment variables
const resolvedEnv = this.resolveEnvVariables(resolvedReferences, isApiKey)
const resolvedEnv = this.resolveEnvVariables(resolvedReferences)
// Special handling for different block types
const blockType = block.metadata?.id
@@ -1927,17 +1764,11 @@ export class InputResolver {
* Handles special cases like table-like arrays with cells.
*
* @param value - Object or array to process
* @param key - The parameter key
* @param context - Current execution context
* @param block - Block containing the value
* @returns Processed object/array
*/
private processObjectValue(
value: any,
key: string,
context: ExecutionContext,
block: SerializedBlock
): any {
private processObjectValue(value: any, context: ExecutionContext, block: SerializedBlock): any {
// Special handling for table-like arrays (e.g., from API params/headers)
if (
Array.isArray(value) &&

View File

@@ -44,6 +44,7 @@ import { quickValidateEmail } from '@/lib/email/validation'
import { env, isTruthy } from '@/lib/env'
import { isBillingEnabled, isEmailVerificationEnabled } from '@/lib/environment'
import { createLogger } from '@/lib/logs/console/logger'
import { getRedisClient } from '@/lib/redis'
import { SSO_TRUSTED_PROVIDERS } from './sso/consts'
const logger = createLogger('Auth')
@@ -59,6 +60,40 @@ if (validStripeKey) {
})
}
// Configure Redis secondary storage for session data (optional)
const redis = getRedisClient()
const redisSecondaryStorage = redis
? {
get: async (key: string) => {
try {
const value = await redis.get(key)
return value
} catch (error) {
logger.error('Redis get error in secondaryStorage', { key, error })
return null
}
},
set: async (key: string, value: string, ttl?: number) => {
try {
if (ttl) {
await redis.set(key, value, 'EX', ttl)
} else {
await redis.set(key, value)
}
} catch (error) {
logger.error('Redis set error in secondaryStorage', { key, ttl, error })
}
},
delete: async (key: string) => {
try {
await redis.del(key)
} catch (error) {
logger.error('Redis delete error in secondaryStorage', { key, error })
}
},
}
: undefined
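// Assumption: redis.set(key, value, 'EX', ttl) is the ioredis-style expiry signature, so
// getRedisClient() is taken to return an ioredis-compatible client here. When it returns
// null (e.g. no Redis URL configured), secondaryStorage is omitted below and better-auth
// keeps reading and writing session data through the Postgres adapter alone.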
export const auth = betterAuth({
baseURL: getBaseURL(),
trustedOrigins: [
@@ -69,6 +104,8 @@ export const auth = betterAuth({
provider: 'pg',
schema,
}),
// Conditionally add secondaryStorage only if Redis is available
...(redisSecondaryStorage ? { secondaryStorage: redisSecondaryStorage } : {}),
session: {
cookieCache: {
enabled: true,

View File

@@ -19,3 +19,9 @@ export const DEFAULT_ENTERPRISE_TIER_COST_LIMIT = 200
* This charge is applied regardless of whether the workflow uses AI models
*/
export const BASE_EXECUTION_CHARGE = 0.001
/**
* Default threshold (in dollars) for incremental overage billing
* When unbilled overage reaches this amount, an invoice item is created
*/
export const DEFAULT_OVERAGE_THRESHOLD = 50

View File

@@ -0,0 +1,419 @@
import { db } from '@sim/db'
import { member, subscription, userStats } from '@sim/db/schema'
import { and, eq, inArray, sql } from 'drizzle-orm'
import type Stripe from 'stripe'
import { DEFAULT_OVERAGE_THRESHOLD } from '@/lib/billing/constants'
import { calculateSubscriptionOverage, getPlanPricing } from '@/lib/billing/core/billing'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { requireStripeClient } from '@/lib/billing/stripe-client'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('ThresholdBilling')
const OVERAGE_THRESHOLD = env.OVERAGE_THRESHOLD_DOLLARS || DEFAULT_OVERAGE_THRESHOLD
function parseDecimal(value: string | number | null | undefined): number {
if (value === null || value === undefined) return 0
return Number.parseFloat(value.toString())
}
async function createAndFinalizeOverageInvoice(
stripe: ReturnType<typeof requireStripeClient>,
params: {
customerId: string
stripeSubscriptionId: string
amountCents: number
description: string
itemDescription: string
metadata: Record<string, string>
idempotencyKey: string
}
): Promise<string> {
const getPaymentMethodId = (
pm: string | Stripe.PaymentMethod | null | undefined
): string | undefined => (typeof pm === 'string' ? pm : pm?.id)
let defaultPaymentMethod: string | undefined
try {
const stripeSub = await stripe.subscriptions.retrieve(params.stripeSubscriptionId)
const subDpm = getPaymentMethodId(stripeSub.default_payment_method)
if (subDpm) {
defaultPaymentMethod = subDpm
} else {
const custObj = await stripe.customers.retrieve(params.customerId)
if (custObj && !('deleted' in custObj)) {
const cust = custObj as Stripe.Customer
const custDpm = getPaymentMethodId(cust.invoice_settings?.default_payment_method)
if (custDpm) defaultPaymentMethod = custDpm
}
}
} catch (e) {
logger.error('Failed to retrieve subscription or customer', { error: e })
}
const invoice = await stripe.invoices.create(
{
customer: params.customerId,
collection_method: 'charge_automatically',
auto_advance: false,
description: params.description,
metadata: params.metadata,
...(defaultPaymentMethod ? { default_payment_method: defaultPaymentMethod } : {}),
},
{ idempotencyKey: `${params.idempotencyKey}-invoice` }
)
await stripe.invoiceItems.create(
{
customer: params.customerId,
invoice: invoice.id,
amount: params.amountCents,
currency: 'usd',
description: params.itemDescription,
metadata: params.metadata,
},
{ idempotencyKey: params.idempotencyKey }
)
if (invoice.id) {
const finalized = await stripe.invoices.finalizeInvoice(invoice.id)
if (finalized.status === 'open' && finalized.id) {
try {
await stripe.invoices.pay(finalized.id, {
payment_method: defaultPaymentMethod,
})
} catch (payError) {
logger.error('Failed to auto-pay threshold overage invoice', {
error: payError,
invoiceId: finalized.id,
})
}
}
}
return invoice.id || ''
}
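// Flow of the helper above: resolve a default payment method (subscription first, then the
// customer's invoice settings), create a draft invoice with auto_advance disabled, attach a
// single overage invoice item, finalize, then attempt an immediate charge. Auto-pay failures
// are logged rather than rethrown, leaving the finalized invoice open.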
export async function checkAndBillOverageThreshold(userId: string): Promise<void> {
try {
const threshold = OVERAGE_THRESHOLD
const userSubscription = await getHighestPrioritySubscription(userId)
if (!userSubscription || userSubscription.status !== 'active') {
logger.debug('No active subscription for threshold billing', { userId })
return
}
if (
!userSubscription.plan ||
userSubscription.plan === 'free' ||
userSubscription.plan === 'enterprise'
) {
return
}
if (userSubscription.plan === 'team') {
logger.debug('Team plan detected - triggering org-level threshold billing', {
userId,
organizationId: userSubscription.referenceId,
})
await checkAndBillOrganizationOverageThreshold(userSubscription.referenceId)
return
}
await db.transaction(async (tx) => {
const statsRecords = await tx
.select()
.from(userStats)
.where(eq(userStats.userId, userId))
.for('update')
.limit(1)
if (statsRecords.length === 0) {
logger.warn('User stats not found for threshold billing', { userId })
return
}
const stats = statsRecords[0]
const currentOverage = await calculateSubscriptionOverage({
id: userSubscription.id,
plan: userSubscription.plan,
referenceId: userSubscription.referenceId,
seats: userSubscription.seats,
})
const billedOverageThisPeriod = parseDecimal(stats.billedOverageThisPeriod)
const unbilledOverage = Math.max(0, currentOverage - billedOverageThisPeriod)
logger.debug('Threshold billing check', {
userId,
plan: userSubscription.plan,
currentOverage,
billedOverageThisPeriod,
unbilledOverage,
threshold,
})
if (unbilledOverage < threshold) {
return
}
const amountToBill = unbilledOverage
const stripeSubscriptionId = userSubscription.stripeSubscriptionId
if (!stripeSubscriptionId) {
logger.error('No Stripe subscription ID found', { userId })
return
}
const stripe = requireStripeClient()
const stripeSubscription = await stripe.subscriptions.retrieve(stripeSubscriptionId)
const customerId =
typeof stripeSubscription.customer === 'string'
? stripeSubscription.customer
: stripeSubscription.customer.id
const periodEnd = userSubscription.periodEnd
? Math.floor(userSubscription.periodEnd.getTime() / 1000)
: Math.floor(Date.now() / 1000)
const billingPeriod = new Date(periodEnd * 1000).toISOString().slice(0, 7)
const amountCents = Math.round(amountToBill * 100)
const totalOverageCents = Math.round(currentOverage * 100)
const idempotencyKey = `threshold-overage:${customerId}:${stripeSubscriptionId}:${billingPeriod}:${totalOverageCents}:${amountCents}`
logger.info('Creating threshold overage invoice', {
userId,
plan: userSubscription.plan,
amountToBill,
billingPeriod,
idempotencyKey,
})
const cents = amountCents
const invoiceId = await createAndFinalizeOverageInvoice(stripe, {
customerId,
stripeSubscriptionId,
amountCents: cents,
description: `Threshold overage billing ${billingPeriod}`,
itemDescription: `Usage overage ($${amountToBill.toFixed(2)})`,
metadata: {
type: 'overage_threshold_billing',
userId,
subscriptionId: stripeSubscriptionId,
billingPeriod,
totalOverageAtTimeOfBilling: currentOverage.toFixed(2),
},
idempotencyKey,
})
await tx
.update(userStats)
.set({
billedOverageThisPeriod: sql`${userStats.billedOverageThisPeriod} + ${amountToBill}`,
})
.where(eq(userStats.userId, userId))
logger.info('Successfully created and finalized threshold overage invoice', {
userId,
amountBilled: amountToBill,
invoiceId,
newBilledTotal: billedOverageThisPeriod + amountToBill,
})
})
} catch (error) {
logger.error('Error in threshold billing check', {
userId,
error,
})
}
}
export async function checkAndBillOrganizationOverageThreshold(
organizationId: string
): Promise<void> {
logger.debug('Entered checkAndBillOrganizationOverageThreshold', { organizationId })
try {
const threshold = OVERAGE_THRESHOLD
logger.debug('Starting organization threshold billing check', { organizationId, threshold })
const orgSubscriptions = await db
.select()
.from(subscription)
.where(and(eq(subscription.referenceId, organizationId), eq(subscription.status, 'active')))
.limit(1)
if (orgSubscriptions.length === 0) {
logger.debug('No active subscription for organization', { organizationId })
return
}
const orgSubscription = orgSubscriptions[0]
logger.debug('Found organization subscription', {
organizationId,
plan: orgSubscription.plan,
seats: orgSubscription.seats,
stripeSubscriptionId: orgSubscription.stripeSubscriptionId,
})
if (orgSubscription.plan !== 'team') {
logger.debug('Organization plan is not team, skipping', {
organizationId,
plan: orgSubscription.plan,
})
return
}
const members = await db
.select({ userId: member.userId, role: member.role })
.from(member)
.where(eq(member.organizationId, organizationId))
logger.debug('Found organization members', {
organizationId,
memberCount: members.length,
members: members.map((m) => ({ userId: m.userId, role: m.role })),
})
if (members.length === 0) {
logger.warn('No members found for organization', { organizationId })
return
}
const owner = members.find((m) => m.role === 'owner')
if (!owner) {
logger.error('No owner found for organization', { organizationId })
return
}
logger.debug('Found organization owner, starting transaction', {
organizationId,
ownerId: owner.userId,
})
await db.transaction(async (tx) => {
const ownerStatsLock = await tx
.select()
.from(userStats)
.where(eq(userStats.userId, owner.userId))
.for('update')
.limit(1)
if (ownerStatsLock.length === 0) {
logger.error('Owner stats not found', { organizationId, ownerId: owner.userId })
return
}
let totalTeamUsage = parseDecimal(ownerStatsLock[0].currentPeriodCost)
const totalBilledOverage = parseDecimal(ownerStatsLock[0].billedOverageThisPeriod)
const nonOwnerIds = members.filter((m) => m.userId !== owner.userId).map((m) => m.userId)
if (nonOwnerIds.length > 0) {
const memberStatsRows = await tx
.select({
userId: userStats.userId,
currentPeriodCost: userStats.currentPeriodCost,
})
.from(userStats)
.where(inArray(userStats.userId, nonOwnerIds))
for (const stats of memberStatsRows) {
totalTeamUsage += parseDecimal(stats.currentPeriodCost)
}
}
const { basePrice: basePricePerSeat } = getPlanPricing(orgSubscription.plan)
const basePrice = basePricePerSeat * (orgSubscription.seats || 1)
const currentOverage = Math.max(0, totalTeamUsage - basePrice)
const unbilledOverage = Math.max(0, currentOverage - totalBilledOverage)
logger.debug('Organization threshold billing check', {
organizationId,
totalTeamUsage,
basePrice,
currentOverage,
totalBilledOverage,
unbilledOverage,
threshold,
})
if (unbilledOverage < threshold) {
return
}
const amountToBill = unbilledOverage
const stripeSubscriptionId = orgSubscription.stripeSubscriptionId
if (!stripeSubscriptionId) {
logger.error('No Stripe subscription ID for organization', { organizationId })
return
}
const stripe = requireStripeClient()
const stripeSubscription = await stripe.subscriptions.retrieve(stripeSubscriptionId)
const customerId =
typeof stripeSubscription.customer === 'string'
? stripeSubscription.customer
: stripeSubscription.customer.id
const periodEnd = orgSubscription.periodEnd
? Math.floor(orgSubscription.periodEnd.getTime() / 1000)
: Math.floor(Date.now() / 1000)
const billingPeriod = new Date(periodEnd * 1000).toISOString().slice(0, 7)
const amountCents = Math.round(amountToBill * 100)
const totalOverageCents = Math.round(currentOverage * 100)
const idempotencyKey = `threshold-overage-org:${customerId}:${stripeSubscriptionId}:${billingPeriod}:${totalOverageCents}:${amountCents}`
logger.info('Creating organization threshold overage invoice', {
organizationId,
amountToBill,
billingPeriod,
})
const cents = amountCents
const invoiceId = await createAndFinalizeOverageInvoice(stripe, {
customerId,
stripeSubscriptionId,
amountCents: cents,
description: `Team threshold overage billing ${billingPeriod}`,
itemDescription: `Team usage overage ($${amountToBill.toFixed(2)})`,
metadata: {
type: 'overage_threshold_billing_org',
organizationId,
subscriptionId: stripeSubscriptionId,
billingPeriod,
totalOverageAtTimeOfBilling: currentOverage.toFixed(2),
},
idempotencyKey,
})
await tx
.update(userStats)
.set({
billedOverageThisPeriod: sql`${userStats.billedOverageThisPeriod} + ${amountToBill}`,
})
.where(eq(userStats.userId, owner.userId))
logger.info('Successfully created and finalized organization threshold overage invoice', {
organizationId,
ownerId: owner.userId,
amountBilled: amountToBill,
invoiceId,
})
})
} catch (error) {
logger.error('Error in organization threshold billing', {
organizationId,
error,
})
}
}
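// Worked example of the threshold check (threshold = $50 by default, numbers illustrative):
//   currentOverage          = 120.00  // usage above the plan's included amount this period
//   billedOverageThisPeriod =  80.00  // already invoiced by earlier threshold runs
//   unbilledOverage         =  40.00  // below the threshold, so nothing is billed yet
// Once unbilledOverage reaches the threshold, the full unbilled amount is invoiced and
// billedOverageThisPeriod is bumped by that amount inside the same row-locked transaction.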

View File

@@ -1,6 +1,6 @@
import { db } from '@sim/db'
import { member, subscription as subscriptionTable, userStats } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import { eq, inArray } from 'drizzle-orm'
import type Stripe from 'stripe'
import { calculateSubscriptionOverage } from '@/lib/billing/core/billing'
import { requireStripeClient } from '@/lib/billing/stripe-client'
@@ -8,6 +8,64 @@ import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('StripeInvoiceWebhooks')
const OVERAGE_INVOICE_TYPES = new Set<string>([
'overage_billing',
'overage_threshold_billing',
'overage_threshold_billing_org',
])
function parseDecimal(value: string | number | null | undefined): number {
if (value === null || value === undefined) return 0
return Number.parseFloat(value.toString())
}
/**
* Get total billed overage for a subscription, handling team vs individual plans
* For team plans: sums billedOverageThisPeriod across all members
* For other plans: gets billedOverageThisPeriod for the user
*/
export async function getBilledOverageForSubscription(sub: {
plan: string | null
referenceId: string
}): Promise<number> {
let billedOverage = 0
if (sub.plan === 'team') {
const members = await db
.select({ userId: member.userId })
.from(member)
.where(eq(member.organizationId, sub.referenceId))
const memberIds = members.map((m) => m.userId)
if (memberIds.length > 0) {
const memberStatsRows = await db
.select({
userId: userStats.userId,
billedOverageThisPeriod: userStats.billedOverageThisPeriod,
})
.from(userStats)
.where(inArray(userStats.userId, memberIds))
for (const stats of memberStatsRows) {
billedOverage += parseDecimal(stats.billedOverageThisPeriod)
}
}
} else {
const userStatsRecords = await db
.select({ billedOverageThisPeriod: userStats.billedOverageThisPeriod })
.from(userStats)
.where(eq(userStats.userId, sub.referenceId))
.limit(1)
if (userStatsRecords.length > 0) {
billedOverage = parseDecimal(userStatsRecords[0].billedOverageThisPeriod)
}
}
return billedOverage
}
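// Example (illustrative numbers): if the period ends with totalOverage = 130 and threshold
// billing already invoiced billedOverage = 100, the end-of-period and cancellation paths that
// call this helper bill only the remaining 30 instead of double-charging the full 130.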
export async function resetUsageForSubscription(sub: { plan: string | null; referenceId: string }) {
if (sub.plan === 'team' || sub.plan === 'enterprise') {
const membersRows = await db
@@ -25,7 +83,11 @@ export async function resetUsageForSubscription(sub: { plan: string | null; refe
const current = currentStats[0].current || '0'
await db
.update(userStats)
.set({ lastPeriodCost: current, currentPeriodCost: '0' })
.set({
lastPeriodCost: current,
currentPeriodCost: '0',
billedOverageThisPeriod: '0',
})
.where(eq(userStats.userId, m.userId))
}
}
@@ -50,6 +112,7 @@ export async function resetUsageForSubscription(sub: { plan: string | null; refe
lastPeriodCost: totalLastPeriod,
currentPeriodCost: '0',
proPeriodCostSnapshot: '0', // Clear snapshot at period end
billedOverageThisPeriod: '0', // Clear threshold billing tracker at period end
})
.where(eq(userStats.userId, sub.referenceId))
}
@@ -88,16 +151,14 @@ export async function handleInvoicePaymentSucceeded(event: Stripe.Event) {
.select({ userId: member.userId })
.from(member)
.where(eq(member.organizationId, sub.referenceId))
for (const m of membersRows) {
const row = await db
const memberIds = membersRows.map((m) => m.userId)
if (memberIds.length > 0) {
const blockedRows = await db
.select({ blocked: userStats.billingBlocked })
.from(userStats)
.where(eq(userStats.userId, m.userId))
.limit(1)
if (row.length > 0 && row[0].blocked) {
wasBlocked = true
break
}
.where(inArray(userStats.userId, memberIds))
wasBlocked = blockedRows.some((row) => !!row.blocked)
}
} else {
const row = await db
@@ -113,11 +174,13 @@ export async function handleInvoicePaymentSucceeded(event: Stripe.Event) {
.select({ userId: member.userId })
.from(member)
.where(eq(member.organizationId, sub.referenceId))
for (const m of members) {
const memberIds = members.map((m) => m.userId)
if (memberIds.length > 0) {
await db
.update(userStats)
.set({ billingBlocked: false })
.where(eq(userStats.userId, m.userId))
.where(inArray(userStats.userId, memberIds))
}
} else {
await db
@@ -143,7 +206,8 @@ export async function handleInvoicePaymentFailed(event: Stripe.Event) {
try {
const invoice = event.data.object as Stripe.Invoice
const isOverageInvoice = invoice.metadata?.type === 'overage_billing'
const invoiceType = invoice.metadata?.type
const isOverageInvoice = !!(invoiceType && OVERAGE_INVOICE_TYPES.has(invoiceType))
let stripeSubscriptionId: string | undefined
if (isOverageInvoice) {
@@ -203,11 +267,13 @@ export async function handleInvoicePaymentFailed(event: Stripe.Event) {
.select({ userId: member.userId })
.from(member)
.where(eq(member.organizationId, sub.referenceId))
for (const m of members) {
const memberIds = members.map((m) => m.userId)
if (memberIds.length > 0) {
await db
.update(userStats)
.set({ billingBlocked: true })
.where(eq(userStats.userId, m.userId))
.where(inArray(userStats.userId, memberIds))
}
logger.info('Blocked team/enterprise members due to payment failure', {
organizationId: sub.referenceId,
@@ -281,9 +347,23 @@ export async function handleInvoiceFinalized(event: Stripe.Event) {
// Compute overage (only for team and pro plans), before resetting usage
const totalOverage = await calculateSubscriptionOverage(sub)
if (totalOverage > 0) {
// Get already-billed overage from threshold billing
const billedOverage = await getBilledOverageForSubscription(sub)
// Only bill the remaining unbilled overage
const remainingOverage = Math.max(0, totalOverage - billedOverage)
logger.info('Invoice finalized overage calculation', {
subscriptionId: sub.id,
totalOverage,
billedOverage,
remainingOverage,
billingPeriod,
})
if (remainingOverage > 0) {
const customerId = String(invoice.customer)
const cents = Math.round(totalOverage * 100)
const cents = Math.round(remainingOverage * 100)
const itemIdemKey = `overage-item:${customerId}:${stripeSubscriptionId}:${billingPeriod}`
const invoiceIdemKey = `overage-invoice:${customerId}:${stripeSubscriptionId}:${billingPeriod}`

View File

@@ -4,7 +4,7 @@ import { and, eq, ne } from 'drizzle-orm'
import { calculateSubscriptionOverage } from '@/lib/billing/core/billing'
import { requireStripeClient } from '@/lib/billing/stripe-client'
import { createLogger } from '@/lib/logs/console/logger'
import { resetUsageForSubscription } from './invoices'
import { getBilledOverageForSubscription, resetUsageForSubscription } from './invoices'
const logger = createLogger('StripeSubscriptionWebhooks')
@@ -104,11 +104,24 @@ export async function handleSubscriptionDeleted(subscription: {
return
}
// Get already-billed overage from threshold billing
const billedOverage = await getBilledOverageForSubscription(subscription)
// Only bill the remaining unbilled overage
const remainingOverage = Math.max(0, totalOverage - billedOverage)
logger.info('Subscription deleted overage calculation', {
subscriptionId: subscription.id,
totalOverage,
billedOverage,
remainingOverage,
})
// Create final overage invoice if needed
if (totalOverage > 0 && stripeSubscriptionId) {
if (remainingOverage > 0 && stripeSubscriptionId) {
const stripeSubscription = await stripe.subscriptions.retrieve(stripeSubscriptionId)
const customerId = stripeSubscription.customer as string
const cents = Math.round(totalOverage * 100)
const cents = Math.round(remainingOverage * 100)
// Use the subscription end date for the billing period
const endedAt = stripeSubscription.ended_at || Math.floor(Date.now() / 1000)
@@ -145,7 +158,9 @@ export async function handleSubscriptionDeleted(subscription: {
description: `Usage overage for ${subscription.plan} plan (Final billing period)`,
metadata: {
type: 'final_usage_overage',
usage: totalOverage.toFixed(2),
usage: remainingOverage.toFixed(2),
totalOverage: totalOverage.toFixed(2),
billedOverage: billedOverage.toFixed(2),
billingPeriod,
},
},
@@ -161,7 +176,9 @@ export async function handleSubscriptionDeleted(subscription: {
subscriptionId: subscription.id,
stripeSubscriptionId,
invoiceId: overageInvoice.id,
overageAmount: totalOverage,
totalOverage,
billedOverage,
remainingOverage,
cents,
billingPeriod,
})
@@ -169,7 +186,9 @@ export async function handleSubscriptionDeleted(subscription: {
logger.error('Failed to create final overage invoice', {
subscriptionId: subscription.id,
stripeSubscriptionId,
overageAmount: totalOverage,
totalOverage,
billedOverage,
remainingOverage,
error: invoiceError,
})
// Don't throw - we don't want to fail the webhook

View File

@@ -49,6 +49,7 @@ export const env = createEnv({
STRIPE_ENTERPRISE_PRICE_ID: z.string().min(1).optional(), // Stripe price ID for enterprise tier
ENTERPRISE_TIER_COST_LIMIT: z.number().optional(), // Cost limit for enterprise tier users
BILLING_ENABLED: z.boolean().optional(), // Enable billing enforcement and usage tracking
OVERAGE_THRESHOLD_DOLLARS: z.number().optional().default(50), // Dollar threshold for incremental overage billing (default: $50)
// Email & Communication
EMAIL_VERIFICATION_ENABLED: z.boolean().optional(), // Enable email verification for user registration and login (defaults to false)
@@ -145,7 +146,7 @@ export const env = createEnv({
RATE_LIMIT_ENTERPRISE_ASYNC: z.string().optional().default('1000'), // Enterprise tier async API executions per minute
// Knowledge Base Processing Configuration - Shared across all processing methods
KB_CONFIG_MAX_DURATION: z.number().optional().default(300), // Max processing duration in s
KB_CONFIG_MAX_DURATION: z.number().optional().default(600), // Max processing duration in seconds (10 minutes)
KB_CONFIG_MAX_ATTEMPTS: z.number().optional().default(3), // Max retry attempts
KB_CONFIG_RETRY_FACTOR: z.number().optional().default(2), // Retry backoff factor
KB_CONFIG_MIN_TIMEOUT: z.number().optional().default(1000), // Min timeout in ms

View File

@@ -180,7 +180,9 @@ async function parseDocument(
}
async function handleFileForOCR(fileUrl: string, filename: string, mimeType: string) {
if (fileUrl.startsWith('https://')) {
const isExternalHttps = fileUrl.startsWith('https://') && !fileUrl.includes('/api/files/serve/')
if (isExternalHttps) {
return { httpsUrl: fileUrl }
}
@@ -207,7 +209,16 @@ async function downloadFileWithTimeout(fileUrl: string): Promise<Buffer> {
const timeoutId = setTimeout(() => controller.abort(), TIMEOUTS.FILE_DOWNLOAD)
try {
const response = await fetch(fileUrl, { signal: controller.signal })
const isInternalFileServe = fileUrl.includes('/api/files/serve/')
const headers: HeadersInit = {}
if (isInternalFileServe) {
const { generateInternalToken } = await import('@/lib/auth/internal')
const token = await generateInternalToken()
headers.Authorization = `Bearer ${token}`
}
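// Internal /api/files/serve/ URLs are handled specially above and here: they are no longer
// short-circuited as external HTTPS, and downloads attach a generated internal bearer token,
// presumably because that route now rejects unauthenticated server-side requests.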
const response = await fetch(fileUrl, { signal: controller.signal, headers })
clearTimeout(timeoutId)
if (!response.ok) {

View File

@@ -17,14 +17,14 @@ import type { DocumentSortField, SortOrder } from './types'
const logger = createLogger('DocumentService')
const TIMEOUTS = {
OVERALL_PROCESSING: (env.KB_CONFIG_MAX_DURATION || 600) * 1000, // Increased to 10 minutes to match Trigger's timeout
OVERALL_PROCESSING: (env.KB_CONFIG_MAX_DURATION || 600) * 1000, // Default 10 minutes for KB document processing
EMBEDDINGS_API: (env.KB_CONFIG_MAX_TIMEOUT || 10000) * 18,
} as const
// Configuration for handling large documents
const LARGE_DOC_CONFIG = {
MAX_CHUNKS_PER_BATCH: 500, // Insert embeddings in batches of 500
MAX_EMBEDDING_BATCH: 50, // Generate embeddings in batches of 50
MAX_EMBEDDING_BATCH: 500, // Generate embeddings in batches of 500
MAX_FILE_SIZE: 100 * 1024 * 1024, // 100MB max file size
MAX_CHUNKS_PER_DOCUMENT: 100000, // Maximum chunks allowed per document
}
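A minimal sketch of how batch limits like `MAX_EMBEDDING_BATCH` are typically applied, splitting the chunk list into fixed-size batches before calling the embeddings API (illustrative helper, not the service's actual code):

```typescript
// Split work into batches of at most `batchSize` items (e.g. 500 per the config above).
function toBatches<T>(items: T[], batchSize: number): T[][] {
  const batches: T[][] = []
  for (let i = 0; i < items.length; i += batchSize) {
    batches.push(items.slice(i, i + batchSize))
  }
  return batches
}
```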

View File

@@ -11,6 +11,7 @@ import { eq, sql } from 'drizzle-orm'
import { v4 as uuidv4 } from 'uuid'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { checkUsageStatus, maybeSendUsageThresholdEmail } from '@/lib/billing/core/usage'
import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing'
import { isBillingEnabled } from '@/lib/environment'
import { createLogger } from '@/lib/logs/console/logger'
import { emitWorkflowExecutionCompleted } from '@/lib/logs/events'
@@ -125,8 +126,17 @@ export class ExecutionLogger implements IExecutionLoggerService {
}
finalOutput: BlockOutputData
traceSpans?: TraceSpan[]
workflowInput?: any
}): Promise<WorkflowExecutionLog> {
const { executionId, endedAt, totalDurationMs, costSummary, finalOutput, traceSpans } = params
const {
executionId,
endedAt,
totalDurationMs,
costSummary,
finalOutput,
traceSpans,
workflowInput,
} = params
logger.debug(`Completing workflow execution ${executionId}`)
@@ -144,8 +154,8 @@ export class ExecutionLogger implements IExecutionLoggerService {
const level = hasErrors ? 'error' : 'info'
// Extract files from trace spans and final output
const executionFiles = this.extractFilesFromExecution(traceSpans, finalOutput)
// Extract files from trace spans, final output, and workflow input
const executionFiles = this.extractFilesFromExecution(traceSpans, finalOutput, workflowInput)
const [updatedLog] = await db
.update(workflowExecutionLogs)
@@ -441,6 +451,9 @@ export class ExecutionLogger implements IExecutionLoggerService {
addedCost: costToStore,
addedTokens: costSummary.totalTokens,
})
// Check if user has hit overage threshold and bill incrementally
await checkAndBillOverageThreshold(userId)
} catch (error) {
logger.error('Error updating user stats with cost information', {
workflowId,
@@ -452,9 +465,13 @@ export class ExecutionLogger implements IExecutionLoggerService {
}
/**
* Extract file references from execution trace spans and final output
* Extract file references from execution trace spans, final output, and workflow input
*/
private extractFilesFromExecution(traceSpans?: any[], finalOutput?: any): any[] {
private extractFilesFromExecution(
traceSpans?: any[],
finalOutput?: any,
workflowInput?: any
): any[] {
const files: any[] = []
const seenFileIds = new Set<string>()
@@ -554,6 +571,15 @@ export class ExecutionLogger implements IExecutionLoggerService {
extractFilesFromObject(finalOutput, 'final_output')
}
// Extract files from workflow input
if (workflowInput) {
extractFilesFromObject(workflowInput, 'workflow_input')
}
logger.debug(`Extracted ${files.length} file(s) from execution`, {
fileNames: files.map((f) => f.name),
})
return files
}
}

View File

@@ -28,6 +28,7 @@ export interface SessionCompleteParams {
totalDurationMs?: number
finalOutput?: any
traceSpans?: any[]
workflowInput?: any
}
export interface SessionErrorCompleteParams {
@@ -106,7 +107,7 @@ export class LoggingSession {
}
async complete(params: SessionCompleteParams = {}): Promise<void> {
const { endedAt, totalDurationMs, finalOutput, traceSpans } = params
const { endedAt, totalDurationMs, finalOutput, traceSpans, workflowInput } = params
try {
const costSummary = calculateCostSummary(traceSpans || [])
@@ -118,6 +119,7 @@ export class LoggingSession {
costSummary,
finalOutput: finalOutput || {},
traceSpans: traceSpans || [],
workflowInput,
})
if (this.requestId) {
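An illustrative call site for the extended `complete()` signature; `loggingSession`, `durationMs`, `result`, and `requestBody` are placeholder names, not identifiers from this change:

```typescript
// Sketch: workflowInput is forwarded so file references in the request body can be
// extracted into the execution log alongside trace spans and final output.
await loggingSession.complete({
  endedAt: new Date().toISOString(), // assumed ISO-string timestamp
  totalDurationMs: durationMs,
  finalOutput: result.output,
  traceSpans: result.traceSpans,
  workflowInput: requestBody, // the field added in this change
})
```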

View File

@@ -4,25 +4,19 @@ import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('Redis')
// Only use Redis if explicitly configured
const redisUrl = env.REDIS_URL
// Global Redis client for connection pooling
let globalRedisClient: Redis | null = null
// Fallback in-memory cache for when Redis is not available
const inMemoryCache = new Map<string, { value: string; expiry: number | null }>()
const MAX_CACHE_SIZE = 1000
const MESSAGE_ID_PREFIX = 'processed:'
const MESSAGE_ID_EXPIRY = 60 * 60 * 24 * 7
/**
* Get a Redis client instance
* Uses connection pooling to avoid creating a new connection for each request
*/
export function getRedisClient(): Redis | null {
// For server-side only
if (typeof window !== 'undefined') return null
// Return null immediately if no Redis URL is configured
if (!redisUrl) {
return null
}
@@ -30,25 +24,19 @@ export function getRedisClient(): Redis | null {
if (globalRedisClient) return globalRedisClient
try {
// Create a new Redis client with optimized settings for serverless
globalRedisClient = new Redis(redisUrl, {
// Keep alive is critical for serverless to reuse connections
keepAlive: 1000,
// Faster connection timeout for serverless
connectTimeout: 5000,
// Disable reconnection attempts in serverless
maxRetriesPerRequest: 3,
// Retry strategy with exponential backoff
retryStrategy: (times) => {
if (times > 5) {
logger.warn('Redis connection failed after 5 attempts, using fallback')
return null // Stop retrying
logger.warn('Redis connection failed after 5 attempts')
return null
}
return Math.min(times * 200, 2000) // Exponential backoff
return Math.min(times * 200, 2000)
},
})
// Handle connection events
globalRedisClient.on('error', (err: any) => {
logger.error('Redis connection error:', { err })
if (err.code === 'ECONNREFUSED' || err.code === 'ETIMEDOUT') {
@@ -65,144 +53,90 @@ export function getRedisClient(): Redis | null {
}
}
// Message ID cache functions
const MESSAGE_ID_PREFIX = 'processed:' // Generic prefix
const MESSAGE_ID_EXPIRY = 60 * 60 * 24 * 7 // 7 days in seconds
/**
* Check if a key exists in Redis or fallback cache.
* @param key The key to check (e.g., messageId, lockKey).
* @returns True if the key exists and hasn't expired, false otherwise.
* Check if a key exists in Redis
* @param key The key to check
* @returns True if the key exists, false otherwise
*/
export async function hasProcessedMessage(key: string): Promise<boolean> {
const redis = getRedisClient()
if (!redis) {
return false
}
try {
const redis = getRedisClient()
const fullKey = `${MESSAGE_ID_PREFIX}${key}` // Use generic prefix
if (redis) {
// Use Redis if available
const result = await redis.exists(fullKey)
return result === 1
}
// Fallback to in-memory cache
const cacheEntry = inMemoryCache.get(fullKey)
if (!cacheEntry) return false
// Check if the entry has expired
if (cacheEntry.expiry && cacheEntry.expiry < Date.now()) {
inMemoryCache.delete(fullKey)
return false
}
return true
const fullKey = `${MESSAGE_ID_PREFIX}${key}`
const result = await redis.exists(fullKey)
return result === 1
} catch (error) {
logger.error(`Error checking key ${key}:`, { error })
// Fallback to in-memory cache on error
const fullKey = `${MESSAGE_ID_PREFIX}${key}`
const cacheEntry = inMemoryCache.get(fullKey)
return !!cacheEntry && (!cacheEntry.expiry || cacheEntry.expiry > Date.now())
return false
}
}
/**
* Mark a key as processed/present in Redis or fallback cache.
* @param key The key to mark (e.g., messageId, lockKey).
* @param expirySeconds Optional expiry time in seconds (defaults to 7 days).
* Mark a key as processed in Redis
* @param key The key to mark
* @param expirySeconds Optional expiry time in seconds (defaults to 7 days)
*/
export async function markMessageAsProcessed(
key: string,
expirySeconds: number = MESSAGE_ID_EXPIRY
): Promise<void> {
const redis = getRedisClient()
if (!redis) {
logger.warn(`Cannot mark message as processed - Redis unavailable: ${key}`)
return
}
try {
const redis = getRedisClient()
const fullKey = `${MESSAGE_ID_PREFIX}${key}` // Use generic prefix
if (redis) {
// Use Redis if available - use pipelining for efficiency
await redis.set(fullKey, '1', 'EX', expirySeconds)
} else {
// Fallback to in-memory cache
const expiry = expirySeconds ? Date.now() + expirySeconds * 1000 : null
inMemoryCache.set(fullKey, { value: '1', expiry })
// Clean up old message IDs if cache gets too large
if (inMemoryCache.size > MAX_CACHE_SIZE) {
const now = Date.now()
// First try to remove expired entries
for (const [cacheKey, entry] of inMemoryCache.entries()) {
if (entry.expiry && entry.expiry < now) {
inMemoryCache.delete(cacheKey)
}
}
// If still too large, remove oldest entries (FIFO based on insertion order)
if (inMemoryCache.size > MAX_CACHE_SIZE) {
const keysToDelete = Array.from(inMemoryCache.keys()).slice(
0,
inMemoryCache.size - MAX_CACHE_SIZE
)
for (const keyToDelete of keysToDelete) {
inMemoryCache.delete(keyToDelete)
}
}
}
}
const fullKey = `${MESSAGE_ID_PREFIX}${key}`
await redis.set(fullKey, '1', 'EX', expirySeconds)
} catch (error) {
logger.error(`Error marking key ${key} as processed:`, { error })
// Fallback to in-memory cache on error
const fullKey = `${MESSAGE_ID_PREFIX}${key}`
const expiry = expirySeconds ? Date.now() + expirySeconds * 1000 : null
inMemoryCache.set(fullKey, { value: '1', expiry })
}
}
/**
* Attempts to acquire a lock using Redis SET NX command.
* @param lockKey The key to use for the lock.
* @param value The value to set (e.g., a unique identifier for the process holding the lock).
* @param expirySeconds The lock's time-to-live in seconds.
* @returns True if the lock was acquired successfully, false otherwise.
* Attempt to acquire a distributed lock using Redis SET NX command
* @param lockKey The key to use for the lock
* @param value The value to set (e.g., a unique identifier for the process holding the lock)
* @param expirySeconds The lock's time-to-live in seconds
* @returns True if the lock was acquired successfully, false otherwise
*/
export async function acquireLock(
lockKey: string,
value: string,
expirySeconds: number
): Promise<boolean> {
const redis = getRedisClient()
if (!redis) {
logger.warn('Redis client not available, cannot acquire lock.')
return false
}
try {
const redis = getRedisClient()
if (!redis) {
logger.warn('Redis client not available, cannot acquire lock.')
// Fallback behavior: maybe allow processing but log a warning?
// Or treat as lock acquired if no Redis? Depends on desired behavior.
return true // Or false, depending on safety requirements
}
// Use SET key value EX expirySeconds NX
// Returns "OK" if successful, null if key already exists (lock held)
const result = await redis.set(lockKey, value, 'EX', expirySeconds, 'NX')
return result === 'OK'
} catch (error) {
logger.error(`Error acquiring lock for key ${lockKey}:`, { error })
// Treat errors as failure to acquire lock for safety
return false
}
}
/**
* Retrieves the value of a key from Redis.
* @param key The key to retrieve.
* @returns The value of the key, or null if the key doesn't exist or an error occurs.
* Retrieve the value of a key from Redis
* @param key The key to retrieve
* @returns The value of the key, or null if the key doesn't exist or an error occurs
*/
export async function getLockValue(key: string): Promise<string | null> {
const redis = getRedisClient()
if (!redis) {
logger.warn('Redis client not available, cannot get lock value.')
return null
}
try {
const redis = getRedisClient()
if (!redis) {
logger.warn('Redis client not available, cannot get lock value.')
return null // Cannot determine lock value
}
return await redis.get(key)
} catch (error) {
logger.error(`Error getting value for key ${key}:`, { error })
@@ -211,20 +145,18 @@ export async function getLockValue(key: string): Promise<string | null> {
}
/**
* Releases a lock by deleting the key.
* Ideally, use Lua script for safe release (check value before deleting),
* but simple DEL is often sufficient if lock expiry is handled well.
* @param lockKey The key of the lock to release.
* Release a lock by deleting the key
* @param lockKey The key of the lock to release
*/
export async function releaseLock(lockKey: string): Promise<void> {
const redis = getRedisClient()
if (!redis) {
logger.warn('Redis client not available, cannot release lock.')
return
}
try {
const redis = getRedisClient()
if (redis) {
await redis.del(lockKey)
} else {
logger.warn('Redis client not available, cannot release lock.')
// No fallback needed for releasing if using in-memory cache for locking wasn't implemented
}
await redis.del(lockKey)
} catch (error) {
logger.error(`Error releasing lock for key ${lockKey}:`, { error })
}
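A short usage sketch for the lock helpers above; the key and TTL are hypothetical:

```typescript
import { randomUUID } from 'crypto'

const lockKey = 'webhook:processing:example-id' // hypothetical key
const holder = randomUUID()

// Acquire a 60-second lock; only one worker proceeds, the rest skip or retry later.
if (await acquireLock(lockKey, holder, 60)) {
  try {
    // ... process the message exactly once ...
  } finally {
    await releaseLock(lockKey)
  }
}
```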

View File

@@ -37,7 +37,7 @@ export function getBlockOutputs(
return {
input: { type: 'string', description: 'User message' },
conversationId: { type: 'string', description: 'Conversation ID' },
files: { type: 'array', description: 'Uploaded files' },
files: { type: 'files', description: 'Uploaded files' },
}
}
if (
@@ -136,7 +136,20 @@ export function getBlockOutputPaths(
// If value has 'type' property, it's a leaf node (output definition)
if (value && typeof value === 'object' && 'type' in value) {
paths.push(path)
// Special handling for 'files' type - expand to show array element properties
if (value.type === 'files') {
// Show properties without [0] for cleaner display
// The tag dropdown will add [0] automatically when inserting
paths.push(`${path}.url`)
paths.push(`${path}.name`)
paths.push(`${path}.size`)
paths.push(`${path}.type`)
paths.push(`${path}.key`)
paths.push(`${path}.uploadedAt`)
paths.push(`${path}.expiresAt`)
} else {
paths.push(path)
}
}
// If value is an object without 'type', recurse into it
else if (value && typeof value === 'object' && !Array.isArray(value)) {
@@ -164,8 +177,33 @@ export function getBlockOutputType(
): string {
const outputs = getBlockOutputs(blockType, subBlocks, triggerMode)
// Navigate through nested path
const pathParts = outputPath.split('.')
const arrayIndexRegex = /\[(\d+)\]/g
const cleanPath = outputPath.replace(arrayIndexRegex, '')
const pathParts = cleanPath.split('.').filter(Boolean)
const filePropertyTypes: Record<string, string> = {
url: 'string',
name: 'string',
size: 'number',
type: 'string',
key: 'string',
uploadedAt: 'string',
expiresAt: 'string',
}
const lastPart = pathParts[pathParts.length - 1]
if (lastPart && filePropertyTypes[lastPart]) {
const parentPath = pathParts.slice(0, -1).join('.')
let current: any = outputs
for (const part of pathParts.slice(0, -1)) {
if (!current || typeof current !== 'object') break
current = current[part]
}
if (current && typeof current === 'object' && 'type' in current && current.type === 'files') {
return filePropertyTypes[lastPart]
}
}
let current: any = outputs
for (const part of pathParts) {
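A minimal sketch of the index-stripping step above, showing how a tag path with an array index normalizes before the output-tree walk:

```typescript
// "files[0].size" and "files.size" resolve to the same parts after normalization.
const outputPath = 'files[0].size'
const cleanPath = outputPath.replace(/\[(\d+)\]/g, '') // -> "files.size"
const pathParts = cleanPath.split('.').filter(Boolean) // -> ["files", "size"]
```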

View File

@@ -1,53 +1,173 @@
import { db } from '@sim/db'
import { apiKey, userStats, workflow as workflowTable } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import {
apiKey,
permissions,
userStats,
workflow as workflowTable,
workspace,
} from '@sim/db/schema'
import type { InferSelectModel } from 'drizzle-orm'
import { and, eq } from 'drizzle-orm'
import { NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { getEnv } from '@/lib/env'
import { createLogger } from '@/lib/logs/console/logger'
import { hasWorkspaceAdminAccess } from '@/lib/permissions/utils'
import type { PermissionType } from '@/lib/permissions/utils'
import type { ExecutionResult } from '@/executor/types'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
const logger = createLogger('WorkflowUtils')
const WORKFLOW_BASE_SELECTION = {
id: workflowTable.id,
userId: workflowTable.userId,
workspaceId: workflowTable.workspaceId,
folderId: workflowTable.folderId,
name: workflowTable.name,
description: workflowTable.description,
color: workflowTable.color,
lastSynced: workflowTable.lastSynced,
createdAt: workflowTable.createdAt,
updatedAt: workflowTable.updatedAt,
isDeployed: workflowTable.isDeployed,
deployedState: workflowTable.deployedState,
deployedAt: workflowTable.deployedAt,
pinnedApiKeyId: workflowTable.pinnedApiKeyId,
collaborators: workflowTable.collaborators,
runCount: workflowTable.runCount,
lastRunAt: workflowTable.lastRunAt,
variables: workflowTable.variables,
isPublished: workflowTable.isPublished,
marketplaceData: workflowTable.marketplaceData,
pinnedApiKeyKey: apiKey.key,
pinnedApiKeyName: apiKey.name,
pinnedApiKeyType: apiKey.type,
pinnedApiKeyWorkspaceId: apiKey.workspaceId,
}
type WorkflowSelection = InferSelectModel<typeof workflowTable>
type ApiKeySelection = InferSelectModel<typeof apiKey>
type WorkflowRow = WorkflowSelection & {
pinnedApiKeyKey: ApiKeySelection['key'] | null
pinnedApiKeyName: ApiKeySelection['name'] | null
pinnedApiKeyType: ApiKeySelection['type'] | null
pinnedApiKeyWorkspaceId: ApiKeySelection['workspaceId'] | null
}
type WorkflowWithPinnedKey = WorkflowSelection & {
pinnedApiKey: Pick<ApiKeySelection, 'id' | 'name' | 'key' | 'type' | 'workspaceId'> | null
}
function mapWorkflowRow(row: WorkflowRow | undefined): WorkflowWithPinnedKey | undefined {
if (!row) {
return undefined
}
const {
pinnedApiKeyKey,
pinnedApiKeyName,
pinnedApiKeyType,
pinnedApiKeyWorkspaceId,
...workflowWithoutDerived
} = row
const pinnedApiKey =
workflowWithoutDerived.pinnedApiKeyId && pinnedApiKeyKey && pinnedApiKeyName && pinnedApiKeyType
? {
id: workflowWithoutDerived.pinnedApiKeyId,
name: pinnedApiKeyName,
key: pinnedApiKeyKey,
type: pinnedApiKeyType,
workspaceId: pinnedApiKeyWorkspaceId,
}
: null
return {
...workflowWithoutDerived,
pinnedApiKey,
}
}
export async function getWorkflowById(id: string) {
const workflows = await db
.select({
id: workflowTable.id,
userId: workflowTable.userId,
workspaceId: workflowTable.workspaceId,
folderId: workflowTable.folderId,
name: workflowTable.name,
description: workflowTable.description,
color: workflowTable.color,
lastSynced: workflowTable.lastSynced,
createdAt: workflowTable.createdAt,
updatedAt: workflowTable.updatedAt,
isDeployed: workflowTable.isDeployed,
deployedState: workflowTable.deployedState,
deployedAt: workflowTable.deployedAt,
pinnedApiKeyId: workflowTable.pinnedApiKeyId,
collaborators: workflowTable.collaborators,
runCount: workflowTable.runCount,
lastRunAt: workflowTable.lastRunAt,
variables: workflowTable.variables,
isPublished: workflowTable.isPublished,
marketplaceData: workflowTable.marketplaceData,
pinnedApiKey: {
id: apiKey.id,
name: apiKey.name,
key: apiKey.key,
type: apiKey.type,
workspaceId: apiKey.workspaceId,
},
})
const rows = await db
.select(WORKFLOW_BASE_SELECTION)
.from(workflowTable)
.leftJoin(apiKey, eq(workflowTable.pinnedApiKeyId, apiKey.id))
.where(eq(workflowTable.id, id))
.limit(1)
return workflows[0]
return mapWorkflowRow(rows[0] as WorkflowRow | undefined)
}
type WorkflowRecord = ReturnType<typeof getWorkflowById> extends Promise<infer R>
? NonNullable<R>
: never
export interface WorkflowAccessContext {
workflow: WorkflowRecord
workspaceOwnerId: string | null
workspacePermission: PermissionType | null
isOwner: boolean
isWorkspaceOwner: boolean
}
export async function getWorkflowAccessContext(
workflowId: string,
userId?: string
): Promise<WorkflowAccessContext | null> {
const rows = await db
.select({
...WORKFLOW_BASE_SELECTION,
workspaceOwnerId: workspace.ownerId,
workspacePermission: permissions.permissionType,
})
.from(workflowTable)
.leftJoin(apiKey, eq(workflowTable.pinnedApiKeyId, apiKey.id))
.leftJoin(workspace, eq(workspace.id, workflowTable.workspaceId))
.leftJoin(
permissions,
and(
eq(permissions.entityType, 'workspace'),
eq(permissions.entityId, workflowTable.workspaceId),
userId ? eq(permissions.userId, userId) : eq(permissions.userId, '' as unknown as string)
)
)
.where(eq(workflowTable.id, workflowId))
.limit(1)
const row = rows[0] as
| (WorkflowRow & {
workspaceOwnerId: string | null
workspacePermission: PermissionType | null
})
| undefined
if (!row) {
return null
}
const workflow = mapWorkflowRow(row as WorkflowRow)
if (!workflow) {
return null
}
const resolvedWorkspaceOwner = row.workspaceOwnerId ?? null
const resolvedWorkspacePermission = row.workspacePermission ?? null
const resolvedUserId = userId ?? null
const isOwner = resolvedUserId ? workflow.userId === resolvedUserId : false
const isWorkspaceOwner = resolvedUserId ? resolvedWorkspaceOwner === resolvedUserId : false
return {
workflow,
workspaceOwnerId: resolvedWorkspaceOwner,
workspacePermission: resolvedWorkspacePermission,
isOwner,
isWorkspaceOwner,
}
}
export async function updateWorkflowRunCounts(workflowId: string, runs = 1) {
@@ -421,8 +541,8 @@ export async function validateWorkflowPermissions(
}
}
const workflow = await getWorkflowById(workflowId)
if (!workflow) {
const accessContext = await getWorkflowAccessContext(workflowId, session.user.id)
if (!accessContext) {
logger.warn(`[${requestId}] Workflow ${workflowId} not found`)
return {
error: { message: 'Workflow not found', status: 404 },
@@ -431,9 +551,31 @@ export async function validateWorkflowPermissions(
}
}
const { workflow, workspacePermission, isOwner } = accessContext
if (isOwner) {
return {
error: null,
session,
workflow,
}
}
if (workflow.workspaceId) {
const hasAccess = await hasWorkspaceAdminAccess(session.user.id, workflow.workspaceId)
if (!hasAccess) {
let hasPermission = false
if (action === 'read') {
// Any workspace permission allows read
hasPermission = workspacePermission !== null
} else if (action === 'write') {
// Write or admin permission allows write
hasPermission = workspacePermission === 'write' || workspacePermission === 'admin'
} else if (action === 'admin') {
// Only admin permission allows admin actions
hasPermission = workspacePermission === 'admin'
}
if (!hasPermission) {
logger.warn(
`[${requestId}] User ${session.user.id} unauthorized to ${action} workflow ${workflowId} in workspace ${workflow.workspaceId}`
)
@@ -444,15 +586,13 @@ export async function validateWorkflowPermissions(
}
}
} else {
if (workflow.userId !== session.user.id) {
logger.warn(
`[${requestId}] User ${session.user.id} unauthorized to ${action} workflow ${workflowId} owned by ${workflow.userId}`
)
return {
error: { message: `Unauthorized: Access denied to ${action} this workflow`, status: 403 },
session: null,
workflow: null,
}
logger.warn(
`[${requestId}] User ${session.user.id} unauthorized to ${action} workflow ${workflowId} owned by ${workflow.userId}`
)
return {
error: { message: `Unauthorized: Access denied to ${action} this workflow`, status: 403 },
session: null,
workflow: null,
}
}
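The permission mapping introduced above reduces to a small pure function; a hedged restatement for clarity (not the exported helper, and the union members are inferred from the checks shown):

```typescript
type PermissionType = 'admin' | 'write' | 'read' // assumed union

// Any workspace permission allows read; write needs write or admin; admin needs admin.
function hasActionPermission(
  action: 'read' | 'write' | 'admin',
  workspacePermission: PermissionType | null
): boolean {
  if (workspacePermission === null) return false
  if (action === 'read') return true
  if (action === 'write') return workspacePermission === 'write' || workspacePermission === 'admin'
  return workspacePermission === 'admin'
}
```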

View File

@@ -13,15 +13,15 @@ const connectionString = env.DATABASE_URL
const socketDb = drizzle(
postgres(connectionString, {
prepare: false,
idle_timeout: 20,
connect_timeout: 10,
max: 5,
idle_timeout: 10,
connect_timeout: 20,
max: 15,
onnotice: () => {},
}),
{ schema }
)
// Use dedicated connection for socket operations
// Use dedicated connection for socket operations, fallback to shared db for compatibility
const db = socketDb
// Constants

View File

@@ -11,8 +11,8 @@ const connectionString = env.DATABASE_URL
const db = drizzle(
postgres(connectionString, {
prepare: false,
idle_timeout: 20,
connect_timeout: 10,
idle_timeout: 15,
connect_timeout: 20,
max: 3,
onnotice: () => {},
}),

View File

@@ -1,3 +1,11 @@
export interface ChatAttachment {
id: string
name: string
type: string
dataUrl: string
size?: number
}
export interface ChatMessage {
id: string
content: string | any
@@ -6,6 +14,7 @@ export interface ChatMessage {
timestamp: string
blockId?: string
isStreaming?: boolean
attachments?: ChatAttachment[]
}
export interface OutputConfig {
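An illustrative `ChatAttachment` value matching the new interface above; the data URL is a truncated placeholder:

```typescript
const attachment: ChatAttachment = {
  id: crypto.randomUUID(),
  name: 'report.pdf',
  type: 'application/pdf',
  dataUrl: 'data:application/pdf;base64,JVBERi0x...', // placeholder, not a real document
  size: 1024,
}
```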

View File

@@ -1,7 +1,7 @@
# ========================================
# Base Stage: Alpine Linux with Bun
# ========================================
FROM oven/bun:alpine AS base
FROM oven/bun:1.2.22-alpine AS base
# ========================================
# Dependencies Stage: Install Dependencies

View File

@@ -1,7 +1,7 @@
# ========================================
# Dependencies Stage: Install Dependencies
# ========================================
FROM oven/bun:1.2.21-alpine AS deps
FROM oven/bun:1.2.22-alpine AS deps
WORKDIR /app
# Copy only package files needed for migrations
@@ -14,7 +14,7 @@ RUN bun install --ignore-scripts
# ========================================
# Runner Stage: Production Environment
# ========================================
FROM oven/bun:1.2.21-alpine AS runner
FROM oven/bun:1.2.22-alpine AS runner
WORKDIR /app
# Copy only the necessary files from deps

View File

@@ -1,7 +1,7 @@
# ========================================
# Base Stage: Alpine Linux with Bun
# ========================================
FROM oven/bun:alpine AS base
FROM oven/bun:1.2.22-alpine AS base
# ========================================
# Dependencies Stage: Install Dependencies

View File

@@ -1,9 +1,8 @@
import { drizzle, type PostgresJsDatabase } from 'drizzle-orm/postgres-js'
import { drizzle } from 'drizzle-orm/postgres-js'
import postgres from 'postgres'
import * as schema from './schema'
export * from './schema'
export type { PostgresJsDatabase }
const connectionString = process.env.DATABASE_URL!
if (!connectionString) {
@@ -13,8 +12,8 @@ if (!connectionString) {
const postgresClient = postgres(connectionString, {
prepare: false,
idle_timeout: 20,
connect_timeout: 10,
max: 20,
connect_timeout: 30,
max: 30,
onnotice: () => {},
})

View File

@@ -0,0 +1 @@
ALTER TABLE "user_stats" ADD COLUMN "billed_overage_this_period" numeric DEFAULT '0' NOT NULL;

File diff suppressed because it is too large

View File

@@ -673,6 +673,13 @@
"when": 1759534968812,
"tag": "0096_tranquil_arachne",
"breakpoints": true
},
{
"idx": 97,
"version": "7",
"when": 1759963094548,
"tag": "0097_dazzling_mephisto",
"breakpoints": true
}
]
}

View File

@@ -564,6 +564,7 @@ export const userStats = pgTable('user_stats', {
// Billing period tracking
currentPeriodCost: decimal('current_period_cost').notNull().default('0'), // Usage in current billing period
lastPeriodCost: decimal('last_period_cost').default('0'), // Usage from previous billing period
billedOverageThisPeriod: decimal('billed_overage_this_period').notNull().default('0'), // Amount of overage already billed via threshold billing
// Pro usage snapshot when joining a team (to prevent double-billing)
proPeriodCostSnapshot: decimal('pro_period_cost_snapshot').default('0'), // Snapshot of Pro usage when joining team
// Copilot usage tracking

View File

@@ -57,7 +57,7 @@ result = client.execute_workflow(
**Parameters:**
- `workflow_id` (str): The ID of the workflow to execute
- `input_data` (dict, optional): Input data to pass to the workflow
- `input_data` (dict, optional): Input data to pass to the workflow. File objects are automatically converted to base64.
- `timeout` (float): Timeout in seconds (default: 30.0)
**Returns:** `WorkflowExecutionResult`
@@ -265,6 +265,57 @@ client = SimStudioClient(
)
```
### File Upload
File objects are automatically detected and converted to base64 format. Include them in your input data under the field name defined by your workflow's API trigger input format.
The SDK converts file objects to this format:
```python
{
'type': 'file',
'data': 'data:mime/type;base64,base64data',
'name': 'filename',
'mime': 'mime/type'
}
```
Alternatively, you can manually provide files using the URL format:
```python
{
'type': 'url',
'data': 'https://example.com/file.pdf',
'name': 'file.pdf',
'mime': 'application/pdf'
}
```
```python
from simstudio import SimStudioClient
import os
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
# Upload a single file - include it under the field name from your API trigger
with open('document.pdf', 'rb') as f:
result = client.execute_workflow(
'workflow-id',
input_data={
'documents': [f],  # Field name must match your API trigger's file input field
'instructions': 'Analyze this document'
}
)
# Upload multiple files
with open('doc1.pdf', 'rb') as f1, open('doc2.pdf', 'rb') as f2:
result = client.execute_workflow(
'workflow-id',
input_data={
'attachments': [f1, f2],  # Field name must match your API trigger's file input field
'query': 'Compare these documents'
}
)
```
### Batch Workflow Execution
```python
@@ -276,14 +327,14 @@ client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_workflows_batch(workflow_data_pairs):
"""Execute multiple workflows with different input data."""
results = []
for workflow_id, input_data in workflow_data_pairs:
try:
# Validate workflow before execution
if not client.validate_workflow(workflow_id):
print(f"Skipping {workflow_id}: not deployed")
continue
result = client.execute_workflow(workflow_id, input_data)
results.append({
"workflow_id": workflow_id,
@@ -291,14 +342,14 @@ def execute_workflows_batch(workflow_data_pairs):
"output": result.output,
"error": result.error
})
except Exception as error:
results.append({
"workflow_id": workflow_id,
"success": False,
"error": str(error)
})
return results
# Example usage

View File

@@ -0,0 +1,55 @@
"""
Example: Upload files with workflow execution
This example demonstrates how to upload files when executing a workflow.
Files are automatically detected and converted to base64 format.
"""
from simstudio import SimStudioClient
import os
def main():
# Initialize the client
api_key = os.getenv('SIM_API_KEY')
if not api_key:
raise ValueError('SIM_API_KEY environment variable is required')
client = SimStudioClient(api_key=api_key)
# Example 1: Upload a single file
# Include file under the field name from your workflow's API trigger input format
print("Example 1: Upload a single file")
with open('document.pdf', 'rb') as f:
result = client.execute_workflow(
workflow_id='your-workflow-id',
input_data={
'documents': [f], # Field name must match your API trigger's file input field
'instructions': 'Analyze this document'
}
)
if result.success:
print(f"Success! Output: {result.output}")
else:
print(f"Failed: {result.error}")
# Example 2: Upload multiple files
print("\nExample 2: Upload multiple files")
with open('document1.pdf', 'rb') as f1, open('document2.pdf', 'rb') as f2:
result = client.execute_workflow(
workflow_id='your-workflow-id',
input_data={
'attachments': [f1, f2], # Field name must match your API trigger's file input field
'query': 'Compare these documents'
}
)
if result.success:
print(f"Success! Output: {result.output}")
else:
print(f"Failed: {result.error}")
if __name__ == '__main__':
main()

View File

@@ -8,6 +8,7 @@ from typing import Any, Dict, Optional, Union
from dataclasses import dataclass
import time
import random
import os
import requests
@@ -109,6 +110,53 @@ class SimStudioClient:
})
self._rate_limit_info: Optional[RateLimitInfo] = None
def _convert_files_to_base64(self, value: Any) -> Any:
"""
Convert file objects in input to API format (base64).
Recursively processes nested dicts and lists.
"""
import base64
import io
# Check if this is a file-like object
if hasattr(value, 'read') and callable(value.read):
# Save current position if seekable
initial_pos = value.tell() if hasattr(value, 'tell') else None
# Read file bytes
file_bytes = value.read()
# Restore position if seekable
if initial_pos is not None and hasattr(value, 'seek'):
value.seek(initial_pos)
# Encode to base64
base64_data = base64.b64encode(file_bytes).decode('utf-8')
# Get file metadata
filename = getattr(value, 'name', 'file')
if isinstance(filename, str):
filename = os.path.basename(filename)
content_type = getattr(value, 'content_type', 'application/octet-stream')
return {
'type': 'file',
'data': f'data:{content_type};base64,{base64_data}',
'name': filename,
'mime': content_type
}
# Recursively process lists
if isinstance(value, list):
return [self._convert_files_to_base64(item) for item in value]
# Recursively process dicts
if isinstance(value, dict):
return {k: self._convert_files_to_base64(v) for k, v in value.items()}
return value
def execute_workflow(
self,
workflow_id: str,
@@ -122,9 +170,11 @@ class SimStudioClient:
Execute a workflow with optional input data.
If async_execution is True, returns immediately with a task ID.
File objects in input_data will be automatically detected and converted to base64.
Args:
workflow_id: The ID of the workflow to execute
input_data: Input data to pass to the workflow
input_data: Input data to pass to the workflow (can include file-like objects)
timeout: Timeout in seconds (default: 30.0)
stream: Enable streaming responses (default: None)
selected_outputs: Block outputs to stream (e.g., ["agent1.content"])
@@ -138,19 +188,23 @@ class SimStudioClient:
"""
url = f"{self.base_url}/api/workflows/{workflow_id}/execute"
# Build request body - spread input at root level, then add API control parameters
body = input_data.copy() if input_data is not None else {}
if stream is not None:
body['stream'] = stream
if selected_outputs is not None:
body['selectedOutputs'] = selected_outputs
# Build headers - async execution uses X-Execution-Mode header
headers = self._session.headers.copy()
if async_execution:
headers['X-Execution-Mode'] = 'async'
try:
# Build JSON body - spread input at root level, then add API control parameters
body = input_data.copy() if input_data is not None else {}
# Convert any file objects in the input to base64 format
body = self._convert_files_to_base64(body)
if stream is not None:
body['stream'] = stream
if selected_outputs is not None:
body['selectedOutputs'] = selected_outputs
response = self._session.post(
url,
json=body,
@@ -281,7 +335,7 @@ class SimStudioClient:
Args:
workflow_id: The ID of the workflow to execute
input_data: Input data to pass to the workflow
input_data: Input data to pass to the workflow (can include file-like objects)
timeout: Timeout for the initial request in seconds
stream: Enable streaming responses (default: None)
selected_outputs: Block outputs to stream (e.g., ["agent1.content"])
@@ -373,7 +427,7 @@ class SimStudioClient:
Args:
workflow_id: The ID of the workflow to execute
input_data: Input data to pass to the workflow
input_data: Input data to pass to the workflow (can include file-like objects)
timeout: Timeout in seconds
stream: Enable streaming responses
selected_outputs: Block outputs to stream

View File

@@ -61,7 +61,7 @@ const result = await client.executeWorkflow('workflow-id', {
**Parameters:**
- `workflowId` (string): The ID of the workflow to execute
- `options` (ExecutionOptions, optional):
- `input` (any): Input data to pass to the workflow
- `input` (any): Input data to pass to the workflow. File objects are automatically converted to base64.
- `timeout` (number): Timeout in milliseconds (default: 30000)
**Returns:** `Promise<WorkflowExecutionResult>`
@@ -261,6 +261,64 @@ const client = new SimStudioClient({
});
```
### File Upload
File objects are automatically detected and converted to base64 format. Include them in your input under the field name defined by your workflow's API trigger input format.
The SDK converts File objects to this format:
```typescript
{
type: 'file',
data: 'data:mime/type;base64,base64data',
name: 'filename',
mime: 'mime/type'
}
```
Alternatively, you can manually provide files using the URL format:
```typescript
{
type: 'url',
data: 'https://example.com/file.pdf',
name: 'file.pdf',
mime: 'application/pdf'
}
```
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
import fs from 'fs';
const client = new SimStudioClient({
apiKey: process.env.SIM_API_KEY!
});
// Node.js: Read file and create File object
const fileBuffer = fs.readFileSync('./document.pdf');
const file = new File([fileBuffer], 'document.pdf', { type: 'application/pdf' });
// Include files under the field name from your API trigger's input format
const result = await client.executeWorkflow('workflow-id', {
input: {
documents: [file], // Field name must match your API trigger's file input field
instructions: 'Process this document'
}
});
// Browser: From file input
const handleFileUpload = async (event: Event) => {
const input = event.target as HTMLInputElement;
const files = Array.from(input.files || []);
const result = await client.executeWorkflow('workflow-id', {
input: {
attachments: files, // Field name must match your API trigger's file input field
query: 'Analyze these files'
}
});
};
```
## Getting Your API Key
1. Log in to your [Sim](https://sim.ai) account

View File

@@ -108,6 +108,55 @@ export class SimStudioClient {
* Execute a workflow with optional input data
* If async is true, returns immediately with a task ID
*/
/**
* Convert File objects in input to API format (base64)
* Recursively processes nested objects and arrays
*/
private async convertFilesToBase64(
value: any,
visited: WeakSet<object> = new WeakSet()
): Promise<any> {
if (value instanceof File) {
const arrayBuffer = await value.arrayBuffer()
const buffer = Buffer.from(arrayBuffer)
const base64 = buffer.toString('base64')
return {
type: 'file',
data: `data:${value.type || 'application/octet-stream'};base64,${base64}`,
name: value.name,
mime: value.type || 'application/octet-stream',
}
}
if (Array.isArray(value)) {
if (visited.has(value)) {
return '[Circular]'
}
visited.add(value)
const result = await Promise.all(
value.map((item) => this.convertFilesToBase64(item, visited))
)
visited.delete(value)
return result
}
if (value !== null && typeof value === 'object') {
if (visited.has(value)) {
return '[Circular]'
}
visited.add(value)
const converted: any = {}
for (const [key, val] of Object.entries(value)) {
converted[key] = await this.convertFilesToBase64(val, visited)
}
visited.delete(value)
return converted
}
return value
}
async executeWorkflow(
workflowId: string,
options: ExecutionOptions = {}
@@ -121,15 +170,6 @@ export class SimStudioClient {
setTimeout(() => reject(new Error('TIMEOUT')), timeout)
})
// Build request body - spread input at root level, then add API control parameters
const body: any = input !== undefined ? { ...input } : {}
if (stream !== undefined) {
body.stream = stream
}
if (selectedOutputs !== undefined) {
body.selectedOutputs = selectedOutputs
}
// Build headers - async execution uses X-Execution-Mode header
const headers: Record<string, string> = {
'Content-Type': 'application/json',
@@ -139,10 +179,23 @@ export class SimStudioClient {
headers['X-Execution-Mode'] = 'async'
}
// Build JSON body - spread input at root level, then add API control parameters
let jsonBody: any = input !== undefined ? { ...input } : {}
// Convert any File objects in the input to base64 format
jsonBody = await this.convertFilesToBase64(jsonBody)
if (stream !== undefined) {
jsonBody.stream = stream
}
if (selectedOutputs !== undefined) {
jsonBody.selectedOutputs = selectedOutputs
}
const fetchPromise = fetch(url, {
method: 'POST',
headers,
body: JSON.stringify(body),
body: JSON.stringify(jsonBody),
})
const response = await Promise.race([fetchPromise, timeoutPromise])