v0.6.32: BYOK fixes, UI improvements, CloudWatch tools, JSM tools extension

Waleed
2026-04-08 22:31:21 -07:00
committed by GitHub
88 changed files with 1400 additions and 521 deletions

View File

@@ -78,7 +78,7 @@ Defines the fields approvers fill in when responding. This data becomes availabl
}
```
Access resume data in downstream blocks using `<blockId.resumeInput.fieldName>`.
Access resume data in downstream blocks using `<blockId.fieldName>`.
## Approval Methods
@@ -93,11 +93,12 @@ Access resume data in downstream blocks using `<blockId.resumeInput.fieldName>`.
<Tab>
### REST API
Programmatically resume workflows using the resume endpoint. The `contextId` is available from the block's `resumeEndpoint` output or from the paused execution detail.
Programmatically resume workflows using the resume endpoint. The `contextId` is available from the block's `resumeEndpoint` output or from the `_resume` object in the paused execution response.
```bash
POST /api/resume/{workflowId}/{executionId}/{contextId}
Content-Type: application/json
X-API-Key: your-api-key
{
"input": {
@@ -107,23 +108,44 @@ Access resume data in downstream blocks using `<blockId.resumeInput.fieldName>`.
}
```
The response includes a new `executionId` for the resumed execution:
The resume endpoint automatically respects the execution mode used in the original execute call:
- **Sync mode** (default) — The response waits for the remaining workflow to complete and returns the full result:
```json
{
"success": true,
"status": "completed",
"executionId": "<resumeExecutionId>",
"output": { ... },
"metadata": { "duration": 1234, "startTime": "...", "endTime": "..." }
}
```
If the resumed workflow hits another HITL block, the response returns `"status": "paused"` with new `_resume` URLs in the output.
- **Stream mode** (`stream: true` on the original execute call) — The resume response streams SSE events with `selectedOutputs` chunks, just like the initial execution.
- **Async mode** (`X-Execution-Mode: async` on the original execute call) — The resume dispatches execution to a background worker and returns immediately with `202`:
```json
{
"status": "started",
"executionId": "<resumeExecutionId>",
"message": "Resume execution started."
"message": "Resume execution started asynchronously."
}
```
To poll execution progress after resuming, connect to the SSE stream:
#### Polling execution status
To check on a paused execution or poll for completion after an async resume:
```bash
GET /api/workflows/{workflowId}/executions/{resumeExecutionId}/stream
GET /api/resume/{workflowId}/{executionId}
X-API-Key: your-api-key
```
Build custom approval UIs or integrate with existing systems.
Returns the full paused execution detail with all pause points, their statuses, and resume links. Returns `404` when the execution has completed and is no longer paused.
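As a minimal sketch (assuming the endpoints and headers documented above; the `approved` input field is a placeholder taken from the example resume form), a script that resumes an execution and then polls for completion might look like:
```typescript
// Hedged sketch: resume a paused execution, then poll until it completes.
// apiKey, workflowId, executionId, and contextId are placeholders.
async function resumeAndPoll(
  apiKey: string,
  workflowId: string,
  executionId: string,
  contextId: string
) {
  const resumeRes = await fetch(
    `/api/resume/${workflowId}/${executionId}/${contextId}`,
    {
      method: 'POST',
      headers: { 'Content-Type': 'application/json', 'X-API-Key': apiKey },
      body: JSON.stringify({ input: { approved: true } }),
    }
  )

  if (resumeRes.status === 202) {
    // Async mode: poll the paused-execution detail; a 404 means the
    // execution completed and is no longer paused (see above).
    while (true) {
      const poll = await fetch(`/api/resume/${workflowId}/${executionId}`, {
        headers: { 'X-API-Key': apiKey },
      })
      if (poll.status === 404) return { status: 'completed' }
      await new Promise((r) => setTimeout(r, 2000))
    }
  }

  // Sync mode: the body is either the full result or a new pause payload.
  return resumeRes.json()
}
```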
</Tab>
<Tab>
### Webhook
@@ -132,6 +154,53 @@ Access resume data in downstream blocks using `<blockId.resumeInput.fieldName>`.
</Tab>
</Tabs>
## API Execute Behavior
When triggering a workflow via the execute API (`POST /api/workflows/{id}/execute`), HITL blocks cause the execution to pause and return the `_resume` data in the response:
<Tabs items={['Sync (JSON)', 'Stream (SSE)', 'Async']}>
<Tab>
The response includes the full pause data with resume URLs:
```json
{
"success": true,
"executionId": "<executionId>",
"output": {
"data": {
"operation": "human",
"_resume": {
"apiUrl": "/api/resume/{workflowId}/{executionId}/{contextId}",
"uiUrl": "/resume/{workflowId}/{executionId}",
"contextId": "<contextId>",
"executionId": "<executionId>",
"workflowId": "<workflowId>"
}
}
}
}
```
</Tab>
<Tab>
Blocks before the HITL stream their `selectedOutputs` normally. When execution pauses, the final SSE event includes `status: "paused"` and the `_resume` data:
```
data: {"blockId":"agent1","chunk":"streamed content..."}
data: {"event":"final","data":{"success":true,"output":{...,"_resume":{...}},"status":"paused"}}
data: "[DONE]"
```
On resume, blocks after the HITL stream their `selectedOutputs` the same way.
<Callout type="info">
HITL blocks are automatically excluded from the `selectedOutputs` dropdown since their data is always included in the pause response.
</Callout>
</Tab>
<Tab>
Returns `202` immediately. Use the polling endpoint to check when the execution pauses.
</Tab>
</Tabs>
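For stream mode, a client can watch the SSE stream for the pause. The sketch below assumes the event framing shown in the Stream tab above (`data: {...}` lines, a `final` event carrying `status: "paused"`, and a terminating `"[DONE]"` sentinel); error handling is omitted for brevity:
```typescript
// Hedged sketch: read the execute SSE stream and capture the _resume data
// when the final event reports a paused status.
async function watchForPause(res: Response): Promise<unknown | null> {
  const reader = res.body!.getReader()
  const decoder = new TextDecoder()
  let buffer = ''
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })
    let idx: number
    while ((idx = buffer.indexOf('\n')) >= 0) {
      const line = buffer.slice(0, idx).trim()
      buffer = buffer.slice(idx + 1)
      if (!line.startsWith('data: ')) continue
      const payload = line.slice(6)
      if (payload === '"[DONE]"') return null
      const event = JSON.parse(payload)
      if (event.event === 'final' && event.data?.status === 'paused') {
        return event.data.output?._resume ?? null // resume URLs + contextId
      }
    }
  }
  return null
}
```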
## Common Use Cases
**Content Approval** - Review AI-generated content before publishing
@@ -161,9 +230,9 @@ Agent (Generate) → Human in the Loop (QA) → Gmail (Send)
**`response`** - Display data shown to the approver (json)
**`submission`** - Form submission data from the approver (json)
**`submittedAt`** - ISO timestamp when the workflow was resumed
**`resumeInput.*`** - All fields defined in Resume Form become available after the workflow resumes
**`<fieldName>`** - All fields defined in Resume Form become available at the top level after the workflow resumes
Access using `<blockId.resumeInput.fieldName>`.
Access using `<blockId.fieldName>`.
## Example
@@ -187,7 +256,7 @@ Access using `<blockId.resumeInput.fieldName>`.
**Downstream Usage:**
```javascript
// Condition block
<approval1.resumeInput.approved> === true
<approval1.approved> === true
```
The example below shows the approval portal as seen by an approver after the workflow pauses. Approvers can review the data and provide inputs as part of resuming the workflow. The portal can be accessed directly via its unique URL, `<blockId.url>`.
@@ -204,7 +273,7 @@ The example below shows an approval portal as seen by an approver after the work
<FAQ items={[
{ question: "How long does the workflow stay paused?", answer: "The workflow pauses indefinitely until a human provides input through the approval portal, the REST API, or a webhook. There is no automatic timeout — it will wait until someone responds." },
{ question: "What notification channels can I use to alert approvers?", answer: "You can configure notifications through Slack, Gmail, Microsoft Teams, SMS (via Twilio), or custom webhooks. Include the approval URL in your notification message so approvers can access the portal directly." },
{ question: "How do I access the approver's input in downstream blocks?", answer: "Use the syntax <blockId.resumeInput.fieldName> to reference specific fields from the resume form. For example, if your block ID is 'approval1' and the form has an 'approved' field, use <approval1.resumeInput.approved>." },
{ question: "How do I access the approver's input in downstream blocks?", answer: "Use the syntax <blockId.fieldName> to reference specific fields from the resume form. For example, if your block name is 'approval1' and the form has an 'approved' field, use <approval1.approved>." },
{ question: "Can I chain multiple Human in the Loop blocks for multi-stage approvals?", answer: "Yes. You can place multiple Human in the Loop blocks in sequence to create multi-stage approval workflows. Each block pauses independently and can have its own notification configuration and resume form fields." },
{ question: "Can I resume the workflow programmatically without the portal?", answer: "Yes. Each block exposes a resume API endpoint that you can call with a POST request containing the form data as JSON. This lets you build custom approval UIs or integrate with existing systems like Jira or ServiceNow." },
{ question: "What outputs are available after the workflow resumes?", answer: "The block outputs include the approval portal URL, the resume API endpoint URL, the display data shown to the approver, the form submission data, the raw resume input, and an ISO timestamp of when the workflow was resumed." },

View File

@@ -113,7 +113,7 @@ Retrieve the results of a completed Athena query execution
| `awsAccessKeyId` | string | Yes | AWS access key ID |
| `awsSecretAccessKey` | string | Yes | AWS secret access key |
| `queryExecutionId` | string | Yes | Query execution ID to get results for |
| `maxResults` | number | No | Maximum number of rows to return \(1-1000\) |
| `maxResults` | number | No | Maximum number of rows to return \(1-999\) |
| `nextToken` | string | No | Pagination token from a previous request |
#### Output

View File

@@ -10,6 +10,24 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
color="linear-gradient(45deg, #B0084D 0%, #FF4F8B 100%)"
/>
{/* MANUAL-CONTENT-START:intro */}
[AWS CloudWatch](https://aws.amazon.com/cloudwatch/) is a monitoring and observability service that provides data and actionable insights for AWS resources, applications, and services. CloudWatch collects monitoring and operational data in the form of logs, metrics, and events, giving you a unified view of your AWS environment.
With the CloudWatch integration, you can:
- **Query Logs (Insights)**: Run CloudWatch Log Insights queries against one or more log groups to analyze log data with a powerful query language
- **Describe Log Groups**: List available CloudWatch log groups in your account, optionally filtered by name prefix
- **Get Log Events**: Retrieve log events from a specific log stream within a log group
- **Describe Log Streams**: List log streams within a log group, ordered by last event time or filtered by name prefix
- **List Metrics**: Browse available CloudWatch metrics, optionally filtered by namespace, metric name, or recent activity
- **Get Metric Statistics**: Retrieve statistical data for a metric over a specified time range with configurable granularity
- **Publish Metric**: Publish custom metric data points to CloudWatch for your own application monitoring
- **Describe Alarms**: List and filter CloudWatch alarms by name prefix, state, or alarm type
In Sim, the CloudWatch integration enables your agents to monitor AWS infrastructure, analyze application logs, track custom metrics, and respond to alarm states as part of automated DevOps and SRE workflows. This is especially powerful when combined with other AWS integrations like CloudFormation and SNS for end-to-end infrastructure management.
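These operations map onto the AWS SDK. As a rough illustration (not Sim's implementation), a minimal Log Insights query with `@aws-sdk/client-cloudwatch-logs` looks roughly like this; the region, log group name, and query string are placeholders:
```typescript
import {
  CloudWatchLogsClient,
  StartQueryCommand,
  GetQueryResultsCommand,
} from '@aws-sdk/client-cloudwatch-logs'

const client = new CloudWatchLogsClient({ region: 'us-east-1' })

async function queryRecentErrors() {
  const now = Math.floor(Date.now() / 1000)
  // Start a Log Insights query over the last hour.
  const { queryId } = await client.send(
    new StartQueryCommand({
      logGroupNames: ['/aws/lambda/my-function'], // placeholder
      queryString:
        'fields @timestamp, @message | filter @message like /ERROR/ | limit 20',
      startTime: now - 3600,
      endTime: now,
    })
  )
  // Log Insights queries are asynchronous: poll until the query completes.
  while (true) {
    const res = await client.send(new GetQueryResultsCommand({ queryId }))
    if (res.status === 'Complete') return res.results
    await new Promise((r) => setTimeout(r, 1000))
  }
}
```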
{/* MANUAL-CONTENT-END */}
## Usage Instructions
Integrate AWS CloudWatch into workflows. Run Log Insights queries, list log groups, retrieve log events, list and get metrics, and monitor alarms. Requires AWS access key and secret access key.
@@ -155,6 +173,34 @@ Get statistics for a CloudWatch metric over a time range
| `label` | string | Metric label |
| `datapoints` | array | Datapoints with timestamp and statistics values |
### `cloudwatch_put_metric_data`
Publish a custom metric data point to CloudWatch
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `awsRegion` | string | Yes | AWS region \(e.g., us-east-1\) |
| `awsAccessKeyId` | string | Yes | AWS access key ID |
| `awsSecretAccessKey` | string | Yes | AWS secret access key |
| `namespace` | string | Yes | Metric namespace \(e.g., Custom/MyApp\) |
| `metricName` | string | Yes | Name of the metric |
| `value` | number | Yes | Metric value to publish |
| `unit` | string | No | Unit of the metric \(e.g., Count, Seconds, Bytes\) |
| `dimensions` | string | No | JSON string of dimension name/value pairs |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `success` | boolean | Whether the metric was published successfully |
| `namespace` | string | Metric namespace |
| `metricName` | string | Metric name |
| `value` | number | Published metric value |
| `unit` | string | Metric unit |
| `timestamp` | string | Timestamp when the metric was published |
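For illustration, a tool invocation matching the input table above might look like the following; all values are placeholders:
```typescript
// Hypothetical arguments for cloudwatch_put_metric_data; every value here
// is a placeholder matching the parameter table above.
const args = {
  awsRegion: 'us-east-1',
  awsAccessKeyId: 'AKIA...', // placeholder
  awsSecretAccessKey: '...', // placeholder
  namespace: 'Custom/MyApp',
  metricName: 'CheckoutLatency',
  value: 123.4,
  unit: 'Milliseconds',
  // Dimensions are passed as a JSON string of name/value pairs.
  dimensions: JSON.stringify({ Service: 'checkout', Env: 'prod' }),
}
```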
### `cloudwatch_describe_alarms`
List and filter CloudWatch alarms

View File

@@ -113,10 +113,11 @@ Create a new service request in Jira Service Management
| `cloudId` | string | No | Jira Cloud ID for the instance |
| `serviceDeskId` | string | Yes | Service Desk ID \(e.g., "1", "2"\) |
| `requestTypeId` | string | Yes | Request Type ID \(e.g., "10", "15"\) |
| `summary` | string | Yes | Summary/title for the service request |
| `summary` | string | No | Summary/title for the service request \(required unless using Form Answers\) |
| `description` | string | No | Description for the service request |
| `raiseOnBehalfOf` | string | No | Account ID of customer to raise request on behalf of |
| `requestFieldValues` | json | No | Request field values as key-value pairs \(overrides summary/description if provided\) |
| `formAnswers` | json | No | Form answers for form-based request types \(e.g., \{"summary": \{"text": "Title"\}, "customfield_10010": \{"choices": \["10320"\]\}\}\) |
| `requestParticipants` | string | No | Comma-separated account IDs to add as request participants |
| `channel` | string | No | Channel the request originates from \(e.g., portal, email\) |
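As a sketch of a form-based create request (the custom field ID and choice value are the placeholder examples from the table above):
```typescript
// Hypothetical jsm_create_request arguments using Form Answers instead of
// a summary; IDs and field values are placeholders.
const createRequestArgs = {
  serviceDeskId: '1',
  requestTypeId: '10',
  formAnswers: {
    summary: { text: 'Laptop replacement' },
    customfield_10010: { choices: ['10320'] },
  },
  requestParticipants: 'accountId1,accountId2',
  channel: 'portal',
}
```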

View File

@@ -2044,12 +2044,16 @@
"name": "Get Metric Statistics",
"description": "Get statistics for a CloudWatch metric over a time range"
},
{
"name": "Publish Metric",
"description": "Publish a custom metric data point to CloudWatch"
},
{
"name": "Describe Alarms",
"description": "List and filter CloudWatch alarms"
}
],
"operationCount": 7,
"operationCount": 8,
"triggers": [],
"triggerCount": 0,
"authType": "none",

View File

@@ -23,6 +23,18 @@ export async function POST() {
return NextResponse.json({ token: response.token })
} catch (error) {
// better-auth's sessionMiddleware throws APIError("UNAUTHORIZED") with no message
// when the session is missing/expired — surface this as a 401, not a 500.
if (
error instanceof Error &&
('statusCode' in error || 'status' in error) &&
((error as Record<string, unknown>).statusCode === 401 ||
(error as Record<string, unknown>).status === 'UNAUTHORIZED')
) {
logger.warn('Socket token request with invalid/expired session')
return NextResponse.json({ error: 'Authentication required' }, { status: 401 })
}
logger.error('Failed to generate socket token', {
error: error instanceof Error ? error.message : String(error),
stack: error instanceof Error ? error.stack : undefined,

View File

@@ -140,6 +140,10 @@ vi.mock('@/lib/workflows/streaming/streaming', () => ({
createStreamingResponse: vi.fn().mockImplementation(async () => createMockStream()),
}))
vi.mock('@/lib/workflows/executor/execute-workflow', () => ({
executeWorkflow: vi.fn().mockResolvedValue({ success: true, output: {} }),
}))
vi.mock('@/lib/core/utils/sse', () => ({
SSE_HEADERS: {
'Content-Type': 'text/event-stream',
@@ -410,14 +414,7 @@ describe('Chat Identifier API Route', () => {
expect(createStreamingResponse).toHaveBeenCalledWith(
expect.objectContaining({
workflow: expect.objectContaining({
id: 'workflow-id',
userId: 'user-id',
}),
input: expect.objectContaining({
input: 'Hello world',
conversationId: 'conv-123',
}),
executeFn: expect.any(Function),
streamConfig: expect.objectContaining({
isSecureMode: true,
workflowTriggerType: 'chat',
@@ -494,9 +491,9 @@ describe('Chat Identifier API Route', () => {
expect(createStreamingResponse).toHaveBeenCalledWith(
expect.objectContaining({
input: expect.objectContaining({
input: 'Hello world',
conversationId: 'test-conversation-123',
executeFn: expect.any(Function),
streamConfig: expect.objectContaining({
workflowTriggerType: 'chat',
}),
})
)
@@ -510,9 +507,7 @@ describe('Chat Identifier API Route', () => {
expect(createStreamingResponse).toHaveBeenCalledWith(
expect.objectContaining({
input: expect.objectContaining({
input: 'Hello world',
}),
executeFn: expect.any(Function),
})
)
})

View File

@@ -199,6 +199,7 @@ export async function POST(
}
const { createStreamingResponse } = await import('@/lib/workflows/streaming/streaming')
const { executeWorkflow } = await import('@/lib/workflows/executor/execute-workflow')
const { SSE_HEADERS } = await import('@/lib/core/utils/sse')
const workflowInput: any = { input, conversationId }
@@ -252,15 +253,31 @@ export async function POST(
const stream = await createStreamingResponse({
requestId,
workflow: workflowForExecution,
input: workflowInput,
executingUserId: workspaceOwnerId,
streamConfig: {
selectedOutputs,
isSecureMode: true,
workflowTriggerType: 'chat',
},
executionId,
executeFn: async ({ onStream, onBlockComplete, abortSignal }) =>
executeWorkflow(
workflowForExecution,
requestId,
workflowInput,
workspaceOwnerId,
{
enabled: true,
selectedOutputs,
isSecureMode: true,
workflowTriggerType: 'chat',
onStream,
onBlockComplete,
skipLoggingComplete: true,
abortSignal,
executionMode: 'stream',
},
executionId
),
})
const streamResponse = new NextResponse(stream, {

View File

@@ -9,6 +9,7 @@ import { generateRequestId } from '@/lib/core/utils/request'
import { generateId } from '@/lib/core/utils/uuid'
import { preprocessExecution } from '@/lib/execution/preprocessing'
import { LoggingSession } from '@/lib/logs/execution/logging-session'
import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow'
import { normalizeInputFormatValue } from '@/lib/workflows/input-format'
import { createStreamingResponse } from '@/lib/workflows/streaming/streaming'
import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers'
@@ -216,18 +217,33 @@ export async function POST(
...formData, // Spread form fields at top level for convenience
}
// Execute workflow using streaming (for consistency with chat)
const stream = await createStreamingResponse({
requestId,
workflow: workflowForExecution,
input: workflowInput,
executingUserId: workspaceOwnerId,
streamConfig: {
selectedOutputs: [],
isSecureMode: true,
workflowTriggerType: 'api', // Use 'api' type since form is similar
workflowTriggerType: 'api',
},
executionId,
executeFn: async ({ onStream, onBlockComplete, abortSignal }) =>
executeWorkflow(
workflowForExecution,
requestId,
workflowInput,
workspaceOwnerId,
{
enabled: true,
selectedOutputs: [],
isSecureMode: true,
workflowTriggerType: 'api',
onStream,
onBlockComplete,
skipLoggingComplete: true,
abortSignal,
executionMode: 'sync',
},
executionId
),
})
// For forms, we don't stream back - we wait for completion and return success

View File

@@ -1,19 +1,44 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { AuthType } from '@/lib/auth/hybrid'
import { getJobQueue, shouldUseBullMQ } from '@/lib/core/async-jobs'
import { createBullMQJobData } from '@/lib/core/bullmq'
import { generateRequestId } from '@/lib/core/utils/request'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { generateId } from '@/lib/core/utils/uuid'
import { enqueueWorkspaceDispatch } from '@/lib/core/workspace-dispatch'
import { setExecutionMeta } from '@/lib/execution/event-buffer'
import { preprocessExecution } from '@/lib/execution/preprocessing'
import { PauseResumeManager } from '@/lib/workflows/executor/human-in-the-loop-manager'
import { createStreamingResponse } from '@/lib/workflows/streaming/streaming'
import { getWorkspaceBilledAccountUserId } from '@/lib/workspaces/utils'
import { validateWorkflowAccess } from '@/app/api/workflows/middleware'
import type { ResumeExecutionPayload } from '@/background/resume-execution'
import { ExecutionSnapshot } from '@/executor/execution/snapshot'
import type { SerializedSnapshot } from '@/executor/types'
const logger = createLogger('WorkflowResumeAPI')
export const runtime = 'nodejs'
export const dynamic = 'force-dynamic'
function getStoredSnapshotConfig(pausedExecution: { executionSnapshot: unknown }): {
executionMode?: 'sync' | 'stream' | 'async'
selectedOutputs?: string[]
} {
try {
const serialized = pausedExecution.executionSnapshot as SerializedSnapshot
const snapshot = ExecutionSnapshot.fromJSON(serialized.snapshot)
return {
executionMode: snapshot.metadata.executionMode,
selectedOutputs: snapshot.selectedOutputs,
}
} catch {
return {}
}
}
export async function POST(
request: NextRequest,
{
@@ -24,7 +49,6 @@ export async function POST(
) {
const { workflowId, executionId, contextId } = await params
// Allow resume from dashboard without requiring deployment
const access = await validateWorkflowAccess(request, workflowId, false)
if (access.error) {
return NextResponse.json({ error: access.error.message }, { status: access.error.status })
@@ -74,12 +98,12 @@ export async function POST(
const preprocessResult = await preprocessExecution({
workflowId,
userId,
triggerType: 'manual', // Resume is a manual trigger
triggerType: 'manual',
executionId: resumeExecutionId,
requestId,
checkRateLimit: false, // Manual triggers bypass rate limits
checkDeployment: false, // Resuming existing execution, deployment already checked
skipUsageLimits: true, // Resume is continuation of authorized execution - don't recheck limits
checkRateLimit: false,
checkDeployment: false,
skipUsageLimits: true,
useAuthenticatedUserAsActor: isPersonalApiKeyCaller,
workspaceId: workflow.workspaceId || undefined,
})
@@ -142,8 +166,35 @@ export async function POST(
}
const isApiCaller = access.auth?.authType === AuthType.API_KEY
const snapshotConfig = isApiCaller ? getStoredSnapshotConfig(enqueueResult.pausedExecution) : {}
const executionMode = isApiCaller ? (snapshotConfig.executionMode ?? 'sync') : undefined
if (isApiCaller) {
if (isApiCaller && executionMode === 'stream') {
const stream = await createStreamingResponse({
requestId,
streamConfig: {
selectedOutputs: snapshotConfig.selectedOutputs,
timeoutMs: preprocessResult.executionTimeout?.sync,
},
executionId: enqueueResult.resumeExecutionId,
executeFn: async ({ onStream, onBlockComplete, abortSignal }) =>
PauseResumeManager.startResumeExecution({
...resumeArgs,
onStream,
onBlockComplete,
abortSignal,
}),
})
return new NextResponse(stream, {
headers: {
...SSE_HEADERS,
'X-Execution-Id': enqueueResult.resumeExecutionId,
},
})
}
if (isApiCaller && executionMode !== 'async') {
const result = await PauseResumeManager.startResumeExecution(resumeArgs)
return NextResponse.json({
@@ -162,6 +213,68 @@ export async function POST(
})
}
if (isApiCaller && executionMode === 'async') {
const resumePayload: ResumeExecutionPayload = {
resumeEntryId: enqueueResult.resumeEntryId,
resumeExecutionId: enqueueResult.resumeExecutionId,
pausedExecutionId: enqueueResult.pausedExecution.id,
contextId: enqueueResult.contextId,
resumeInput: enqueueResult.resumeInput,
userId: enqueueResult.userId,
workflowId,
parentExecutionId: executionId,
}
let jobId: string
try {
const useBullMQ = shouldUseBullMQ()
if (useBullMQ) {
jobId = await enqueueWorkspaceDispatch({
id: enqueueResult.resumeExecutionId,
workspaceId: workflow.workspaceId,
lane: 'runtime',
queueName: 'resume-execution',
bullmqJobName: 'resume-execution',
bullmqPayload: createBullMQJobData(resumePayload, {
workflowId,
userId,
}),
metadata: { workflowId, userId },
})
} else {
const jobQueue = await getJobQueue()
jobId = await jobQueue.enqueue('resume-execution', resumePayload, {
metadata: { workflowId, workspaceId: workflow.workspaceId, userId },
})
}
logger.info('Enqueued async resume execution', {
jobId,
resumeExecutionId: enqueueResult.resumeExecutionId,
})
} catch (dispatchError) {
logger.error('Failed to dispatch async resume execution', {
error: dispatchError instanceof Error ? dispatchError.message : String(dispatchError),
resumeExecutionId: enqueueResult.resumeExecutionId,
})
return NextResponse.json(
{ error: 'Failed to queue resume execution. Please try again.' },
{ status: 503 }
)
}
return NextResponse.json(
{
success: true,
async: true,
jobId,
executionId: enqueueResult.resumeExecutionId,
message: 'Resume execution queued',
statusUrl: `${getBaseUrl()}/api/jobs/${jobId}`,
},
{ status: 202 }
)
}
PauseResumeManager.startResumeExecution(resumeArgs).catch((error) => {
logger.error('Failed to start resume execution', {
workflowId,
@@ -200,7 +313,6 @@ export async function GET(
) {
const { workflowId, executionId, contextId } = await params
// Allow access without API key for browser-based UI (same as parent execution endpoint)
const access = await validateWorkflowAccess(request, workflowId, false)
if (access.error) {
return NextResponse.json({ error: access.error.message }, { status: access.error.status })

View File

@@ -4,7 +4,7 @@ import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { hasExceededCostLimit } from '@/lib/billing/core/subscription'
import { checkServerSideUsageLimits } from '@/lib/billing/calculations/usage-monitor'
import { recordUsage } from '@/lib/billing/core/usage-log'
import { env } from '@/lib/core/config/env'
import { getCostMultiplier, isBillingEnabled } from '@/lib/core/config/feature-flags'
@@ -110,11 +110,14 @@ export async function POST(request: NextRequest) {
}
}
if (billingUserId && isBillingEnabled) {
const exceeded = await hasExceededCostLimit(billingUserId)
if (exceeded) {
if (billingUserId) {
const usageCheck = await checkServerSideUsageLimits(billingUserId)
if (usageCheck.isExceeded) {
return NextResponse.json(
{ error: 'Usage limit exceeded. Please upgrade your plan to continue.' },
{
error:
usageCheck.message || 'Usage limit exceeded. Please upgrade your plan to continue.',
},
{ status: 402 }
)
}

View File

@@ -51,7 +51,9 @@ export async function POST(request: NextRequest) {
const command = new DescribeAlarmsCommand({
...(validatedData.alarmNamePrefix && { AlarmNamePrefix: validatedData.alarmNamePrefix }),
...(validatedData.stateValue && { StateValue: validatedData.stateValue as StateValue }),
...(validatedData.alarmType && { AlarmTypes: [validatedData.alarmType as AlarmType] }),
AlarmTypes: validatedData.alarmType
? [validatedData.alarmType as AlarmType]
: (['MetricAlarm', 'CompositeAlarm'] as AlarmType[]),
...(validatedData.limit !== undefined && { MaxRecords: validatedData.limit }),
})

View File

@@ -53,7 +53,7 @@ export async function POST(request: NextRequest) {
}))
}
} catch {
throw new Error('Invalid dimensions JSON')
return NextResponse.json({ error: 'Invalid dimensions JSON format' }, { status: 400 })
}
}

View File

@@ -0,0 +1,136 @@
import {
CloudWatchClient,
PutMetricDataCommand,
type StandardUnit,
} from '@aws-sdk/client-cloudwatch'
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { checkInternalAuth } from '@/lib/auth/hybrid'
const logger = createLogger('CloudWatchPutMetricData')
const VALID_UNITS = [
'Seconds',
'Microseconds',
'Milliseconds',
'Bytes',
'Kilobytes',
'Megabytes',
'Gigabytes',
'Terabytes',
'Bits',
'Kilobits',
'Megabits',
'Gigabits',
'Terabits',
'Percent',
'Count',
'Bytes/Second',
'Kilobytes/Second',
'Megabytes/Second',
'Gigabytes/Second',
'Terabytes/Second',
'Bits/Second',
'Kilobits/Second',
'Megabits/Second',
'Gigabits/Second',
'Terabits/Second',
'Count/Second',
'None',
] as const
const PutMetricDataSchema = z.object({
region: z.string().min(1, 'AWS region is required'),
accessKeyId: z.string().min(1, 'AWS access key ID is required'),
secretAccessKey: z.string().min(1, 'AWS secret access key is required'),
namespace: z.string().min(1, 'Namespace is required'),
metricName: z.string().min(1, 'Metric name is required'),
value: z.number({ coerce: true }).refine((v) => Number.isFinite(v), {
message: 'Metric value must be a finite number',
}),
unit: z.enum(VALID_UNITS).optional(),
dimensions: z
.string()
.optional()
.refine(
(val) => {
if (!val) return true
try {
const parsed = JSON.parse(val)
return typeof parsed === 'object' && parsed !== null && !Array.isArray(parsed)
} catch {
return false
}
},
{ message: 'dimensions must be a valid JSON object string' }
),
})
export async function POST(request: NextRequest) {
try {
const auth = await checkInternalAuth(request)
if (!auth.success || !auth.userId) {
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
}
const body = await request.json()
const validatedData = PutMetricDataSchema.parse(body)
const client = new CloudWatchClient({
region: validatedData.region,
credentials: {
accessKeyId: validatedData.accessKeyId,
secretAccessKey: validatedData.secretAccessKey,
},
})
const timestamp = new Date()
const dimensions: { Name: string; Value: string }[] = []
if (validatedData.dimensions) {
const parsed = JSON.parse(validatedData.dimensions)
for (const [name, value] of Object.entries(parsed)) {
dimensions.push({ Name: name, Value: String(value) })
}
}
const command = new PutMetricDataCommand({
Namespace: validatedData.namespace,
MetricData: [
{
MetricName: validatedData.metricName,
Value: validatedData.value,
Timestamp: timestamp,
...(validatedData.unit && { Unit: validatedData.unit as StandardUnit }),
...(dimensions.length > 0 && { Dimensions: dimensions }),
},
],
})
await client.send(command)
return NextResponse.json({
success: true,
output: {
success: true,
namespace: validatedData.namespace,
metricName: validatedData.metricName,
value: validatedData.value,
unit: validatedData.unit ?? 'None',
timestamp: timestamp.toISOString(),
},
})
} catch (error) {
if (error instanceof z.ZodError) {
return NextResponse.json(
{ error: error.errors[0]?.message ?? 'Invalid request' },
{ status: 400 }
)
}
const errorMessage =
error instanceof Error ? error.message : 'Failed to publish CloudWatch metric'
logger.error('PutMetricData failed', { error: errorMessage })
return NextResponse.json({ error: errorMessage }, { status: 500 })
}
}

View File

@@ -12,6 +12,20 @@ export const dynamic = 'force-dynamic'
const logger = createLogger('JsmRequestAPI')
function parseJsmErrorMessage(status: number, statusText: string, errorText: string): string {
try {
const errorData = JSON.parse(errorText)
if (errorData.errorMessage) {
return `JSM API error: ${errorData.errorMessage}`
}
} catch {
if (errorText) {
return `JSM API error: ${errorText}`
}
}
return `JSM API error: ${status} ${statusText}`
}
export async function POST(request: NextRequest) {
const auth = await checkInternalAuth(request)
if (!auth.success || !auth.userId) {
@@ -31,6 +45,7 @@ export async function POST(request: NextRequest) {
description,
raiseOnBehalfOf,
requestFieldValues,
formAnswers,
requestParticipants,
channel,
expand,
@@ -55,7 +70,7 @@ export async function POST(request: NextRequest) {
const baseUrl = getJsmApiBaseUrl(cloudId)
const isCreateOperation = serviceDeskId && requestTypeId && summary
const isCreateOperation = serviceDeskId && requestTypeId && (summary || formAnswers)
if (isCreateOperation) {
const serviceDeskIdValidation = validateAlphanumericId(serviceDeskId, 'serviceDeskId')
@@ -69,15 +84,30 @@ export async function POST(request: NextRequest) {
}
const url = `${baseUrl}/request`
logger.info('Creating request at:', url)
logger.info('Creating request at:', { url, serviceDeskId, requestTypeId })
const requestBody: Record<string, unknown> = {
serviceDeskId,
requestTypeId,
requestFieldValues: requestFieldValues || {
summary,
...(description && { description }),
},
}
if (summary || description || requestFieldValues) {
const fieldValues =
requestFieldValues && typeof requestFieldValues === 'object'
? {
...(!requestFieldValues.summary && summary ? { summary } : {}),
...(!requestFieldValues.description && description ? { description } : {}),
...requestFieldValues,
}
: {
...(summary && { summary }),
...(description && { description }),
}
requestBody.requestFieldValues = fieldValues
}
if (formAnswers && typeof formAnswers === 'object') {
requestBody.form = { answers: formAnswers }
}
if (raiseOnBehalfOf) {
@@ -112,7 +142,10 @@ export async function POST(request: NextRequest) {
})
return NextResponse.json(
{ error: `JSM API error: ${response.status} ${response.statusText}`, details: errorText },
{
error: parseJsmErrorMessage(response.status, response.statusText, errorText),
details: errorText,
},
{ status: response.status }
)
}
@@ -178,7 +211,10 @@ export async function POST(request: NextRequest) {
})
return NextResponse.json(
{ error: `JSM API error: ${response.status} ${response.statusText}`, details: errorText },
{
error: parseJsmErrorMessage(response.status, response.statusText, errorText),
details: errorText,
},
{ status: response.status }
)
}

View File

@@ -39,6 +39,7 @@ import {
cleanupExecutionBase64Cache,
hydrateUserFilesWithBase64,
} from '@/lib/uploads/utils/user-file-base64.server'
import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow'
import { executeWorkflowCore } from '@/lib/workflows/executor/execution-core'
import { type ExecutionEvent, encodeSSEEvent } from '@/lib/workflows/executor/execution-events'
import { handlePostExecutionPauseState } from '@/lib/workflows/executor/pause-persistence'
@@ -213,6 +214,7 @@ async function handleAsyncExecution(params: AsyncExecutionParams): Promise<NextR
requestId,
correlation,
callChain,
executionMode: 'async',
}
try {
@@ -789,6 +791,7 @@ async function handleExecutePost(
enforceCredentialAccess: useAuthenticatedUserAsActor,
workflowStateOverride: effectiveWorkflowStateOverride,
callChain,
executionMode: 'sync',
}
const executionVariables = cachedWorkflowData?.variables ?? workflow.variables ?? {}
@@ -1012,6 +1015,7 @@ async function handleExecutePost(
enforceCredentialAccess: useAuthenticatedUserAsActor,
workflowStateOverride: effectiveWorkflowStateOverride,
callChain,
executionMode: 'sync',
}
const executionVariables = cachedWorkflowData?.variables ?? workflow.variables ?? {}
@@ -1051,17 +1055,15 @@ async function handleExecutePost(
cachedWorkflowData?.blocks || {}
)
const streamVariables = cachedWorkflowData?.variables ?? (workflow as any).variables
const streamWorkflow = {
id: workflow.id,
userId: actorUserId,
workspaceId,
isDeployed: workflow.isDeployed,
variables: streamVariables,
}
const stream = await createStreamingResponse({
requestId,
workflow: {
id: workflow.id,
userId: actorUserId,
workspaceId,
isDeployed: workflow.isDeployed,
variables: streamVariables,
},
input: processedInput,
executingUserId: actorUserId,
streamConfig: {
selectedOutputs: resolvedSelectedOutputs,
isSecureMode: false,
@@ -1071,6 +1073,27 @@ async function handleExecutePost(
timeoutMs: preprocessResult.executionTimeout?.sync,
},
executionId,
executeFn: async ({ onStream, onBlockComplete, abortSignal }) =>
executeWorkflow(
streamWorkflow,
requestId,
processedInput,
actorUserId,
{
enabled: true,
selectedOutputs: resolvedSelectedOutputs,
isSecureMode: false,
workflowTriggerType: triggerType === 'chat' ? 'chat' : 'api',
onStream,
onBlockComplete,
skipLoggingComplete: true,
includeFileBase64,
base64MaxBytes,
abortSignal,
executionMode: 'stream',
},
executionId
),
})
return new NextResponse(stream, {
@@ -1310,6 +1333,7 @@ async function handleExecutePost(
enforceCredentialAccess: useAuthenticatedUserAsActor,
workflowStateOverride: effectiveWorkflowStateOverride,
callChain,
executionMode: 'sync',
}
const sseExecutionVariables = cachedWorkflowData?.variables ?? workflow.variables ?? {}

View File

@@ -39,6 +39,7 @@ import {
extractContextTokens,
} from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/utils'
import { useWorkflowMap } from '@/hooks/queries/workflows'
import { useSettingsNavigation } from '@/hooks/use-settings-navigation'
import { useSpeechToText } from '@/hooks/use-speech-to-text'
import type { ChatContext } from '@/stores/panel'
@@ -120,6 +121,7 @@ export function UserInput({
onEnterWhileEmpty,
}: UserInputProps) {
const { workspaceId } = useParams<{ workspaceId: string }>()
const { navigateToSettings } = useSettingsNavigation()
const { data: workflowsById = {} } = useWorkflowMap(workspaceId)
const { data: session } = useSession()
const [value, setValue] = useState(defaultValue)
@@ -239,12 +241,19 @@ export function UserInput({
valueRef.current = newVal
}, [])
const handleUsageLimitExceeded = useCallback(() => {
navigateToSettings({ section: 'subscription' })
}, [navigateToSettings])
const {
isListening,
isSupported: isSttSupported,
toggleListening: rawToggle,
resetTranscript,
} = useSpeechToText({ onTranscript: handleTranscript })
} = useSpeechToText({
onTranscript: handleTranscript,
onUsageLimitExceeded: handleUsageLimitExceeded,
})
const toggleListening = useCallback(() => {
if (!isListening) {

View File

@@ -1,5 +1,5 @@
import { cookies } from 'next/headers'
import { ToastProvider } from '@/components/emcn'
import { getSession } from '@/lib/auth'
import { NavTour } from '@/app/workspace/[workspaceId]/components/product-tour'
import { ImpersonationBanner } from '@/app/workspace/[workspaceId]/impersonation-banner'
import { GlobalCommandsProvider } from '@/app/workspace/[workspaceId]/providers/global-commands-provider'
@@ -8,22 +8,17 @@ import { SettingsLoader } from '@/app/workspace/[workspaceId]/providers/settings
import { WorkspacePermissionsProvider } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider'
import { WorkspaceScopeSync } from '@/app/workspace/[workspaceId]/providers/workspace-scope-sync'
import { Sidebar } from '@/app/workspace/[workspaceId]/w/components/sidebar/sidebar'
import {
BRAND_COOKIE_NAME,
type BrandCache,
BrandingProvider,
} from '@/ee/whitelabeling/components/branding-provider'
import { BrandingProvider } from '@/ee/whitelabeling/components/branding-provider'
import { getOrgWhitelabelSettings } from '@/ee/whitelabeling/org-branding'
export default async function WorkspaceLayout({ children }: { children: React.ReactNode }) {
const cookieStore = await cookies()
let initialCache: BrandCache | null = null
try {
const raw = cookieStore.get(BRAND_COOKIE_NAME)?.value
if (raw) initialCache = JSON.parse(decodeURIComponent(raw))
} catch {}
const session = await getSession()
// The organization plugin is conditionally spread so TS can't infer activeOrganizationId on the base session type.
const orgId = (session?.session as { activeOrganizationId?: string } | null)?.activeOrganizationId
const initialOrgSettings = orgId ? await getOrgWhitelabelSettings(orgId) : null
return (
<BrandingProvider initialCache={initialCache}>
<BrandingProvider initialOrgSettings={initialOrgSettings}>
<ToastProvider>
<SettingsLoader />
<ProviderModelsLoader />

View File

@@ -598,6 +598,24 @@ export const LogDetails = memo(function LogDetails({
{formatCost(log.cost?.output || 0)}
</span>
</div>
{(() => {
const models = (log.cost as Record<string, unknown>)?.models as
| Record<string, { toolCost?: number }>
| undefined
const totalToolCost = models
? Object.values(models).reduce((sum, m) => sum + (m?.toolCost || 0), 0)
: 0
return totalToolCost > 0 ? (
<div className='flex items-center justify-between'>
<span className='font-medium text-[var(--text-tertiary)] text-caption'>
Tool Usage:
</span>
<span className='font-medium text-[var(--text-secondary)] text-caption'>
{formatCost(totalToolCost)}
</span>
</div>
) : null
})()}
</div>
<div className='border-[var(--border)] border-t' />
@@ -626,7 +644,7 @@ export const LogDetails = memo(function LogDetails({
<div className='flex items-center justify-center rounded-md bg-[var(--surface-2)] p-2 text-center'>
<p className='font-medium text-[var(--text-subtle)] text-xs'>
Total cost includes a base execution charge of{' '}
{formatCost(BASE_EXECUTION_CHARGE)} plus any model usage costs.
{formatCost(BASE_EXECUTION_CHARGE)} plus any model and tool usage costs.
</p>
</div>
</div>

View File

@@ -16,6 +16,7 @@ const SECTION_TITLES: Record<string, string> = {
subscription: 'Subscription',
team: 'Team',
sso: 'Single Sign-On',
whitelabeling: 'Whitelabeling',
copilot: 'Copilot Keys',
mcp: 'MCP Tools',
'custom-tools': 'Custom Tools',

View File

@@ -161,7 +161,7 @@ const WhitelabelingSettings = dynamic(
import('@/ee/whitelabeling/components/whitelabeling-settings').then(
(m) => m.WhitelabelingSettings
),
{ loading: () => <SettingsSectionSkeleton /> }
{ loading: () => <SettingsSectionSkeleton />, ssr: false }
)
interface SettingsPageProps {

View File

@@ -387,7 +387,7 @@ export function General() {
<Tooltip.Preview
src='/tooltips/auto-connect-on-drop.mp4'
alt='Auto-connect on drop example'
loop={false}
loop={true}
/>
</Tooltip.Content>
</Tooltip.Root>

View File

@@ -3,7 +3,7 @@ import { createLogger } from '@sim/logger'
const logger = createLogger('ProfilePictureUpload')
const MAX_FILE_SIZE = 5 * 1024 * 1024 // 5MB
const ACCEPTED_IMAGE_TYPES = ['image/png', 'image/jpeg', 'image/jpg']
const ACCEPTED_IMAGE_TYPES = ['image/png', 'image/jpeg', 'image/jpg', 'image/svg+xml']
interface UseProfilePictureUploadProps {
onUpload?: (url: string | null) => void
@@ -27,21 +27,19 @@ export function useProfilePictureUpload({
const [isUploading, setIsUploading] = useState(false)
useEffect(() => {
if (currentImage !== previewUrl) {
if (previewRef.current && previewRef.current !== currentImage) {
URL.revokeObjectURL(previewRef.current)
previewRef.current = null
}
setPreviewUrl(currentImage || null)
if (previewRef.current && previewRef.current !== currentImage) {
URL.revokeObjectURL(previewRef.current)
previewRef.current = null
}
}, [currentImage, previewUrl])
setPreviewUrl(currentImage || null)
}, [currentImage])
const validateFile = useCallback((file: File): string | null => {
if (file.size > MAX_FILE_SIZE) {
return `File "${file.name}" is too large. Maximum size is 5MB.`
}
if (!ACCEPTED_IMAGE_TYPES.includes(file.type)) {
return `File "${file.name}" is not a supported image format. Please use PNG or JPEG.`
return `File "${file.name}" is not a supported image format. Please use PNG, JPEG, or SVG.`
}
return null
}, [])

View File

@@ -38,6 +38,8 @@ const TagIcon: React.FC<{
</div>
)
const EXCLUDED_OUTPUT_TYPES = new Set(['starter', 'start_trigger', 'human_in_the_loop'] as const)
/**
* Props for the OutputSelect component
*/
@@ -121,7 +123,7 @@ export function OutputSelect({
if (blockArray.length === 0) return outputs
blockArray.forEach((block: any) => {
if (block.type === 'starter' || !block?.id || !block?.type) return
if (EXCLUDED_OUTPUT_TYPES.has(block.type) || !block?.id || !block?.type) return
const blockName =
block.name && typeof block.name === 'string'

View File

@@ -4,6 +4,7 @@ import {
DEFAULT_LAYOUT_PADDING,
DEFAULT_VERTICAL_SPACING,
} from '@/lib/workflows/autolayout/constants'
import { mergeSubblockState } from '@/stores/workflows/utils'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
const logger = createLogger('AutoLayoutUtils')
@@ -109,10 +110,12 @@ export async function applyAutoLayoutAndUpdateStore(
return { success: false, error: errorMessage }
}
// Update workflow store immediately with new positions
const layoutedBlocks = result.data?.layoutedBlocks || blocks
const mergedBlocks = mergeSubblockState(layoutedBlocks, workflowId)
const newWorkflowState = {
...workflowStore.getWorkflowState(),
blocks: result.data?.layoutedBlocks || blocks,
blocks: mergedBlocks,
lastSaved: Date.now(),
}
@@ -167,9 +170,10 @@ export async function applyAutoLayoutAndUpdateStore(
})
// Revert the store changes since database save failed
const revertBlocks = mergeSubblockState(blocks, workflowId)
useWorkflowStore.getState().replaceWorkflowState({
...workflowStore.getWorkflowState(),
blocks,
blocks: revertBlocks,
lastSaved: workflowStore.lastSaved,
})

View File

@@ -17,6 +17,7 @@ import {
ModalFooter,
ModalHeader,
Plus,
Skeleton,
UserPlus,
} from '@/components/emcn'
import { getDisplayPlanName, isFree } from '@/lib/billing/plan-helpers'
@@ -356,14 +357,16 @@ export function WorkspaceHeader({
}
}}
>
<div
className='flex h-[20px] w-[20px] flex-shrink-0 items-center justify-center rounded-sm font-medium text-caption text-white leading-none'
style={{
backgroundColor: activeWorkspaceFull?.color || 'var(--brand-accent)',
}}
>
{workspaceInitial}
</div>
{activeWorkspaceFull ? (
<div
className='flex h-[20px] w-[20px] flex-shrink-0 items-center justify-center rounded-sm font-medium text-caption text-white leading-none'
style={{ backgroundColor: activeWorkspaceFull.color ?? 'var(--brand-accent)' }}
>
{workspaceInitial}
</div>
) : (
<Skeleton className='h-[20px] w-[20px] flex-shrink-0 rounded-sm' />
)}
{!isCollapsed && (
<>
<span className='min-w-0 flex-1 truncate text-left font-base text-[var(--text-primary)] text-sm'>
@@ -400,14 +403,18 @@ export function WorkspaceHeader({
) : (
<>
<div className='flex items-center gap-2 px-0.5 py-0.5'>
<div
className='flex h-[32px] w-[32px] flex-shrink-0 items-center justify-center rounded-md font-medium text-caption text-white'
style={{
backgroundColor: activeWorkspaceFull?.color || 'var(--brand-accent)',
}}
>
{workspaceInitial}
</div>
{activeWorkspaceFull ? (
<div
className='flex h-[32px] w-[32px] flex-shrink-0 items-center justify-center rounded-md font-medium text-caption text-white'
style={{
backgroundColor: activeWorkspaceFull.color ?? 'var(--brand-accent)',
}}
>
{workspaceInitial}
</div>
) : (
<Skeleton className='h-[32px] w-[32px] flex-shrink-0 rounded-md' />
)}
<div className='flex min-w-0 flex-1 flex-col'>
<span className='truncate font-medium text-[var(--text-primary)] text-small'>
{activeWorkspace?.name || 'Loading...'}
@@ -580,12 +587,16 @@ export function WorkspaceHeader({
title={activeWorkspace?.name || 'Loading...'}
disabled
>
<div
className='flex h-[20px] w-[20px] flex-shrink-0 items-center justify-center rounded-sm font-medium text-caption text-white leading-none'
style={{ backgroundColor: activeWorkspaceFull?.color || 'var(--brand-accent)' }}
>
{workspaceInitial}
</div>
{activeWorkspaceFull ? (
<div
className='flex h-[20px] w-[20px] flex-shrink-0 items-center justify-center rounded-sm font-medium text-caption text-white leading-none'
style={{ backgroundColor: activeWorkspaceFull.color ?? 'var(--brand-accent)' }}
>
{workspaceInitial}
</div>
) : (
<Skeleton className='h-[20px] w-[20px] flex-shrink-0 rounded-sm' />
)}
{!isCollapsed && (
<>
<span className='min-w-0 flex-1 truncate text-left font-base text-[var(--text-primary)] text-sm'>

View File

@@ -0,0 +1,77 @@
import { createLogger } from '@sim/logger'
import { task } from '@trigger.dev/sdk'
import { PauseResumeManager } from '@/lib/workflows/executor/human-in-the-loop-manager'
const logger = createLogger('TriggerResumeExecution')
export type ResumeExecutionPayload = {
resumeEntryId: string
resumeExecutionId: string
pausedExecutionId: string
contextId: string
resumeInput: unknown
userId: string
workflowId: string
parentExecutionId: string
}
export async function executeResumeJob(payload: ResumeExecutionPayload) {
const { resumeExecutionId, pausedExecutionId, contextId, workflowId, parentExecutionId } = payload
logger.info('Starting background resume execution', {
resumeExecutionId,
pausedExecutionId,
contextId,
workflowId,
parentExecutionId,
})
try {
const pausedExecution = await PauseResumeManager.getPausedExecutionById(pausedExecutionId)
if (!pausedExecution) {
throw new Error(`Paused execution not found: ${pausedExecutionId}`)
}
const result = await PauseResumeManager.startResumeExecution({
resumeEntryId: payload.resumeEntryId,
resumeExecutionId: payload.resumeExecutionId,
pausedExecution,
contextId: payload.contextId,
resumeInput: payload.resumeInput,
userId: payload.userId,
})
logger.info('Background resume execution completed', {
resumeExecutionId,
workflowId,
success: result.success,
status: result.status,
})
return {
success: result.success,
workflowId,
executionId: resumeExecutionId,
parentExecutionId,
status: result.status,
output: result.output,
executedAt: new Date().toISOString(),
}
} catch (error) {
logger.error('Background resume execution failed', {
resumeExecutionId,
workflowId,
error: error instanceof Error ? error.message : String(error),
})
throw error
}
}
export const resumeExecutionTask = task({
id: 'resume-execution',
machine: 'medium-1x',
retry: {
maxAttempts: 1,
},
run: executeResumeJob,
})

View File

@@ -44,6 +44,7 @@ export type WorkflowExecutionPayload = {
correlation?: AsyncExecutionCorrelation
metadata?: Record<string, any>
callChain?: string[]
executionMode?: 'sync' | 'stream' | 'async'
}
/**
@@ -112,6 +113,7 @@ export async function executeWorkflowJob(payload: WorkflowExecutionPayload) {
isClientSession: false,
callChain: payload.callChain,
correlation,
executionMode: payload.executionMode ?? 'async',
}
const snapshot = new ExecutionSnapshot(

View File

@@ -117,6 +117,7 @@ export const CloudFormationBlock: BlockConfig<
type: 'short-input',
placeholder: '50',
condition: { field: 'operation', value: 'describe_stack_events' },
mode: 'advanced',
},
],
tools: {

View File

@@ -8,6 +8,7 @@ import type {
CloudWatchGetLogEventsResponse,
CloudWatchGetMetricStatisticsResponse,
CloudWatchListMetricsResponse,
CloudWatchPutMetricDataResponse,
CloudWatchQueryLogsResponse,
} from '@/tools/cloudwatch/types'
@@ -19,6 +20,7 @@ export const CloudWatchBlock: BlockConfig<
| CloudWatchDescribeAlarmsResponse
| CloudWatchListMetricsResponse
| CloudWatchGetMetricStatisticsResponse
| CloudWatchPutMetricDataResponse
> = {
type: 'cloudwatch',
name: 'CloudWatch',
@@ -27,6 +29,7 @@ export const CloudWatchBlock: BlockConfig<
'Integrate AWS CloudWatch into workflows. Run Log Insights queries, list log groups, retrieve log events, list and get metrics, and monitor alarms. Requires AWS access key and secret access key.',
category: 'tools',
integrationType: IntegrationType.Analytics,
docsLink: 'https://docs.sim.ai/tools/cloudwatch',
tags: ['cloud', 'monitoring'],
bgColor: 'linear-gradient(45deg, #B0084D 0%, #FF4F8B 100%)',
icon: CloudWatchIcon,
@@ -42,6 +45,7 @@ export const CloudWatchBlock: BlockConfig<
{ label: 'Describe Log Streams', id: 'describe_log_streams' },
{ label: 'List Metrics', id: 'list_metrics' },
{ label: 'Get Metric Statistics', id: 'get_metric_statistics' },
{ label: 'Publish Metric', id: 'put_metric_data' },
{ label: 'Describe Alarms', id: 'describe_alarms' },
],
value: () => 'query_logs',
@@ -69,7 +73,6 @@ export const CloudWatchBlock: BlockConfig<
password: true,
required: true,
},
// Query Logs fields
{
id: 'logGroupSelector',
title: 'Log Group',
@@ -124,6 +127,14 @@ Return ONLY the query — no explanations, no markdown code blocks.`,
value: ['query_logs', 'get_log_events', 'get_metric_statistics'],
},
required: { field: 'operation', value: ['query_logs', 'get_metric_statistics'] },
wandConfig: {
enabled: true,
prompt: `Generate a Unix epoch timestamp (in seconds) based on the user's description of a point in time.
Return ONLY the numeric timestamp - no explanations, no quotes, no extra text.`,
placeholder: 'Describe the start time (e.g., "1 hour ago", "beginning of today")...',
generationType: 'timestamp',
},
},
{
id: 'endTime',
@@ -135,8 +146,15 @@ Return ONLY the query — no explanations, no markdown code blocks.`,
value: ['query_logs', 'get_log_events', 'get_metric_statistics'],
},
required: { field: 'operation', value: ['query_logs', 'get_metric_statistics'] },
wandConfig: {
enabled: true,
prompt: `Generate a Unix epoch timestamp (in seconds) based on the user's description of a point in time.
Return ONLY the numeric timestamp - no explanations, no quotes, no extra text.`,
placeholder: 'Describe the end time (e.g., "now", "end of yesterday")...',
generationType: 'timestamp',
},
},
// Describe Log Groups fields
{
id: 'prefix',
title: 'Log Group Name Prefix',
@@ -144,7 +162,6 @@ Return ONLY the query — no explanations, no markdown code blocks.`,
placeholder: '/aws/lambda/',
condition: { field: 'operation', value: 'describe_log_groups' },
},
// Get Log Events / Describe Log Streams — shared log group selector
{
id: 'logGroupNameSelector',
title: 'Log Group',
@@ -167,7 +184,6 @@ Return ONLY the query — no explanations, no markdown code blocks.`,
required: { field: 'operation', value: ['get_log_events', 'describe_log_streams'] },
mode: 'advanced',
},
// Describe Log Streams — stream prefix filter
{
id: 'streamPrefix',
title: 'Stream Name Prefix',
@@ -175,7 +191,6 @@ Return ONLY the query — no explanations, no markdown code blocks.`,
placeholder: '2024/03/31/',
condition: { field: 'operation', value: 'describe_log_streams' },
},
// Get Log Events — log stream selector (cascading: depends on log group)
{
id: 'logStreamNameSelector',
title: 'Log Stream',
@@ -198,30 +213,92 @@ Return ONLY the query — no explanations, no markdown code blocks.`,
required: { field: 'operation', value: 'get_log_events' },
mode: 'advanced',
},
// List Metrics fields
{
id: 'metricNamespace',
title: 'Namespace',
type: 'short-input',
placeholder: 'e.g., AWS/EC2, AWS/Lambda, AWS/RDS',
condition: { field: 'operation', value: ['list_metrics', 'get_metric_statistics'] },
required: { field: 'operation', value: 'get_metric_statistics' },
placeholder: 'e.g., AWS/EC2, AWS/Lambda, Custom/MyApp',
condition: {
field: 'operation',
value: ['list_metrics', 'get_metric_statistics', 'put_metric_data'],
},
required: {
field: 'operation',
value: ['get_metric_statistics', 'put_metric_data'],
},
},
{
id: 'metricName',
title: 'Metric Name',
type: 'short-input',
placeholder: 'e.g., CPUUtilization, Invocations',
condition: { field: 'operation', value: ['list_metrics', 'get_metric_statistics'] },
required: { field: 'operation', value: 'get_metric_statistics' },
placeholder: 'e.g., CPUUtilization, Invocations, ErrorCount',
condition: {
field: 'operation',
value: ['list_metrics', 'get_metric_statistics', 'put_metric_data'],
},
required: {
field: 'operation',
value: ['get_metric_statistics', 'put_metric_data'],
},
},
{
id: 'recentlyActive',
title: 'Recently Active Only',
type: 'switch',
condition: { field: 'operation', value: 'list_metrics' },
mode: 'advanced',
},
{
id: 'metricValue',
title: 'Value',
type: 'short-input',
placeholder: 'e.g., 1, 42.5',
condition: { field: 'operation', value: 'put_metric_data' },
required: { field: 'operation', value: 'put_metric_data' },
},
{
id: 'metricUnit',
title: 'Unit',
type: 'dropdown',
options: [
{ label: 'None', id: 'None' },
{ label: 'Count', id: 'Count' },
{ label: 'Percent', id: 'Percent' },
{ label: 'Seconds', id: 'Seconds' },
{ label: 'Milliseconds', id: 'Milliseconds' },
{ label: 'Microseconds', id: 'Microseconds' },
{ label: 'Bytes', id: 'Bytes' },
{ label: 'Kilobytes', id: 'Kilobytes' },
{ label: 'Megabytes', id: 'Megabytes' },
{ label: 'Gigabytes', id: 'Gigabytes' },
{ label: 'Terabytes', id: 'Terabytes' },
{ label: 'Bits', id: 'Bits' },
{ label: 'Kilobits', id: 'Kilobits' },
{ label: 'Megabits', id: 'Megabits' },
{ label: 'Gigabits', id: 'Gigabits' },
{ label: 'Terabits', id: 'Terabits' },
{ label: 'Bytes/Second', id: 'Bytes/Second' },
{ label: 'Kilobytes/Second', id: 'Kilobytes/Second' },
{ label: 'Megabytes/Second', id: 'Megabytes/Second' },
{ label: 'Gigabytes/Second', id: 'Gigabytes/Second' },
{ label: 'Terabytes/Second', id: 'Terabytes/Second' },
{ label: 'Bits/Second', id: 'Bits/Second' },
{ label: 'Kilobits/Second', id: 'Kilobits/Second' },
{ label: 'Megabits/Second', id: 'Megabits/Second' },
{ label: 'Gigabits/Second', id: 'Gigabits/Second' },
{ label: 'Terabits/Second', id: 'Terabits/Second' },
{ label: 'Count/Second', id: 'Count/Second' },
],
value: () => 'None',
condition: { field: 'operation', value: 'put_metric_data' },
},
{
id: 'publishDimensions',
title: 'Dimensions',
type: 'table',
columns: ['name', 'value'],
condition: { field: 'operation', value: 'put_metric_data' },
},
// Get Metric Statistics fields
{
id: 'metricPeriod',
title: 'Period (seconds)',
@@ -251,7 +328,6 @@ Return ONLY the query — no explanations, no markdown code blocks.`,
columns: ['name', 'value'],
condition: { field: 'operation', value: 'get_metric_statistics' },
},
// Describe Alarms fields
{
id: 'alarmNamePrefix',
title: 'Alarm Name Prefix',
@@ -269,6 +345,7 @@ Return ONLY the query — no explanations, no markdown code blocks.`,
{ label: 'ALARM', id: 'ALARM' },
{ label: 'INSUFFICIENT_DATA', id: 'INSUFFICIENT_DATA' },
],
value: () => '',
condition: { field: 'operation', value: 'describe_alarms' },
},
{
@@ -280,9 +357,9 @@ Return ONLY the query — no explanations, no markdown code blocks.`,
{ label: 'Metric Alarm', id: 'MetricAlarm' },
{ label: 'Composite Alarm', id: 'CompositeAlarm' },
],
value: () => '',
condition: { field: 'operation', value: 'describe_alarms' },
},
// Shared limit field
{
id: 'limit',
title: 'Limit',
@@ -299,6 +376,7 @@ Return ONLY the query — no explanations, no markdown code blocks.`,
'describe_alarms',
],
},
mode: 'advanced',
},
],
tools: {
@@ -309,6 +387,7 @@ Return ONLY the query — no explanations, no markdown code blocks.`,
'cloudwatch_describe_log_streams',
'cloudwatch_list_metrics',
'cloudwatch_get_metric_statistics',
'cloudwatch_put_metric_data',
'cloudwatch_describe_alarms',
],
config: {
@@ -326,6 +405,8 @@ Return ONLY the query — no explanations, no markdown code blocks.`,
return 'cloudwatch_list_metrics'
case 'get_metric_statistics':
return 'cloudwatch_get_metric_statistics'
case 'put_metric_data':
return 'cloudwatch_put_metric_data'
case 'describe_alarms':
return 'cloudwatch_describe_alarms'
default:
@@ -479,6 +560,48 @@ Return ONLY the query — no explanations, no markdown code blocks.`,
}
}
case 'put_metric_data': {
if (!rest.metricNamespace) {
throw new Error('Namespace is required')
}
if (!rest.metricName) {
throw new Error('Metric name is required')
}
if (rest.metricValue === undefined || rest.metricValue === '') {
throw new Error('Metric value is required')
}
const numericValue = Number(rest.metricValue)
if (!Number.isFinite(numericValue)) {
throw new Error('Metric value must be a finite number')
}
return {
awsRegion,
awsAccessKeyId,
awsSecretAccessKey,
namespace: rest.metricNamespace,
metricName: rest.metricName,
value: numericValue,
...(rest.metricUnit && rest.metricUnit !== 'None' && { unit: rest.metricUnit }),
...(rest.publishDimensions && {
dimensions: (() => {
const dims = rest.publishDimensions
if (typeof dims === 'string') return dims
if (Array.isArray(dims)) {
const obj: Record<string, string> = {}
for (const row of dims) {
const name = row.cells?.name
const value = row.cells?.value
if (name && value !== undefined) obj[name] = String(value)
}
return JSON.stringify(obj)
}
return JSON.stringify(dims)
})(),
}),
}
}
case 'describe_alarms':
return {
awsRegion,
@@ -518,6 +641,12 @@ Return ONLY the query — no explanations, no markdown code blocks.`,
metricPeriod: { type: 'number', description: 'Granularity in seconds' },
metricStatistics: { type: 'string', description: 'Statistic type (Average, Sum, etc.)' },
metricDimensions: { type: 'json', description: 'Metric dimensions (Name/Value pairs)' },
metricValue: { type: 'number', description: 'Metric value to publish' },
metricUnit: { type: 'string', description: 'Metric unit (Count, Seconds, Bytes, etc.)' },
publishDimensions: {
type: 'json',
description: 'Dimensions for published metric (Name/Value pairs)',
},
alarmNamePrefix: { type: 'string', description: 'Alarm name prefix filter' },
stateValue: {
type: 'string',
@@ -567,5 +696,29 @@ Return ONLY the query — no explanations, no markdown code blocks.`,
type: 'array',
description: 'CloudWatch alarms with state and configuration',
},
success: {
type: 'boolean',
description: 'Whether the metric was published successfully',
},
namespace: {
type: 'string',
description: 'Metric namespace',
},
metricName: {
type: 'string',
description: 'Metric name',
},
value: {
type: 'number',
description: 'Published metric value',
},
unit: {
type: 'string',
description: 'Metric unit',
},
timestamp: {
type: 'string',
description: 'Timestamp when metric was published',
},
},
}
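
A concrete trace of the dimension-table conversion above may help. This is a standalone sketch; the `rows` literal is a hypothetical example of what the `publishDimensions` table subblock produces:

```ts
// Hypothetical rows from the 'publishDimensions' table subblock.
const rows = [
  { cells: { name: 'Environment', value: 'prod' } },
  { cells: { name: 'Host', value: 'web-01' } },
  { cells: { name: '', value: 'dropped' } }, // no name, so this row is skipped
]

// Same logic as the put_metric_data case above: collapse rows into a
// JSON string of name/value pairs for the tool's `dimensions` param.
const obj: Record<string, string> = {}
for (const row of rows) {
  const name = row.cells?.name
  const value = row.cells?.value
  if (name && value !== undefined) obj[name] = String(value)
}
console.log(JSON.stringify(obj)) // {"Environment":"prod","Host":"web-01"}
```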

View File

@@ -198,7 +198,6 @@ export const JiraServiceManagementBlock: BlockConfig<JsmResponse> = {
id: 'summary',
title: 'Summary',
type: 'short-input',
required: true,
placeholder: 'Enter request summary',
condition: { field: 'operation', value: 'create_request' },
wandConfig: {
@@ -238,6 +237,7 @@ Return ONLY the description text - no explanations.`,
title: 'Raise on Behalf Of',
type: 'short-input',
placeholder: 'Account ID to raise request on behalf of',
mode: 'advanced',
condition: { field: 'operation', value: 'create_request' },
},
{
@@ -245,6 +245,7 @@ Return ONLY the description text - no explanations.`,
title: 'Request Participants',
type: 'short-input',
placeholder: 'Comma-separated account IDs to add as participants',
mode: 'advanced',
condition: { field: 'operation', value: 'create_request' },
},
{
@@ -252,6 +253,7 @@ Return ONLY the description text - no explanations.`,
title: 'Channel',
type: 'short-input',
placeholder: 'Channel (e.g., portal, email)',
mode: 'advanced',
condition: { field: 'operation', value: 'create_request' },
},
{
@@ -260,6 +262,16 @@ Return ONLY the description text - no explanations.`,
type: 'long-input',
placeholder:
'JSON object of field values (e.g., {"summary": "Title", "customfield_10010": "value"})',
mode: 'advanced',
condition: { field: 'operation', value: 'create_request' },
},
{
id: 'formAnswers',
title: 'Form Answers',
type: 'long-input',
placeholder:
'JSON object for form-based request types (e.g., {"summary": {"text": "Title"}, "customfield_10010": {"choices": ["10320"]}})',
mode: 'advanced',
condition: { field: 'operation', value: 'create_request' },
},
{
@@ -571,8 +583,8 @@ Return ONLY the comment text - no explanations.`,
if (!params.requestTypeId) {
throw new Error('Request Type ID is required')
}
if (!params.summary) {
throw new Error('Summary is required')
if (!params.summary && !params.formAnswers) {
throw new Error('Summary is required (unless using Form Answers)')
}
return {
...baseParams,
@@ -584,7 +596,22 @@ Return ONLY the comment text - no explanations.`,
requestParticipants: params.requestParticipants,
channel: params.channel,
requestFieldValues: params.requestFieldValues
? JSON.parse(params.requestFieldValues)
? (() => {
try {
return JSON.parse(params.requestFieldValues)
} catch {
throw new Error('requestFieldValues must be valid JSON')
}
})()
: undefined,
formAnswers: params.formAnswers
? (() => {
try {
return JSON.parse(params.formAnswers)
} catch {
throw new Error('formAnswers must be valid JSON')
}
})()
: undefined,
}
case 'get_request':
@@ -826,6 +853,10 @@ Return ONLY the comment text - no explanations.`,
},
channel: { type: 'string', description: 'Channel (e.g., portal, email)' },
requestFieldValues: { type: 'string', description: 'JSON object of request field values' },
formAnswers: {
type: 'string',
description: 'JSON object of form answers for form-based request types',
},
searchQuery: { type: 'string', description: 'Filter request types by name' },
groupId: { type: 'string', description: 'Filter by request type group ID' },
expand: { type: 'string', description: 'Comma-separated fields to expand' },
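
The new summary-or-formAnswers rule is easiest to see in isolation. A minimal sketch of the same validation and JSON guard; the `params` literal is an illustrative example:

```ts
const params: { summary?: string; formAnswers?: string } = {
  formAnswers: '{"summary": {"text": "Printer broken"}, "customfield_10010": {"choices": ["10320"]}}',
}

// Summary may be omitted only when Form Answers is supplied.
if (!params.summary && !params.formAnswers) {
  throw new Error('Summary is required (unless using Form Answers)')
}

// Invalid JSON fails fast with a field-specific error instead of an
// unhandled SyntaxError escaping from JSON.parse.
const raw = params.formAnswers
let formAnswers: Record<string, unknown> | undefined
if (raw) {
  try {
    formAnswers = JSON.parse(raw)
  } catch {
    throw new Error('formAnswers must be valid JSON')
  }
}
```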

View File

@@ -42,20 +42,20 @@ const Trigger = TooltipPrimitive.Trigger
const Content = React.forwardRef<
React.ElementRef<typeof TooltipPrimitive.Content>,
React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content>
>(({ className, sideOffset = 6, ...props }, ref) => (
>(({ className, sideOffset = 6, children, ...props }, ref) => (
<TooltipPrimitive.Portal>
<TooltipPrimitive.Content
ref={ref}
sideOffset={sideOffset}
collisionPadding={8}
avoidCollisions={true}
avoidCollisions
className={cn(
'z-[var(--z-tooltip)] max-w-[260px] rounded-[4px] bg-[var(--tooltip-bg)] px-2 py-[3.5px] font-base text-white text-xs shadow-sm dark:text-black',
className
)}
{...props}
>
{props.children}
{children}
<TooltipPrimitive.Arrow className='fill-[var(--tooltip-bg)]' />
</TooltipPrimitive.Content>
</TooltipPrimitive.Portal>
@@ -120,22 +120,35 @@ const VIDEO_EXTENSIONS = ['.mp4', '.webm', '.ogg', '.mov'] as const
const Preview = ({ src, alt = '', width = 240, height, loop = true, className }: PreviewProps) => {
const pathname = src.toLowerCase().split('?')[0].split('#')[0]
const isVideo = VIDEO_EXTENSIONS.some((ext) => pathname.endsWith(ext))
const [isReady, setIsReady] = React.useState(!isVideo)
return (
<div className={cn('-mx-2 -mb-[3.5px] mt-1 overflow-hidden rounded-b-[4px]', className)}>
<div className={cn('-mx-[6px] -mb-[1.5px] mt-1.5 overflow-hidden rounded-[4px]', className)}>
{isVideo ? (
<video
src={src}
width={width}
height={height}
className='block w-full'
autoPlay
loop={loop}
muted
playsInline
preload='none'
aria-label={alt}
/>
<div className='relative'>
{!isReady && (
<div
className='animate-pulse bg-white/5'
style={{ aspectRatio: height ? `${width}/${height}` : '16/9' }}
/>
)}
<video
src={src}
width={width}
height={height}
className={cn(
'block w-full transition-opacity duration-200',
isReady ? 'opacity-100' : 'absolute inset-0 opacity-0'
)}
autoPlay
loop={loop}
muted
playsInline
preload='auto'
aria-label={alt}
onCanPlay={() => setIsReady(true)}
/>
</div>
) : (
<img
src={src}
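
The skeleton-until-ready change above generalizes to any media preview: keep the video mounted but invisible so it can buffer, and reserve the layout box so the tooltip never jumps. A minimal standalone sketch (React assumed; the class name and 16/9 fallback mirror the component above):

```tsx
import * as React from 'react'

function VideoPreview({ src, width = 240, height }: { src: string; width?: number; height?: number }) {
  const [isReady, setIsReady] = React.useState(false)
  return (
    <div style={{ position: 'relative' }}>
      {/* Pulse placeholder reserves the box until the video can play */}
      {!isReady && (
        <div className='animate-pulse' style={{ aspectRatio: height ? `${width}/${height}` : '16/9' }} />
      )}
      <video
        src={src}
        // Mounted but invisible while buffering so preload='auto' can run
        style={isReady ? {} : { position: 'absolute', inset: 0, opacity: 0 }}
        autoPlay
        muted
        playsInline
        preload='auto'
        onCanPlay={() => setIsReady(true)}
      />
    </div>
  )
}
```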

View File

@@ -1,37 +1,12 @@
'use client'
import { createContext, useContext, useEffect, useMemo, useState } from 'react'
import type { BrandConfig } from '@/lib/branding/types'
import { createContext, useContext, useMemo } from 'react'
import type { BrandConfig, OrganizationWhitelabelSettings } from '@/lib/branding/types'
import { getBrandConfig } from '@/ee/whitelabeling/branding'
import { useWhitelabelSettings } from '@/ee/whitelabeling/hooks/whitelabel'
import { generateOrgThemeCSS, mergeOrgBrandConfig } from '@/ee/whitelabeling/org-branding-utils'
import { useOrganizations } from '@/hooks/queries/organization'
export const BRAND_COOKIE_NAME = 'sim-wl'
const BRAND_COOKIE_MAX_AGE = 30 * 24 * 60 * 60
/**
* Brand assets and theme CSS cached in a cookie between page loads.
* Written client-side after org settings resolve; read server-side in the
* workspace layout so the correct branding is baked into the initial HTML.
*/
export interface BrandCache {
logoUrl?: string
wordmarkUrl?: string
/** Pre-generated `:root { ... }` CSS from the last resolved org settings. */
themeCSS?: string
}
function writeBrandCookie(cache: BrandCache | null): void {
try {
if (cache && Object.keys(cache).length > 0) {
document.cookie = `${BRAND_COOKIE_NAME}=${encodeURIComponent(JSON.stringify(cache))}; path=/; max-age=${BRAND_COOKIE_MAX_AGE}; SameSite=Lax`
} else {
document.cookie = `${BRAND_COOKIE_NAME}=; path=/; max-age=0; SameSite=Lax`
}
} catch {}
}
interface BrandingContextValue {
config: BrandConfig
}
@@ -43,69 +18,34 @@ const BrandingContext = createContext<BrandingContextValue>({
interface BrandingProviderProps {
children: React.ReactNode
/**
* Brand cache read server-side from the `sim-wl` cookie by the workspace
* layout. When present, the server renders the correct org branding from the
* first byte — no flash of any kind on page load or hard refresh.
* Org whitelabel settings fetched server-side from the DB by the workspace layout.
* Used as the source of truth until the React Query result becomes available,
* ensuring the correct org logo appears in the initial server HTML — no flash.
*/
initialCache?: BrandCache | null
initialOrgSettings?: OrganizationWhitelabelSettings | null
}
/**
* Provides merged branding (instance env vars + org DB settings) to the workspace.
* Injects a `<style>` tag with CSS variable overrides when org colors are configured.
*
* Flow:
* - First visit: org logo loads after the API call resolves (one-time flash).
* - All subsequent visits: the workspace layout reads the `sim-wl` cookie
* server-side and passes it as `initialCache`. The server renders the correct
* brand in the initial HTML — no flash of any kind.
*/
export function BrandingProvider({ children, initialCache }: BrandingProviderProps) {
const [cache, setCache] = useState<BrandCache | null>(initialCache ?? null)
const { data: orgsData, isLoading: orgsLoading } = useOrganizations()
export function BrandingProvider({ children, initialOrgSettings }: BrandingProviderProps) {
const { data: orgsData } = useOrganizations()
const orgId = orgsData?.activeOrganization?.id
const { data: orgSettings, isLoading: settingsLoading } = useWhitelabelSettings(orgId)
const { data: orgSettings } = useWhitelabelSettings(orgId)
useEffect(() => {
if (orgsLoading) return
const effectiveOrgSettings =
orgSettings !== undefined ? orgSettings : (initialOrgSettings ?? null)
if (!orgId) {
writeBrandCookie(null)
setCache(null)
return
}
const brandConfig = useMemo(
() => mergeOrgBrandConfig(effectiveOrgSettings, getBrandConfig()),
[effectiveOrgSettings]
)
if (settingsLoading) return
const themeCSS = orgSettings ? generateOrgThemeCSS(orgSettings) : null
const next: BrandCache = {}
if (orgSettings?.logoUrl) next.logoUrl = orgSettings.logoUrl
if (orgSettings?.wordmarkUrl) next.wordmarkUrl = orgSettings.wordmarkUrl
if (themeCSS) next.themeCSS = themeCSS
const newCache = Object.keys(next).length > 0 ? next : null
writeBrandCookie(newCache)
setCache(newCache)
}, [orgsLoading, orgId, settingsLoading, orgSettings])
const brandConfig = useMemo(() => {
const base = mergeOrgBrandConfig(orgSettings ?? null, getBrandConfig())
if (!orgSettings && cache) {
return {
...base,
...(cache.logoUrl && { logoUrl: cache.logoUrl }),
...(cache.wordmarkUrl && { wordmarkUrl: cache.wordmarkUrl }),
}
}
return base
}, [orgSettings, cache])
const themeCSS = useMemo(() => {
if (orgSettings) return generateOrgThemeCSS(orgSettings)
if (cache?.themeCSS) return cache.themeCSS
return ''
}, [orgSettings, cache])
const themeCSS = useMemo(
() => (effectiveOrgSettings ? generateOrgThemeCSS(effectiveOrgSettings) : ''),
[effectiveOrgSettings]
)
return (
<BrandingContext.Provider value={{ config: brandConfig }}>
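
For the no-flash path, the workspace layout now fetches org settings server-side and hands them to the provider. A sketch under stated assumptions: `getActiveOrgId` is a hypothetical session helper, the import paths are guesses at this repo's layout, and `getOrgWhitelabelSettings` is the real export shown further down in this diff:

```tsx
import type { ReactNode } from 'react'
import { getOrgWhitelabelSettings } from '@/ee/whitelabeling/org-branding' // path assumed
import { BrandingProvider } from '@/ee/whitelabeling/branding-provider' // path assumed

declare function getActiveOrgId(): Promise<string | null> // hypothetical session lookup

export default async function WorkspaceLayout({ children }: { children: ReactNode }) {
  const orgId = await getActiveOrgId()
  const initialOrgSettings = orgId ? await getOrgWhitelabelSettings(orgId) : null
  // The server HTML renders the correct org branding from the first byte;
  // the client's React Query result takes over once it resolves.
  return <BrandingProvider initialOrgSettings={initialOrgSettings}>{children}</BrandingProvider>
}
```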

View File

@@ -298,7 +298,7 @@ export function WhitelabelingSettings() {
<div className='grid grid-cols-2 gap-4'>
<SettingRow
label='Logo'
description='Shown in the collapsed sidebar. Square image recommended (PNG or JPEG, max 5MB).'
description='Shown in the collapsed sidebar. Square image recommended (PNG, JPEG, or SVG, max 5MB).'
>
<DropZone onDrop={logoUpload.handleFileDrop} className='flex items-center gap-4'>
<button
@@ -345,7 +345,7 @@ export function WhitelabelingSettings() {
<input
ref={logoUpload.fileInputRef}
type='file'
accept='image/png,image/jpeg,image/jpg'
accept='image/png,image/jpeg,image/jpg,image/svg+xml'
onChange={logoUpload.handleFileChange}
className='hidden'
/>
@@ -354,7 +354,7 @@ export function WhitelabelingSettings() {
<SettingRow
label='Wordmark'
description='Shown in the expanded sidebar. Wide image recommended (PNG or JPEG, max 5MB).'
description='Shown in the expanded sidebar. Wide image recommended (PNG, JPEG, or SVG, max 5MB).'
>
<DropZone onDrop={wordmarkUpload.handleFileDrop} className='flex items-center gap-4'>
<button
@@ -401,7 +401,7 @@ export function WhitelabelingSettings() {
<input
ref={wordmarkUpload.fileInputRef}
type='file'
accept='image/png,image/jpeg,image/jpg'
accept='image/png,image/jpeg,image/jpg,image/svg+xml'
onChange={wordmarkUpload.handleFileChange}
className='hidden'
/>

View File

@@ -12,13 +12,6 @@ export function mergeOrgBrandConfig(
return instanceConfig
}
const hasOrgBrand = Boolean(
orgSettings.brandName ||
orgSettings.logoUrl ||
orgSettings.wordmarkUrl ||
orgSettings.primaryColor
)
return {
...instanceConfig,
name: orgSettings.brandName || instanceConfig.name,
@@ -35,7 +28,14 @@ export function mergeOrgBrandConfig(
...(orgSettings.accentColor && { accentColor: orgSettings.accentColor }),
...(orgSettings.accentHoverColor && { accentHoverColor: orgSettings.accentHoverColor }),
},
isWhitelabeled: instanceConfig.isWhitelabeled || hasOrgBrand,
isWhitelabeled:
instanceConfig.isWhitelabeled ||
Boolean(
orgSettings.brandName ||
orgSettings.logoUrl ||
orgSettings.wordmarkUrl ||
orgSettings.primaryColor
),
}
}

View File

@@ -2,9 +2,7 @@ import { db } from '@sim/db'
import { organization } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import type { BrandConfig, OrganizationWhitelabelSettings } from '@/lib/branding/types'
import { getBrandConfig } from '@/ee/whitelabeling/branding'
import { mergeOrgBrandConfig } from '@/ee/whitelabeling/org-branding-utils'
import type { OrganizationWhitelabelSettings } from '@/lib/branding/types'
const logger = createLogger('OrgBranding')
@@ -27,11 +25,3 @@ export async function getOrgWhitelabelSettings(
return null
}
}
/**
* Get the merged brand config for an org, combining instance env vars with org DB settings.
*/
export async function getOrgBrandConfig(orgId: string): Promise<BrandConfig> {
const orgSettings = await getOrgWhitelabelSettings(orgId)
return mergeOrgBrandConfig(orgSettings, getBrandConfig())
}

View File

@@ -112,6 +112,7 @@ export function serializePauseSnapshot(
useDraftState,
startTime: metadataFromContext?.startTime ?? new Date().toISOString(),
isClientSession: metadataFromContext?.isClientSession,
executionMode: metadataFromContext?.executionMode,
}
const snapshot = new ExecutionSnapshot(

View File

@@ -36,6 +36,7 @@ export interface ExecutionMetadata {
}
callChain?: string[]
correlation?: AsyncExecutionCorrelation
executionMode?: 'sync' | 'stream' | 'async'
}
export interface SerializableExecutionState {

View File

@@ -18,6 +18,7 @@ export type PermissionState = 'prompt' | 'granted' | 'denied'
interface UseSpeechToTextProps {
onTranscript: (text: string) => void
onUsageLimitExceeded?: () => void
language?: string
}
@@ -31,6 +32,7 @@ interface UseSpeechToTextReturn {
export function useSpeechToText({
onTranscript,
onUsageLimitExceeded,
language,
}: UseSpeechToTextProps): UseSpeechToTextReturn {
const [isListening, setIsListening] = useState(false)
@@ -38,6 +40,7 @@ export function useSpeechToText({
const [permissionState, setPermissionState] = useState<PermissionState>('prompt')
const onTranscriptRef = useRef(onTranscript)
const onUsageLimitExceededRef = useRef(onUsageLimitExceeded)
const languageRef = useRef(language)
const mountedRef = useRef(true)
const startingRef = useRef(false)
@@ -55,6 +58,7 @@ export function useSpeechToText({
const committedTextRef = useRef('')
onTranscriptRef.current = onTranscript
onUsageLimitExceededRef.current = onUsageLimitExceeded
languageRef.current = language
useEffect(() => {
@@ -165,6 +169,10 @@ export function useSpeechToText({
})
if (!tokenResponse.ok) {
if (tokenResponse.status === 402) {
onUsageLimitExceededRef.current?.()
return false
}
const body = await tokenResponse.json().catch(() => ({}))
throw new Error(body.error || 'Failed to get speech token')
}

View File

@@ -1,13 +1,11 @@
import { db } from '@sim/db'
import { member, subscription, user, userStats } from '@sim/db/schema'
import { member, subscription, user } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq, inArray, sql } from 'drizzle-orm'
import { getEffectiveBillingStatus, isOrganizationBillingBlocked } from '@/lib/billing/core/access'
import { getHighestPrioritySubscription } from '@/lib/billing/core/plan'
import { getUserUsageLimit } from '@/lib/billing/core/usage'
import {
getPlanTierCredits,
isOrgPlan,
isPro as isPlanPro,
isTeam as isPlanTeam,
} from '@/lib/billing/plan-helpers'
@@ -16,12 +14,9 @@ import {
checkProPlan,
checkTeamPlan,
ENTITLED_SUBSCRIPTION_STATUSES,
getFreeTierLimit,
getPerUserMinimumLimit,
hasUsableSubscriptionAccess,
USABLE_SUBSCRIPTION_STATUSES,
} from '@/lib/billing/subscriptions/utils'
import type { UserSubscriptionState } from '@/lib/billing/types'
import {
isAccessControlEnabled,
isBillingEnabled,
@@ -485,145 +480,6 @@ export async function hasLiveSyncAccess(userId: string): Promise<boolean> {
}
}
/**
* Check if user has exceeded their cost limit based on current period usage
*/
export async function hasExceededCostLimit(userId: string): Promise<boolean> {
try {
if (!isBillingEnabled) {
return false
}
const subscription = await getHighestPrioritySubscription(userId)
let limit = getFreeTierLimit() // Default free tier limit
if (subscription) {
// Team/Enterprise: Use organization limit
if (isOrgPlan(subscription.plan)) {
limit = await getUserUsageLimit(userId)
logger.info('Using organization limit', {
userId,
plan: subscription.plan,
limit,
})
} else {
// Pro/Free: Use individual limit
limit = getPerUserMinimumLimit(subscription)
logger.info('Using subscription-based limit', {
userId,
plan: subscription.plan,
limit,
})
}
} else {
logger.info('Using free tier limit', { userId, limit })
}
// Get user stats to check current period usage
const statsRecords = await db.select().from(userStats).where(eq(userStats.userId, userId))
if (statsRecords.length === 0) {
return false
}
// Use current period cost instead of total cost for accurate billing period tracking
const currentCost = Number.parseFloat(
statsRecords[0].currentPeriodCost?.toString() || statsRecords[0].totalCost.toString()
)
logger.info('Checking cost limit', { userId, currentCost, limit })
return currentCost >= limit
} catch (error) {
logger.error('Error checking cost limit', { error, userId })
return false // Be conservative in case of error
}
}
/**
* Check if sharing features are enabled for user
*/
// Removed unused feature flag helpers: isSharingEnabled, isMultiplayerEnabled, isWorkspaceCollaborationEnabled
/**
* Get comprehensive subscription state for a user
* Single function to get all subscription information
*/
export async function getUserSubscriptionState(userId: string): Promise<UserSubscriptionState> {
try {
// Get subscription and user stats in parallel to minimize DB calls
const [subscription, statsRecords] = await Promise.all([
getHighestPrioritySubscription(userId),
db.select().from(userStats).where(eq(userStats.userId, userId)).limit(1),
])
// Determine plan types based on subscription (avoid redundant DB calls)
const isPro =
!isBillingEnabled ||
!!(
subscription &&
(checkProPlan(subscription) ||
checkTeamPlan(subscription) ||
checkEnterprisePlan(subscription))
)
const isTeam =
!isBillingEnabled ||
!!(subscription && (checkTeamPlan(subscription) || checkEnterprisePlan(subscription)))
const isEnterprise = !isBillingEnabled || !!(subscription && checkEnterprisePlan(subscription))
const isFree = !isPro && !isTeam && !isEnterprise
// Determine plan name
let planName = 'free'
if (isEnterprise) planName = 'enterprise'
else if (isTeam) planName = 'team'
else if (isPro) planName = 'pro'
// Check cost limit using already-fetched user stats
let hasExceededLimit = false
if (isBillingEnabled && statsRecords.length > 0) {
let limit = getFreeTierLimit() // Default free tier limit
if (subscription) {
// Team/Enterprise: Use organization limit
if (isOrgPlan(subscription.plan)) {
limit = await getUserUsageLimit(userId)
} else {
// Pro/Free: Use individual limit
limit = getPerUserMinimumLimit(subscription)
}
}
const currentCost = Number.parseFloat(
statsRecords[0].currentPeriodCost?.toString() || statsRecords[0].totalCost.toString()
)
hasExceededLimit = currentCost >= limit
}
return {
isPro,
isTeam,
isEnterprise,
isFree,
highestPrioritySubscription: subscription,
hasExceededLimit,
planName,
}
} catch (error) {
logger.error('Error getting user subscription state', { error, userId })
// Return safe defaults in case of error
return {
isPro: false,
isTeam: false,
isEnterprise: false,
isFree: true,
highestPrioritySubscription: null,
hasExceededLimit: false,
planName: 'free',
}
}
}
/**
* Send welcome email for Pro and Team plan subscriptions
*/
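
With `getUserSubscriptionState` and `hasExceededCostLimit` gone, callers in this commit switch to the primitive lookups directly. A sketch of the replacement pattern (the `isFreeUser` wrapper is illustrative; both imports are real modules used elsewhere in this diff):

```ts
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { isPaid } from '@/lib/billing/plan-helpers'

// Mirrors the check now used in chat-payload.ts: a user is "free" when
// there is no subscription or its plan is not a paid tier.
async function isFreeUser(userId: string): Promise<boolean> {
  const subscription = await getHighestPrioritySubscription(userId)
  return !subscription || !isPaid(subscription.plan)
}
```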

View File

@@ -9,7 +9,6 @@ export * from '@/lib/billing/core/organization'
export * from '@/lib/billing/core/subscription'
export {
getHighestPrioritySubscription as getActiveSubscription,
getUserSubscriptionState as getSubscriptionState,
hasAccessControlAccess,
hasCredentialSetsAccess,
hasPaidSubscription,

View File

@@ -73,16 +73,6 @@ export interface BillingData {
daysRemaining: number
}
export interface UserSubscriptionState {
isPro: boolean
isTeam: boolean
isEnterprise: boolean
isFree: boolean
highestPrioritySubscription: any | null
hasExceededLimit: boolean
planName: string
}
export interface SubscriptionPlan {
name: string
priceId: string

View File

@@ -3,18 +3,18 @@
*/
import { beforeEach, describe, expect, it, vi } from 'vitest'
vi.mock('@sim/logger', () => {
const createMockLogger = (): Record<string, any> => ({
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
withMetadata: vi.fn(() => createMockLogger()),
})
return { createLogger: vi.fn(() => createMockLogger()) }
})
const { mockGetHighestPrioritySubscription } = vi.hoisted(() => ({
mockGetHighestPrioritySubscription: vi.fn(),
}))
vi.mock('@/lib/billing/core/subscription', () => ({
getUserSubscriptionState: vi.fn(),
getHighestPrioritySubscription: mockGetHighestPrioritySubscription,
}))
vi.mock('@/lib/billing/plan-helpers', () => ({
isPaid: vi.fn(
(plan: string | null) => plan === 'pro' || plan === 'team' || plan === 'enterprise'
),
}))
vi.mock('@/lib/copilot/chat-context', () => ({
@@ -57,48 +57,41 @@ vi.mock('@/tools/params', () => ({
createUserToolSchema: vi.fn(() => ({ type: 'object', properties: {} })),
}))
import { getUserSubscriptionState } from '@/lib/billing/core/subscription'
import { buildIntegrationToolSchemas } from '@/lib/copilot/chat-payload'
const mockedGetUserSubscriptionState = getUserSubscriptionState as unknown as {
mockResolvedValue: (value: unknown) => void
mockRejectedValue: (value: unknown) => void
mockClear: () => void
}
describe('buildIntegrationToolSchemas', () => {
beforeEach(() => {
vi.clearAllMocks()
})
it('appends the email footer prompt for free users', async () => {
mockedGetUserSubscriptionState.mockResolvedValue({ isFree: true })
mockGetHighestPrioritySubscription.mockResolvedValue(null)
const toolSchemas = await buildIntegrationToolSchemas('user-free')
const gmailTool = toolSchemas.find((tool) => tool.name === 'gmail_send')
expect(getUserSubscriptionState).toHaveBeenCalledWith('user-free')
expect(mockGetHighestPrioritySubscription).toHaveBeenCalledWith('user-free')
expect(gmailTool?.description).toContain('sent with sim ai')
})
it('does not append the email footer prompt for paid users', async () => {
mockedGetUserSubscriptionState.mockResolvedValue({ isFree: false })
mockGetHighestPrioritySubscription.mockResolvedValue({ plan: 'pro', status: 'active' })
const toolSchemas = await buildIntegrationToolSchemas('user-paid')
const gmailTool = toolSchemas.find((tool) => tool.name === 'gmail_send')
expect(getUserSubscriptionState).toHaveBeenCalledWith('user-paid')
expect(mockGetHighestPrioritySubscription).toHaveBeenCalledWith('user-paid')
expect(gmailTool?.description).toBe('Send emails using Gmail')
})
it('still builds integration tools when subscription lookup fails', async () => {
mockedGetUserSubscriptionState.mockRejectedValue(new Error('db unavailable'))
mockGetHighestPrioritySubscription.mockRejectedValue(new Error('db unavailable'))
const toolSchemas = await buildIntegrationToolSchemas('user-error')
const gmailTool = toolSchemas.find((tool) => tool.name === 'gmail_send')
const brandfetchTool = toolSchemas.find((tool) => tool.name === 'brandfetch_search')
expect(getUserSubscriptionState).toHaveBeenCalledWith('user-error')
expect(mockGetHighestPrioritySubscription).toHaveBeenCalledWith('user-error')
expect(gmailTool?.description).toBe('Send emails using Gmail')
expect(brandfetchTool?.description).toBe('Search for brands by company name')
})

View File

@@ -1,5 +1,6 @@
import { createLogger } from '@sim/logger'
import { getUserSubscriptionState } from '@/lib/billing/core/subscription'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { isPaid } from '@/lib/billing/plan-helpers'
import { getCopilotToolDescription } from '@/lib/copilot/tool-descriptions'
import { isHosted } from '@/lib/core/config/feature-flags'
import { createMcpToolId } from '@/lib/mcp/utils'
@@ -57,10 +58,10 @@ export async function buildIntegrationToolSchemas(
let shouldAppendEmailTagline = false
try {
const subscriptionState = await getUserSubscriptionState(userId)
shouldAppendEmailTagline = subscriptionState.isFree
const subscription = await getHighestPrioritySubscription(userId)
shouldAppendEmailTagline = !subscription || !isPaid(subscription.plan)
} catch (error) {
reqLogger.warn('Failed to load subscription state for copilot tool descriptions', {
reqLogger.warn('Failed to load subscription for copilot tool descriptions', {
userId,
error: error instanceof Error ? error.message : String(error),
})

View File

@@ -95,6 +95,11 @@ export class BullMQJobQueue implements JobQueueBackend {
return toJob('schedule-execution', scheduleJob)
}
const resumeJob = await getBullMQQueue('resume-execution').getJob(jobId)
if (resumeJob) {
return toJob('resume-execution', resumeJob)
}
return null
}

View File

@@ -19,6 +19,7 @@ const JOB_TYPE_TO_TASK_ID: Record<JobType, string> = {
'workflow-execution': 'workflow-execution',
'schedule-execution': 'schedule-execution',
'webhook-execution': 'webhook-execution',
'resume-execution': 'resume-execution',
}
/**

View File

@@ -20,7 +20,11 @@ export const JOB_STATUS = {
export type JobStatus = (typeof JOB_STATUS)[keyof typeof JOB_STATUS]
export type JobType = 'workflow-execution' | 'schedule-execution' | 'webhook-execution'
export type JobType =
| 'workflow-execution'
| 'schedule-execution'
| 'webhook-execution'
| 'resume-execution'
export type AsyncExecutionCorrelationSource = 'workflow' | 'schedule' | 'webhook'

View File

@@ -16,6 +16,7 @@ export interface BullMQJobData<TPayload> {
let workflowQueueInstance: Queue | null = null
let webhookQueueInstance: Queue | null = null
let scheduleQueueInstance: Queue | null = null
let resumeQueueInstance: Queue | null = null
let knowledgeConnectorSyncQueueInstance: Queue | null = null
let knowledgeDocumentProcessingQueueInstance: Queue | null = null
let mothershipJobExecutionQueueInstance: Queue | null = null
@@ -45,6 +46,12 @@ function getQueueDefaultOptions(type: JobType) {
removeOnComplete: { age: 24 * 60 * 60 },
removeOnFail: { age: 3 * 24 * 60 * 60 },
}
case 'resume-execution':
return {
attempts: 1,
removeOnComplete: { age: 24 * 60 * 60 },
removeOnFail: { age: 3 * 24 * 60 * 60 },
}
}
}
@@ -121,6 +128,11 @@ export function getBullMQQueue(type: JobType): Queue {
scheduleQueueInstance = createQueue(type)
}
return scheduleQueueInstance
case 'resume-execution':
if (!resumeQueueInstance) {
resumeQueueInstance = createQueue(type)
}
return resumeQueueInstance
}
}
@@ -129,6 +141,7 @@ export function getBullMQQueueByName(queueName: WorkspaceDispatchQueueName): Que
case 'workflow-execution':
case 'webhook-execution':
case 'schedule-execution':
case 'resume-execution':
return getBullMQQueue(queueName)
case KNOWLEDGE_CONNECTOR_SYNC_QUEUE:
return getKnowledgeConnectorSyncQueue()
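
Enqueuing onto the new queue follows the same shape as the other job types. A sketch; the import path and the job name string are assumptions based on the worker's imports, while `ResumeExecutionPayload` is the real type from `@/background/resume-execution` and the `{ payload, metadata }` data shape matches what the processor reads:

```ts
import { getBullMQQueue, type BullMQJobData } from '@/lib/core/bullmq' // path per worker imports
import type { ResumeExecutionPayload } from '@/background/resume-execution'

async function enqueueResume(
  payload: ResumeExecutionPayload,
  metadata: BullMQJobData<ResumeExecutionPayload>['metadata']
) {
  // attempts: 1 and the retention windows come from getQueueDefaultOptions above.
  await getBullMQQueue('resume-execution').add('resume-execution', { payload, metadata })
}
```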

View File

@@ -5,7 +5,8 @@
*/
import { createLogger } from '@sim/logger'
import { getUserSubscriptionState } from '@/lib/billing/core/subscription'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { getPlanTypeForLimits } from '@/lib/billing/plan-helpers'
import { getWorkspaceBilledAccountUserId } from '@/lib/workspaces/utils'
import { type PlanName, TABLE_PLAN_LIMITS, type TablePlanLimits } from './constants'
@@ -29,8 +30,8 @@ export async function getWorkspaceTableLimits(workspaceId: string): Promise<Tabl
return TABLE_PLAN_LIMITS.free
}
const subscriptionState = await getUserSubscriptionState(billedAccountUserId)
const planName = subscriptionState.planName as PlanName
const subscription = await getHighestPrioritySubscription(billedAccountUserId)
const planName = getPlanTypeForLimits(subscription?.plan) as PlanName
const limits = TABLE_PLAN_LIMITS[planName] ?? TABLE_PLAN_LIMITS.free

View File

@@ -30,6 +30,11 @@ export function processStreamingBlockLog(log: BlockLog, streamedContent: string)
return false
}
// Skip recalculation if cost was explicitly set by the billing layer (e.g. BYOK zero cost)
if (log.output?.cost?.pricing) {
return false
}
// Check if we have content to tokenize
if (!streamedContent?.trim()) {
return false

View File

@@ -32,7 +32,7 @@ export const MIME_TYPE_MAPPING: Record<string, 'image' | 'document' | 'audio' |
'image/png': 'image',
'image/gif': 'image',
'image/webp': 'image',
// SVG is XML text, not a raster image — handled separately in createFileContent
'image/svg+xml': 'image', // SVG upload is allowed; createFileContent handles it separately for Claude API
// Documents
'application/pdf': 'document',

View File

@@ -30,6 +30,7 @@ export interface ExecuteWorkflowOptions {
startBlockId: string
sourceSnapshot: SerializableExecutionState
}
executionMode?: 'sync' | 'stream' | 'async'
}
export interface WorkflowInfo {
@@ -70,6 +71,7 @@ export async function executeWorkflow(
useDraftState: streamConfig?.useDraftState ?? false,
startTime: new Date().toISOString(),
isClientSession: false,
executionMode: streamConfig?.executionMode,
}
const snapshot = new ExecutionSnapshot(

View File

@@ -111,6 +111,10 @@ interface StartResumeExecutionArgs {
contextId: string
resumeInput: unknown
userId: string
sendEvent?: (event: ExecutionEvent) => void
onStream?: (streamingExec: StreamingExecution) => Promise<void>
onBlockComplete?: (blockId: string, output: unknown) => Promise<void>
abortSignal?: AbortSignal
}
export class PauseResumeManager {
@@ -293,8 +297,18 @@ export class PauseResumeManager {
}
static async startResumeExecution(args: StartResumeExecutionArgs): Promise<ExecutionResult> {
const { resumeEntryId, resumeExecutionId, pausedExecution, contextId, resumeInput, userId } =
args
const {
resumeEntryId,
resumeExecutionId,
pausedExecution,
contextId,
resumeInput,
userId,
sendEvent,
onStream,
onBlockComplete,
abortSignal,
} = args
const pausePointsRecord = pausedExecution.pausePoints as Record<string, any>
const pausePointForContext = pausePointsRecord?.[contextId]
@@ -309,6 +323,10 @@ export class PauseResumeManager {
contextId,
resumeInput,
userId,
sendEvent,
onStream,
onBlockComplete,
abortSignal,
})
if (result.status === 'paused') {
@@ -384,8 +402,22 @@ export class PauseResumeManager {
contextId: string
resumeInput: unknown
userId: string
sendEvent?: (event: ExecutionEvent) => void
onStream?: (streamingExec: StreamingExecution) => Promise<void>
onBlockComplete?: (blockId: string, output: unknown) => Promise<void>
abortSignal?: AbortSignal
}): Promise<ExecutionResult> {
const { resumeExecutionId, pausedExecution, contextId, resumeInput, userId } = args
const {
resumeExecutionId,
pausedExecution,
contextId,
resumeInput,
userId,
sendEvent,
onStream: externalOnStream,
onBlockComplete: externalOnBlockComplete,
abortSignal: externalAbortSignal,
} = args
const parentExecutionId = pausedExecution.executionId
await db
@@ -798,6 +830,7 @@ export class PauseResumeManager {
localEventSeq++
event.eventId = localEventSeq
eventWriter.write(event).catch(() => {})
sendEvent?.(event)
}
writeBufferedEvent({
@@ -887,6 +920,10 @@ export class PauseResumeManager {
workflowId,
data: hasError ? { ...sharedData, error: output?.error } : { ...sharedData, output },
} as ExecutionEvent)
if (externalOnBlockComplete) {
await externalOnBlockComplete(blockId, callbackData.output)
}
},
onChildWorkflowInstanceReady: (
blockId: string,
@@ -911,6 +948,11 @@ export class PauseResumeManager {
} as ExecutionEvent)
},
onStream: async (streamingExec: StreamingExecution) => {
if (externalOnStream) {
await externalOnStream(streamingExec)
return
}
const blockId = (streamingExec.execution as unknown as Record<string, unknown>)
.blockId as string
const reader = streamingExec.stream.getReader()
@@ -949,9 +991,9 @@ export class PauseResumeManager {
},
}
const timeoutController = createTimeoutAbortController(
preprocessingResult.executionTimeout?.async
)
const timeoutController = externalAbortSignal
? null
: createTimeoutAbortController(preprocessingResult.executionTimeout?.async)
let result: ExecutionResult
let finalMetaStatus: 'complete' | 'error' | 'cancelled' = 'complete'
@@ -963,15 +1005,15 @@ export class PauseResumeManager {
skipLogCreation: true,
includeFileBase64: true,
base64MaxBytes: undefined,
abortSignal: timeoutController.signal,
abortSignal: externalAbortSignal ?? timeoutController?.signal,
})
if (
result.status === 'cancelled' &&
timeoutController.isTimedOut() &&
timeoutController.timeoutMs
timeoutController?.isTimedOut() &&
timeoutController?.timeoutMs
) {
const timeoutErrorMessage = getTimeoutErrorMessage(null, timeoutController.timeoutMs)
const timeoutErrorMessage = getTimeoutErrorMessage(null, timeoutController!.timeoutMs)
logger.info('Resume execution timed out', {
resumeExecutionId,
timeoutMs: timeoutController.timeoutMs,
@@ -1042,7 +1084,7 @@ export class PauseResumeManager {
finalMetaStatus = 'error'
throw execError
} finally {
timeoutController.cleanup()
timeoutController?.cleanup()
try {
await eventWriter.close()
} catch (closeError) {
@@ -1246,6 +1288,17 @@ export class PauseResumeManager {
)
}
static async getPausedExecutionById(
id: string
): Promise<typeof pausedExecutions.$inferSelect | null> {
const rows = await db
.select()
.from(pausedExecutions)
.where(eq(pausedExecutions.id, id))
.limit(1)
return rows[0] ?? null
}
static async getPausedExecutionDetail(options: {
workflowId: string
executionId: string
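
The new optional hooks let a caller, for example a streaming resume route, observe the resume live instead of only through the buffered event log. A sketch with placeholder values: the import path is assumed, `forwardToClient` is a hypothetical SSE writer, and passing `abortSignal` suppresses the manager's own timeout controller, as the change above shows:

```ts
import { PauseResumeManager } from '@/lib/workflows/executor/pause-resume' // path assumed

declare const args: {
  resumeEntryId: string
  resumeExecutionId: string
  pausedExecution: any
  contextId: string
  resumeInput: unknown
  userId: string
}
declare function forwardToClient(event: unknown): void // hypothetical SSE writer

const result = await PauseResumeManager.startResumeExecution({
  ...args,
  // Mirror each executor event to the live stream as well as the buffer.
  sendEvent: (event) => forwardToClient(event),
  // Caller-owned cancellation/timeout; the manager skips its own controller.
  abortSignal: AbortSignal.timeout(10 * 60 * 1000),
})
```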

View File

@@ -12,7 +12,6 @@ import {
cleanupExecutionBase64Cache,
hydrateUserFilesWithBase64,
} from '@/lib/uploads/utils/user-file-base64.server'
import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow'
import type { BlockLog, ExecutionResult, StreamingExecution } from '@/executor/types'
/**
@@ -36,25 +35,24 @@ export interface StreamingConfig {
timeoutMs?: number
}
export type StreamingExecutorFn = (callbacks: {
onStream: (streamingExec: StreamingExecution) => Promise<void>
onBlockComplete: (blockId: string, output: unknown) => Promise<void>
abortSignal: AbortSignal
}) => Promise<ExecutionResult>
export interface StreamingResponseOptions {
requestId: string
workflow: {
id: string
userId: string
workspaceId?: string | null
isDeployed?: boolean
variables?: Record<string, unknown>
}
input: unknown
executingUserId: string
streamConfig: StreamingConfig
executionId?: string
executeFn: StreamingExecutorFn
}
interface StreamingState {
streamedChunks: Map<string, string[]>
processedOutputs: Set<string>
streamCompletionTimes: Map<string, number>
completedBlockIds: Set<string>
}
function resolveStreamedContent(state: StreamingState): Map<string, string> {
@@ -77,6 +75,7 @@ async function buildMinimalResult(
result: ExecutionResult,
selectedOutputs: string[] | undefined,
streamedContent: Map<string, string>,
completedBlockIds: Set<string>,
requestId: string,
includeFileBase64: boolean,
base64MaxBytes: number | undefined
@@ -87,6 +86,11 @@ async function buildMinimalResult(
output: {} as Record<string, unknown>,
}
if (result.status === 'paused') {
minimalResult.output = result.output || {}
return minimalResult
}
if (!selectedOutputs?.length) {
minimalResult.output = result.output || {}
return minimalResult
@@ -103,6 +107,10 @@ async function buildMinimalResult(
continue
}
if (!completedBlockIds.has(blockId)) {
continue
}
if (isDangerousKey(blockId)) {
logger.warn(`[${requestId}] Blocked dangerous blockId: ${blockId}`)
continue
@@ -182,7 +190,7 @@ async function completeLoggingSession(result: ExecutionResult): Promise<void> {
export async function createStreamingResponse(
options: StreamingResponseOptions
): Promise<ReadableStream> {
const { requestId, workflow, input, executingUserId, streamConfig, executionId } = options
const { requestId, streamConfig, executionId, executeFn } = options
const timeoutController = createTimeoutAbortController(streamConfig.timeoutMs)
return new ReadableStream({
@@ -191,6 +199,7 @@ export async function createStreamingResponse(
streamedChunks: new Map(),
processedOutputs: new Set(),
streamCompletionTimes: new Map(),
completedBlockIds: new Set(),
}
const sendChunk = (blockId: string, content: string) => {
@@ -250,6 +259,8 @@ export async function createStreamingResponse(
const base64MaxBytes = streamConfig.base64MaxBytes
const onBlockCompleteCallback = async (blockId: string, output: unknown) => {
state.completedBlockIds.add(blockId)
if (!streamConfig.selectedOutputs?.length) {
return
}
@@ -284,25 +295,11 @@ export async function createStreamingResponse(
}
try {
const result = await executeWorkflow(
workflow,
requestId,
input,
executingUserId,
{
enabled: true,
selectedOutputs: streamConfig.selectedOutputs,
isSecureMode: streamConfig.isSecureMode,
workflowTriggerType: streamConfig.workflowTriggerType,
onStream: onStreamCallback,
onBlockComplete: onBlockCompleteCallback,
skipLoggingComplete: true,
includeFileBase64: streamConfig.includeFileBase64,
base64MaxBytes: streamConfig.base64MaxBytes,
abortSignal: timeoutController.signal,
},
executionId
)
const result = await executeFn({
onStream: onStreamCallback,
onBlockComplete: onBlockCompleteCallback,
abortSignal: timeoutController.signal,
})
const streamedContent =
state.streamedChunks.size > 0 ? resolveStreamedContent(state) : new Map<string, string>()
@@ -336,12 +333,21 @@ export async function createStreamingResponse(
result,
streamConfig.selectedOutputs,
streamedContent,
state.completedBlockIds,
requestId,
streamConfig.includeFileBase64 ?? true,
streamConfig.base64MaxBytes
)
controller.enqueue(encodeSSE({ event: 'final', data: minimalResult }))
controller.enqueue(
encodeSSE({
event: 'final',
data: {
...minimalResult,
...(result.status === 'paused' && { status: 'paused' }),
},
})
)
}
controller.enqueue(encodeSSE('[DONE]'))
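
After this refactor, `createStreamingResponse` no longer calls `executeWorkflow` itself; the route hands it an `executeFn` closure. A sketch of the new call shape (route values abridged as `declare const`; the closure body mirrors the inline call removed above, and the `createStreamingResponse` import path is assumed):

```ts
import { executeWorkflow } from '@/lib/workflows/executor/execute-workflow'
import { createStreamingResponse } from '@/lib/workflows/executor/streaming' // path assumed

declare const route: {
  requestId: string
  workflow: any
  input: unknown
  executingUserId: string
  streamConfig: any
  executionId?: string
}

const stream = await createStreamingResponse({
  requestId: route.requestId,
  streamConfig: route.streamConfig,
  executionId: route.executionId,
  // The route owns the executor call, so the streaming plumbing stays
  // generic and the resume path can plug in PauseResumeManager the same way.
  executeFn: ({ onStream, onBlockComplete, abortSignal }) =>
    executeWorkflow(route.workflow, route.requestId, route.input, route.executingUserId, {
      enabled: true,
      selectedOutputs: route.streamConfig.selectedOutputs,
      onStream,
      onBlockComplete,
      abortSignal,
    }, route.executionId),
})
```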

View File

@@ -54,6 +54,38 @@ function isReadableStream(response: any): response is ReadableStream {
return response instanceof ReadableStream
}
const ZERO_COST = Object.freeze({
input: 0,
output: 0,
total: 0,
pricing: Object.freeze({ input: 0, output: 0, updatedAt: new Date(0).toISOString() }),
})
/**
* Prevents streaming callbacks from writing non-zero model cost for BYOK users
* while preserving tool costs. The cost property is intercepted via defineProperty because
* providers set cost inside streaming callbacks that fire after this function returns.
*/
function zeroCostForBYOK(response: StreamingExecution): void {
const output = response.execution?.output
if (!output || typeof output !== 'object') {
logger.warn('zeroCostForBYOK: output not available at intercept time; cost may not be zeroed')
return
}
let toolCost = 0
Object.defineProperty(output, 'cost', {
get: () => (toolCost > 0 ? { ...ZERO_COST, toolCost, total: toolCost } : ZERO_COST),
set: (value: Record<string, unknown>) => {
if (value?.toolCost && typeof value.toolCost === 'number') {
toolCost = value.toolCost
}
},
configurable: true,
enumerable: true,
})
}
export async function executeProviderRequest(
providerId: string,
request: ProviderRequest
@@ -80,6 +112,12 @@ export async function executeProviderRequest(
)
resolvedRequest = { ...resolvedRequest, apiKey: result.apiKey }
isBYOK = result.isBYOK
logger.info('API key resolved', {
provider: providerId,
model: request.model,
workspaceId: request.workspaceId,
isBYOK,
})
} catch (error) {
logger.error('Failed to resolve API key:', {
provider: providerId,
@@ -118,7 +156,10 @@ export async function executeProviderRequest(
const response = await provider.executeRequest(sanitizedRequest)
if (isStreamingExecution(response)) {
logger.info('Provider returned StreamingExecution')
logger.info('Provider returned StreamingExecution', { isBYOK })
if (isBYOK) {
zeroCostForBYOK(response)
}
return response
}
@@ -154,9 +195,9 @@ export async function executeProviderRequest(
},
}
if (isBYOK) {
logger.debug(`Not billing model usage for ${response.model} - workspace BYOK key used`)
logger.info(`Not billing model usage for ${response.model} - workspace BYOK key used`)
} else {
logger.debug(
logger.info(
`Not billing model usage for ${response.model} - user provided API key or not hosted model`
)
}
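
A standalone demo of the `defineProperty` intercept above, showing why later cost writes from streaming callbacks cannot reintroduce model cost (the write values are illustrative):

```ts
const ZERO_COST = Object.freeze({
  input: 0,
  output: 0,
  total: 0,
  pricing: Object.freeze({ input: 0, output: 0, updatedAt: new Date(0).toISOString() }),
})

const output: Record<string, unknown> = {}
let toolCost = 0
Object.defineProperty(output, 'cost', {
  // Reads always see zero model cost; only an accumulated toolCost survives.
  get: () => (toolCost > 0 ? { ...ZERO_COST, toolCost, total: toolCost } : ZERO_COST),
  // Writes can only contribute their toolCost; everything else is discarded.
  set: (value: any) => {
    if (value?.toolCost && typeof value.toolCost === 'number') toolCost = value.toolCost
  },
  configurable: true,
  enumerable: true,
})

// A provider's streaming callback later tries to bill model + tool usage:
output.cost = { input: 0.01, output: 0.02, total: 0.35, toolCost: 0.32 }
console.log((output.cost as any).total) // 0.32: only the tool cost survives
```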

View File

@@ -11,7 +11,7 @@ export const createNamedQueryTool: ToolConfig<
id: 'athena_create_named_query',
name: 'Athena Create Named Query',
description: 'Create a saved/named query in AWS Athena',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -6,7 +6,7 @@ export const getNamedQueryTool: ToolConfig<AthenaGetNamedQueryParams, AthenaGetN
id: 'athena_get_named_query',
name: 'Athena Get Named Query',
description: 'Get details of a saved/named query in AWS Athena',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -11,7 +11,7 @@ export const getQueryExecutionTool: ToolConfig<
id: 'athena_get_query_execution',
name: 'Athena Get Query Execution',
description: 'Get the status and details of an Athena query execution',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -11,7 +11,7 @@ export const getQueryResultsTool: ToolConfig<
id: 'athena_get_query_results',
name: 'Athena Get Query Results',
description: 'Retrieve the results of a completed Athena query execution',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -11,7 +11,7 @@ export const listNamedQueriesTool: ToolConfig<
id: 'athena_list_named_queries',
name: 'Athena List Named Queries',
description: 'List saved/named query IDs in AWS Athena',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -11,7 +11,7 @@ export const listQueryExecutionsTool: ToolConfig<
id: 'athena_list_query_executions',
name: 'Athena List Query Executions',
description: 'List recent Athena query execution IDs',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -5,7 +5,7 @@ export const startQueryTool: ToolConfig<AthenaStartQueryParams, AthenaStartQuery
id: 'athena_start_query',
name: 'Athena Start Query',
description: 'Start an SQL query execution in AWS Athena',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -5,7 +5,7 @@ export const stopQueryTool: ToolConfig<AthenaStopQueryParams, AthenaStopQueryRes
id: 'athena_stop_query',
name: 'Athena Stop Query',
description: 'Stop a running Athena query execution',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -11,7 +11,7 @@ export const describeStackDriftDetectionStatusTool: ToolConfig<
id: 'cloudformation_describe_stack_drift_detection_status',
name: 'CloudFormation Describe Stack Drift Detection Status',
description: 'Check the status of a stack drift detection operation',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -11,7 +11,7 @@ export const describeStackEventsTool: ToolConfig<
id: 'cloudformation_describe_stack_events',
name: 'CloudFormation Describe Stack Events',
description: 'Get the event history for a CloudFormation stack',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -11,7 +11,7 @@ export const describeStacksTool: ToolConfig<
id: 'cloudformation_describe_stacks',
name: 'CloudFormation Describe Stacks',
description: 'List and describe CloudFormation stacks',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -11,7 +11,7 @@ export const detectStackDriftTool: ToolConfig<
id: 'cloudformation_detect_stack_drift',
name: 'CloudFormation Detect Stack Drift',
description: 'Initiate drift detection on a CloudFormation stack',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -11,7 +11,7 @@ export const getTemplateTool: ToolConfig<
id: 'cloudformation_get_template',
name: 'CloudFormation Get Template',
description: 'Retrieve the template body for a CloudFormation stack',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -1,3 +1,5 @@
export * from './types'
import { describeStackDriftDetectionStatusTool } from '@/tools/cloudformation/describe_stack_drift_detection_status'
import { describeStackEventsTool } from '@/tools/cloudformation/describe_stack_events'
import { describeStacksTool } from '@/tools/cloudformation/describe_stacks'

View File

@@ -11,7 +11,7 @@ export const listStackResourcesTool: ToolConfig<
id: 'cloudformation_list_stack_resources',
name: 'CloudFormation List Stack Resources',
description: 'List all resources in a CloudFormation stack',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -11,7 +11,7 @@ export const validateTemplateTool: ToolConfig<
id: 'cloudformation_validate_template',
name: 'CloudFormation Validate Template',
description: 'Validate a CloudFormation template for syntax and structural correctness',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -11,7 +11,7 @@ export const describeAlarmsTool: ToolConfig<
id: 'cloudwatch_describe_alarms',
name: 'CloudWatch Describe Alarms',
description: 'List and filter CloudWatch alarms',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -11,7 +11,7 @@ export const describeLogGroupsTool: ToolConfig<
id: 'cloudwatch_describe_log_groups',
name: 'CloudWatch Describe Log Groups',
description: 'List available CloudWatch log groups',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -11,7 +11,7 @@ export const describeLogStreamsTool: ToolConfig<
id: 'cloudwatch_describe_log_streams',
name: 'CloudWatch Describe Log Streams',
description: 'List log streams within a CloudWatch log group',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -11,7 +11,7 @@ export const getLogEventsTool: ToolConfig<
id: 'cloudwatch_get_log_events',
name: 'CloudWatch Get Log Events',
description: 'Retrieve log events from a specific CloudWatch log stream',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -11,7 +11,7 @@ export const getMetricStatisticsTool: ToolConfig<
id: 'cloudwatch_get_metric_statistics',
name: 'CloudWatch Get Metric Statistics',
description: 'Get statistics for a CloudWatch metric over a time range',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -4,12 +4,16 @@ import { describeLogStreamsTool } from '@/tools/cloudwatch/describe_log_streams'
import { getLogEventsTool } from '@/tools/cloudwatch/get_log_events'
import { getMetricStatisticsTool } from '@/tools/cloudwatch/get_metric_statistics'
import { listMetricsTool } from '@/tools/cloudwatch/list_metrics'
import { putMetricDataTool } from '@/tools/cloudwatch/put_metric_data'
import { queryLogsTool } from '@/tools/cloudwatch/query_logs'
export * from './types'
export const cloudwatchDescribeAlarmsTool = describeAlarmsTool
export const cloudwatchDescribeLogGroupsTool = describeLogGroupsTool
export const cloudwatchDescribeLogStreamsTool = describeLogStreamsTool
export const cloudwatchGetLogEventsTool = getLogEventsTool
export const cloudwatchGetMetricStatisticsTool = getMetricStatisticsTool
export const cloudwatchListMetricsTool = listMetricsTool
export const cloudwatchPutMetricDataTool = putMetricDataTool
export const cloudwatchQueryLogsTool = queryLogsTool

View File

@@ -11,7 +11,7 @@ export const listMetricsTool: ToolConfig<
id: 'cloudwatch_list_metrics',
name: 'CloudWatch List Metrics',
description: 'List available CloudWatch metrics',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -0,0 +1,106 @@
import type {
CloudWatchPutMetricDataParams,
CloudWatchPutMetricDataResponse,
} from '@/tools/cloudwatch/types'
import type { ToolConfig } from '@/tools/types'
export const putMetricDataTool: ToolConfig<
CloudWatchPutMetricDataParams,
CloudWatchPutMetricDataResponse
> = {
id: 'cloudwatch_put_metric_data',
name: 'CloudWatch Publish Metric',
description: 'Publish a custom metric data point to CloudWatch',
version: '1.0.0',
params: {
awsRegion: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'AWS region (e.g., us-east-1)',
},
awsAccessKeyId: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'AWS access key ID',
},
awsSecretAccessKey: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'AWS secret access key',
},
namespace: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'Metric namespace (e.g., Custom/MyApp)',
},
metricName: {
type: 'string',
required: true,
visibility: 'user-or-llm',
description: 'Name of the metric',
},
value: {
type: 'number',
required: true,
visibility: 'user-or-llm',
description: 'Metric value to publish',
},
unit: {
type: 'string',
required: false,
visibility: 'user-or-llm',
description: 'Unit of the metric (e.g., Count, Seconds, Bytes)',
},
dimensions: {
type: 'string',
required: false,
visibility: 'user-or-llm',
description: 'JSON string of dimension name/value pairs',
},
},
request: {
url: '/api/tools/cloudwatch/put-metric-data',
method: 'POST',
headers: () => ({
'Content-Type': 'application/json',
}),
body: (params) => ({
region: params.awsRegion,
accessKeyId: params.awsAccessKeyId,
secretAccessKey: params.awsSecretAccessKey,
namespace: params.namespace,
metricName: params.metricName,
value: params.value,
...(params.unit && { unit: params.unit }),
...(params.dimensions && { dimensions: params.dimensions }),
}),
},
transformResponse: async (response: Response) => {
const data = await response.json()
if (!response.ok) {
throw new Error(data.error || 'Failed to publish CloudWatch metric')
}
return {
success: true,
output: data.output,
}
},
outputs: {
success: { type: 'boolean', description: 'Whether the metric was published successfully' },
namespace: { type: 'string', description: 'Metric namespace' },
metricName: { type: 'string', description: 'Metric name' },
value: { type: 'number', description: 'Published metric value' },
unit: { type: 'string', description: 'Metric unit' },
timestamp: { type: 'string', description: 'Timestamp when the metric was published' },
},
}
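
An illustrative invocation (placeholder credentials): with these params, `request.body()` above renames the `aws*` fields before hitting the proxy route.

```ts
const params = {
  awsRegion: 'us-east-1',
  awsAccessKeyId: 'AKIA...', // placeholder
  awsSecretAccessKey: '...', // placeholder
  namespace: 'Custom/MyApp',
  metricName: 'CheckoutLatency',
  value: 412,
  unit: 'Milliseconds',
  dimensions: '{"Environment":"prod"}',
}
// POST /api/tools/cloudwatch/put-metric-data
// { "region": "us-east-1", "accessKeyId": "AKIA...", "secretAccessKey": "...",
//   "namespace": "Custom/MyApp", "metricName": "CheckoutLatency",
//   "value": 412, "unit": "Milliseconds", "dimensions": "{\"Environment\":\"prod\"}" }
```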

View File

@@ -8,7 +8,7 @@ export const queryLogsTool: ToolConfig<CloudWatchQueryLogsParams, CloudWatchQuer
id: 'cloudwatch_query_logs',
name: 'CloudWatch Query Logs',
description: 'Run a CloudWatch Log Insights query against one or more log groups',
version: '1.0',
version: '1.0.0',
params: {
awsRegion: {

View File

@@ -144,3 +144,22 @@ export interface CloudWatchDescribeAlarmsResponse extends ToolResponse {
}[]
}
}
export interface CloudWatchPutMetricDataParams extends CloudWatchConnectionConfig {
namespace: string
metricName: string
value: number
unit?: string
dimensions?: string
}
export interface CloudWatchPutMetricDataResponse extends ToolResponse {
output: {
success: boolean
namespace: string
metricName: string
value: number
unit: string
timestamp: string
}
}

View File

@@ -39,6 +39,12 @@ export interface ErrorExtractorConfig {
}
const ERROR_EXTRACTORS: ErrorExtractorConfig[] = [
{
id: 'atlassian-errors',
description: 'Atlassian REST API errorMessage field',
examples: ['Jira', 'Jira Service Management', 'Confluence'],
extract: (errorInfo) => errorInfo?.data?.errorMessage,
},
{
id: 'graphql-errors',
description: 'GraphQL errors array with message field',
@@ -221,6 +227,7 @@ export function extractErrorMessage(errorInfo?: ErrorInfo, extractorId?: string)
}
export const ErrorExtractorId = {
ATLASSIAN_ERRORS: 'atlassian-errors',
GRAPHQL_ERRORS: 'graphql-errors',
TWITTER_ERRORS: 'twitter-errors',
DETAILS_ARRAY: 'details-array',
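
For reference, the shape the new extractor targets: an illustrative Atlassian REST error body as wrapped in this codebase's `errorInfo` structure (field values are examples):

```ts
const errorInfo = {
  status: 400,
  data: {
    // JSM/Jira/Confluence REST APIs return a top-level errorMessage string.
    errorMessage: 'Summary is required.',
    i18nErrorMessage: { i18nKey: 'sd.request.create.error', parameters: [] },
  },
}
// extract(errorInfo) -> errorInfo?.data?.errorMessage === 'Summary is required.'
```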

View File

@@ -45,9 +45,9 @@ export const jsmCreateRequestTool: ToolConfig<JsmCreateRequestParams, JsmCreateR
},
summary: {
type: 'string',
required: true,
required: false,
visibility: 'user-or-llm',
description: 'Summary/title for the service request',
description: 'Summary/title for the service request (required unless using Form Answers)',
},
description: {
type: 'string',
@@ -68,6 +68,13 @@ export const jsmCreateRequestTool: ToolConfig<JsmCreateRequestParams, JsmCreateR
description:
'Request field values as key-value pairs (overrides summary/description if provided)',
},
formAnswers: {
type: 'json',
required: false,
visibility: 'user-or-llm',
description:
'Form answers for form-based request types (e.g., {"summary": {"text": "Title"}, "customfield_10010": {"choices": ["10320"]}})',
},
requestParticipants: {
type: 'string',
required: false,
@@ -98,6 +105,7 @@ export const jsmCreateRequestTool: ToolConfig<JsmCreateRequestParams, JsmCreateR
description: params.description,
raiseOnBehalfOf: params.raiseOnBehalfOf,
requestFieldValues: params.requestFieldValues,
formAnswers: params.formAnswers,
requestParticipants: params.requestParticipants,
channel: params.channel,
}),

View File

@@ -411,9 +411,10 @@ export interface JsmGetRequestTypesParams extends JsmBaseParams {
export interface JsmCreateRequestParams extends JsmBaseParams {
serviceDeskId: string
requestTypeId: string
summary: string
summary?: string
description?: string
requestFieldValues?: Record<string, unknown>
formAnswers?: Record<string, unknown>
raiseOnBehalfOf?: string
requestParticipants?: string[]
channel?: string

View File

@@ -301,6 +301,7 @@ import {
cloudwatchGetLogEventsTool,
cloudwatchGetMetricStatisticsTool,
cloudwatchListMetricsTool,
cloudwatchPutMetricDataTool,
cloudwatchQueryLogsTool,
} from '@/tools/cloudwatch'
import {
@@ -3442,21 +3443,22 @@ export const tools: Record<string, ToolConfig> = {
rds_delete: rdsDeleteTool,
rds_execute: rdsExecuteTool,
rds_introspect: rdsIntrospectTool,
cloudformation_describe_stacks: cloudformationDescribeStacksTool,
cloudformation_list_stack_resources: cloudformationListStackResourcesTool,
cloudformation_detect_stack_drift: cloudformationDetectStackDriftTool,
cloudformation_describe_stack_drift_detection_status:
cloudformationDescribeStackDriftDetectionStatusTool,
cloudformation_describe_stack_events: cloudformationDescribeStackEventsTool,
cloudformation_describe_stacks: cloudformationDescribeStacksTool,
cloudformation_detect_stack_drift: cloudformationDetectStackDriftTool,
cloudformation_get_template: cloudformationGetTemplateTool,
cloudformation_list_stack_resources: cloudformationListStackResourcesTool,
cloudformation_validate_template: cloudformationValidateTemplateTool,
cloudwatch_query_logs: cloudwatchQueryLogsTool,
cloudwatch_describe_log_groups: cloudwatchDescribeLogGroupsTool,
cloudwatch_describe_alarms: cloudwatchDescribeAlarmsTool,
cloudwatch_describe_log_groups: cloudwatchDescribeLogGroupsTool,
cloudwatch_describe_log_streams: cloudwatchDescribeLogStreamsTool,
cloudwatch_get_log_events: cloudwatchGetLogEventsTool,
cloudwatch_list_metrics: cloudwatchListMetricsTool,
cloudwatch_get_metric_statistics: cloudwatchGetMetricStatisticsTool,
cloudwatch_list_metrics: cloudwatchListMetricsTool,
cloudwatch_put_metric_data: cloudwatchPutMetricDataTool,
cloudwatch_query_logs: cloudwatchQueryLogsTool,
dynamodb_get: dynamodbGetTool,
dynamodb_put: dynamodbPutTool,
dynamodb_query: dynamodbQueryTool,

View File

@@ -14,6 +14,7 @@ import { startWorkerHealthServer, updateWorkerHealthState } from '@/worker/healt
import { processKnowledgeConnectorSync } from '@/worker/processors/knowledge-connector-sync'
import { processKnowledgeDocument } from '@/worker/processors/knowledge-document-processing'
import { processMothershipJobExecution } from '@/worker/processors/mothership-job-execution'
import { processResume } from '@/worker/processors/resume'
import { processSchedule } from '@/worker/processors/schedule'
import { processWebhook } from '@/worker/processors/webhook'
import { processWorkflow } from '@/worker/processors/workflow'
@@ -25,6 +26,7 @@ const DEFAULT_WORKER_PORT = 3001
const DEFAULT_WORKFLOW_CONCURRENCY = 50
const DEFAULT_WEBHOOK_CONCURRENCY = 30
const DEFAULT_SCHEDULE_CONCURRENCY = 20
const DEFAULT_RESUME_CONCURRENCY = 20
const DEFAULT_MOTHERSHIP_JOB_CONCURRENCY = 10
const DEFAULT_CONNECTOR_SYNC_CONCURRENCY = 5
const DEFAULT_DOCUMENT_PROCESSING_CONCURRENCY = 20
@@ -80,6 +82,14 @@ async function main() {
),
})
const resumeWorker = new Worker('resume-execution', processResume, {
connection,
concurrency: parseWorkerNumber(
process.env.WORKER_CONCURRENCY_RESUME,
DEFAULT_RESUME_CONCURRENCY
),
})
const mothershipJobWorker = new Worker(
MOTHERSHIP_JOB_EXECUTION_QUEUE,
processMothershipJobExecution,
@@ -132,6 +142,7 @@ async function main() {
workflowWorker,
webhookWorker,
scheduleWorker,
resumeWorker,
mothershipJobWorker,
connectorSyncWorker,
documentProcessingWorker,

View File

@@ -0,0 +1,22 @@
import { createLogger } from '@sim/logger'
import type { Job } from 'bullmq'
import type { BullMQJobData } from '@/lib/core/bullmq'
import { runDispatchedJob } from '@/lib/core/workspace-dispatch'
import { executeResumeJob, type ResumeExecutionPayload } from '@/background/resume-execution'
const logger = createLogger('BullMQResumeProcessor')
export async function processResume(job: Job<BullMQJobData<ResumeExecutionPayload>>) {
const { payload } = job.data
const isFinalAttempt = job.attemptsMade + 1 >= (job.opts.attempts ?? 1)
logger.info('Processing resume execution job', {
jobId: job.id,
resumeExecutionId: payload.resumeExecutionId,
workflowId: payload.workflowId,
})
return runDispatchedJob(job.data.metadata, () => executeResumeJob(payload), {
isFinalAttempt,
})
}

View File

@@ -1,6 +1,5 @@
{
"lockfileVersion": 1,
"configVersion": 0,
"workspaces": {
"": {
"name": "simstudio",