Compare commits


4 Commits

Author             SHA1        Message                                                 Date
Vikhyath Mondreti  4b0b0d03c9  add check consistently for baseline diff                2026-01-25 13:22:07 -08:00
Vikhyath Mondreti  2f297686f6  remove redundant check in a2a                           2026-01-25 12:49:16 -08:00
Vikhyath Mondreti  a6adbafe16  fix subblock update                                     2026-01-25 12:42:45 -08:00
Vikhyath Mondreti  19cde17eb8  fix(supabase): storage upload + add basic mode version  2026-01-25 12:18:48 -08:00
50 changed files with 514 additions and 1422 deletions

View File

@@ -44,7 +44,7 @@ services:
     deploy:
       resources:
         limits:
-          memory: 1G
+          memory: 4G
     environment:
       - NODE_ENV=development
       - DATABASE_URL=postgresql://postgres:postgres@db:5432/simstudio

View File

@@ -10,20 +10,12 @@ Stellen Sie Sim auf Ihrer eigenen Infrastruktur mit Docker oder Kubernetes berei
 ## Anforderungen
-| Ressource | Klein | Standard | Produktion |
-|----------|-------|----------|------------|
-| CPU | 2 Kerne | 4 Kerne | 8+ Kerne |
-| RAM | 12 GB | 16 GB | 32+ GB |
-| Speicher | 20 GB SSD | 50 GB SSD | 100+ GB SSD |
-| Docker | 20.10+ | 20.10+ | Neueste Version |
-**Klein**: Entwicklung, Tests, Einzelnutzer (1-5 Nutzer)
-**Standard**: Teams (5-50 Nutzer), moderate Arbeitslasten
-**Produktion**: Große Teams (50+ Nutzer), Hochverfügbarkeit, intensive Workflow-Ausführung
-<Callout type="info">
-Die Ressourcenanforderungen werden durch Workflow-Ausführung (isolated-vm Sandboxing), Dateiverarbeitung (In-Memory-Dokumentenparsing) und Vektoroperationen (pgvector) bestimmt. Arbeitsspeicher ist typischerweise der limitierende Faktor, nicht CPU. Produktionsdaten zeigen, dass die Hauptanwendung durchschnittlich 4-8 GB und bei hoher Last bis zu 12 GB benötigt.
-</Callout>
+| Ressource | Minimum | Empfohlen |
+|----------|---------|-------------|
+| CPU | 2 Kerne | 4+ Kerne |
+| RAM | 12 GB | 16+ GB |
+| Speicher | 20 GB SSD | 50+ GB SSD |
+| Docker | 20.10+ | Neueste Version |
 ## Schnellstart

View File

@@ -5,14 +5,6 @@ description: Essential actions for navigating and using the Sim workflow editor
 import { Callout } from 'fumadocs-ui/components/callout'
-export const ActionImage = ({ src, alt }) => (
-  <img src={src} alt={alt} className="inline-block max-h-8 rounded border border-neutral-200 dark:border-neutral-700" />
-)
-export const ActionVideo = ({ src, alt }) => (
-  <video src={src} alt={alt} autoPlay loop muted playsInline className="inline-block max-h-8 rounded border border-neutral-200 dark:border-neutral-700" />
-)
 A quick lookup for everyday actions in the Sim workflow editor. For keyboard shortcuts, see [Keyboard Shortcuts](/keyboard-shortcuts).
 <Callout type="info">
@@ -21,209 +13,67 @@ A quick lookup for everyday actions in the Sim workflow editor. For keyboard sho
 ## Workspaces
-<table>
-<thead>
-<tr><th>Action</th><th>How</th><th>Preview</th></tr>
-</thead>
-<tbody>
-<tr>
-<td>Create a workspace</td>
-<td>Click workspace dropdown → **New Workspace**</td>
-<td><ActionVideo src="/static/quick-reference/create-workspace.mp4" alt="Create workspace" /></td>
-</tr>
-<tr>
-<td>Switch workspaces</td>
-<td>Click workspace dropdown → Select workspace</td>
-<td><ActionVideo src="/static/quick-reference/switch-workspace.mp4" alt="Switch workspaces" /></td>
-</tr>
-<tr>
-<td>Invite team members</td>
-<td>Workspace settings → **Team** → **Invite**</td>
-<td><ActionVideo src="/static/quick-reference/invite.mp4" alt="Invite team members" /></td>
-</tr>
-<tr>
-<td>Rename a workspace</td>
-<td>Right-click workspace → **Rename**</td>
-<td rowSpan={4}><ActionImage src="/static/quick-reference/workspace-context-menu.png" alt="Workspace context menu" /></td>
-</tr>
-<tr>
-<td>Duplicate a workspace</td>
-<td>Right-click workspace → **Duplicate**</td>
-</tr>
-<tr>
-<td>Export a workspace</td>
-<td>Right-click workspace → **Export**</td>
-</tr>
-<tr>
-<td>Delete a workspace</td>
-<td>Right-click workspace → **Delete**</td>
-</tr>
-</tbody>
-</table>
+| Action | How |
+|--------|-----|
+| Create a workspace | Click workspace dropdown in sidebar → **New Workspace** |
+| Rename a workspace | Workspace settings → Edit name |
+| Switch workspaces | Click workspace dropdown in sidebar → Select workspace |
+| Invite team members | Workspace settings → **Team** → **Invite** |
 ## Workflows
-<table>
-<thead>
-<tr><th>Action</th><th>How</th><th>Preview</th></tr>
-</thead>
-<tbody>
-<tr>
-<td>Create a workflow</td>
-<td>Click **+** button in sidebar</td>
-<td><ActionImage src="/static/quick-reference/create-workflow.png" alt="Create workflow" /></td>
-</tr>
-<tr>
-<td>Reorder / move workflows</td>
-<td>Drag workflow up/down or onto a folder</td>
-<td><ActionVideo src="/static/quick-reference/reordering.mp4" alt="Reorder workflows" /></td>
-</tr>
-<tr>
-<td>Import a workflow</td>
-<td>Click import button in sidebar → Select file</td>
-<td><ActionImage src="/static/quick-reference/import-workflow.png" alt="Import workflow" /></td>
-</tr>
-<tr>
-<td>Multi-select workflows</td>
-<td>`Mod+Click` or `Shift+Click` workflows in sidebar</td>
-<td><ActionVideo src="/static/quick-reference/multiselect.mp4" alt="Multi-select workflows" /></td>
-</tr>
-<tr>
-<td>Open in new tab</td>
-<td>Right-click workflow → **Open in New Tab**</td>
-<td rowSpan={6}><ActionImage src="/static/quick-reference/workflow-context-menu.png" alt="Workflow context menu" /></td>
-</tr>
-<tr>
-<td>Rename a workflow</td>
-<td>Right-click workflow → **Rename**</td>
-</tr>
-<tr>
-<td>Assign workflow color</td>
-<td>Right-click workflow → **Change Color**</td>
-</tr>
-<tr>
-<td>Duplicate a workflow</td>
-<td>Right-click workflow → **Duplicate**</td>
-</tr>
-<tr>
-<td>Export a workflow</td>
-<td>Right-click workflow → **Export**</td>
-</tr>
-<tr>
-<td>Delete a workflow</td>
-<td>Right-click workflow → **Delete**</td>
-</tr>
-<tr>
-<td>Rename a folder</td>
-<td>Right-click folder → **Rename**</td>
-<td rowSpan={6}><ActionImage src="/static/quick-reference/folder-context-menu.png" alt="Folder context menu" /></td>
-</tr>
-<tr>
-<td>Create workflow in folder</td>
-<td>Right-click folder → **Create workflow**</td>
-</tr>
-<tr>
-<td>Create folder in folder</td>
-<td>Right-click folder → **Create folder**</td>
-</tr>
-<tr>
-<td>Duplicate a folder</td>
-<td>Right-click folder → **Duplicate**</td>
-</tr>
-<tr>
-<td>Export a folder</td>
-<td>Right-click folder → **Export**</td>
-</tr>
-<tr>
-<td>Delete a folder</td>
-<td>Right-click folder → **Delete**</td>
-</tr>
-</tbody>
-</table>
+| Action | How |
+|--------|-----|
+| Create a workflow | Click **New Workflow** button or `Mod+Shift+A` |
+| Rename a workflow | Double-click workflow name in sidebar, or right-click → **Rename** |
+| Duplicate a workflow | Right-click workflow → **Duplicate** |
+| Reorder workflows | Drag workflow up/down in the sidebar list |
+| Import a workflow | Sidebar menu → **Import** → Select file |
+| Create a folder | Right-click in sidebar → **New Folder** |
+| Rename a folder | Right-click folder → **Rename** |
+| Delete a folder | Right-click folder → **Delete** |
+| Collapse/expand folder | Click folder arrow, or double-click folder |
+| Move workflow to folder | Drag workflow onto folder in sidebar |
+| Delete a workflow | Right-click workflow → **Delete** |
+| Export a workflow | Right-click workflow → **Export** |
+| Assign workflow color | Right-click workflow → **Change Color** |
+| Multi-select workflows | `Mod+Click` or `Shift+Click` workflows in sidebar |
+| Open in new tab | Right-click workflow → **Open in New Tab** |
 ## Blocks
-<table>
-<thead>
-<tr><th>Action</th><th>How</th><th>Preview</th></tr>
-</thead>
-<tbody>
-<tr>
-<td>Add a block</td>
-<td>Drag from Toolbar panel, or right-click canvas → **Add Block**</td>
-<td><ActionVideo src="/static/quick-reference/add-block.mp4" alt="Add a block" /></td>
-</tr>
-<tr>
-<td>Multi-select blocks</td>
-<td>`Mod+Click` additional blocks, or right-drag to draw selection box</td>
-<td><ActionVideo src="/static/quick-reference/multiselect-blocks.mp4" alt="Multi-select blocks" /></td>
-</tr>
-<tr>
-<td>Copy blocks</td>
-<td>`Mod+C` with blocks selected</td>
-<td rowSpan={2}><ActionVideo src="/static/quick-reference/copy-paste.mp4" alt="Copy and paste blocks" /></td>
-</tr>
-<tr>
-<td>Paste blocks</td>
-<td>`Mod+V` to paste copied blocks</td>
-</tr>
-<tr>
-<td>Duplicate blocks</td>
-<td>Right-click → **Duplicate**</td>
-<td><ActionVideo src="/static/quick-reference/duplicate-block.mp4" alt="Duplicate blocks" /></td>
-</tr>
-<tr>
-<td>Delete blocks</td>
-<td>`Delete` or `Backspace` key, or right-click → **Delete**</td>
-<td><ActionImage src="/static/quick-reference/delete-block.png" alt="Delete block" /></td>
-</tr>
-<tr>
-<td>Rename a block</td>
-<td>Click block name in header, or edit in the Editor panel</td>
-<td><ActionVideo src="/static/quick-reference/rename-block.mp4" alt="Rename a block" /></td>
-</tr>
-<tr>
-<td>Enable/Disable a block</td>
-<td>Right-click → **Enable/Disable**</td>
-<td><ActionImage src="/static/quick-reference/disable-block.png" alt="Disable block" /></td>
-</tr>
-<tr>
-<td>Toggle handle orientation</td>
-<td>Right-click → **Toggle Handles**</td>
-<td><ActionVideo src="/static/quick-reference/toggle-handles.mp4" alt="Toggle handle orientation" /></td>
-</tr>
-<tr>
-<td>Configure a block</td>
-<td>Select block → use Editor panel on right</td>
-<td><ActionVideo src="/static/quick-reference/configure-block.mp4" alt="Configure a block" /></td>
-</tr>
-</tbody>
-</table>
+| Action | How |
+|--------|-----|
+| Add a block | Drag from Toolbar panel, or right-click canvas → **Add Block** |
+| Select a block | Click on the block |
+| Multi-select blocks | `Mod+Click` additional blocks, or right-drag to draw selection box |
+| Move blocks | Drag selected block(s) to new position |
+| Copy blocks | `Mod+C` with blocks selected |
+| Paste blocks | `Mod+V` to paste copied blocks |
+| Duplicate blocks | Right-click → **Duplicate** |
+| Delete blocks | `Delete` or `Backspace` key, or right-click → **Delete** |
+| Rename a block | Click block name in header, or edit in the Editor panel |
+| Enable/Disable a block | Right-click → **Enable/Disable** |
+| Toggle handle orientation | Right-click → **Toggle Handles** |
+| Toggle trigger mode | Right-click trigger block → **Toggle Trigger Mode** |
+| Configure a block | Select block → use Editor panel on right |
 ## Connections
-<table>
-<thead>
-<tr><th>Action</th><th>How</th><th>Preview</th></tr>
-</thead>
-<tbody>
-<tr>
-<td>Create a connection</td>
-<td>Drag from output handle to input handle</td>
-<td><ActionVideo src="/static/quick-reference/connect-blocks.mp4" alt="Connect blocks" /></td>
-</tr>
-<tr>
-<td>Delete a connection</td>
-<td>Click edge to select `Delete` key</td>
-<td><ActionVideo src="/static/quick-reference/delete-connection.mp4" alt="Delete connection" /></td>
-</tr>
-<tr>
-<td>Use output in another block</td>
-<td>Drag connection tag into input field</td>
-<td><ActionVideo src="/static/quick-reference/connection-tag.mp4" alt="Use connection tag" /></td>
-</tr>
-</tbody>
-</table>
+| Action | How |
+|--------|-----|
+| Create a connection | Drag from output handle to input handle |
+| Delete a connection | Click edge to select → `Delete` key |
+| Use output in another block | Drag connection tag into input field |
+
+## Canvas Navigation
+
+| Action | How |
+|--------|-----|
+| Pan/move canvas | Left-drag on empty space, or scroll/trackpad |
+| Zoom in/out | Scroll wheel or pinch gesture |
+| Auto-layout | `Shift+L` |
+| Draw selection box | Right-drag on empty canvas area |
 ## Panels & Views
@@ -233,8 +83,7 @@ A quick lookup for everyday actions in the Sim workflow editor. For keyboard sho
 | Open Toolbar tab | Press `T` or click Toolbar tab |
 | Open Editor tab | Press `E` or click Editor tab |
 | Search toolbar | `Mod+F` |
-| Search everything | `Mod+K` |
-| Toggle manual mode | Click toggle button in editor fields to switch between manual and selector |
+| Toggle advanced mode | Click toggle button on input fields |
 | Resize panels | Drag panel edge |
 | Collapse/expand sidebar | Click collapse button on sidebar |
@@ -274,8 +123,7 @@ A quick lookup for everyday actions in the Sim workflow editor. For keyboard sho
 | Edit workflow variable | Variables tab → Click variable to edit |
 | Delete workflow variable | Variables tab → Click delete icon on variable |
 | Add environment variable | Settings → **Environment Variables** → **Add** |
-| Reference a workflow variable | Use `<blockName.itemName>` syntax in block inputs |
-| Reference an environment variable | Use `{{ENV_VAR}}` syntax in block inputs |
+| Reference a variable | Use `{{variableName}}` syntax in block inputs |
 ## Credentials

View File

@@ -16,20 +16,12 @@ Deploy Sim on your own infrastructure with Docker or Kubernetes.
 ## Requirements
-| Resource | Small | Standard | Production |
-|----------|-------|----------|------------|
-| CPU | 2 cores | 4 cores | 8+ cores |
-| RAM | 12 GB | 16 GB | 32+ GB |
-| Storage | 20 GB SSD | 50 GB SSD | 100+ GB SSD |
-| Docker | 20.10+ | 20.10+ | Latest |
-**Small**: Development, testing, single user (1-5 users)
-**Standard**: Teams (5-50 users), moderate workloads
-**Production**: Large teams (50+ users), high availability, heavy workflow execution
-<Callout type="info">
-Resource requirements are driven by workflow execution (isolated-vm sandboxing), file processing (in-memory document parsing), and vector operations (pgvector). Memory is typically the constraining factor rather than CPU. Production telemetry shows the main app uses 4-8 GB average with peaks up to 12 GB under heavy load.
-</Callout>
+| Resource | Minimum | Recommended |
+|----------|---------|-------------|
+| CPU | 2 cores | 4+ cores |
+| RAM | 12 GB | 16+ GB |
+| Storage | 20 GB SSD | 50+ GB SSD |
+| Docker | 20.10+ | Latest |
 ## Quick Start

View File

@@ -10,20 +10,12 @@ Despliega Sim en tu propia infraestructura con Docker o Kubernetes.
 ## Requisitos
-| Recurso | Pequeño | Estándar | Producción |
-|----------|---------|----------|------------|
-| CPU | 2 núcleos | 4 núcleos | 8+ núcleos |
-| RAM | 12 GB | 16 GB | 32+ GB |
-| Almacenamiento | 20 GB SSD | 50 GB SSD | 100+ GB SSD |
-| Docker | 20.10+ | 20.10+ | Última versión |
-**Pequeño**: Desarrollo, pruebas, usuario único (1-5 usuarios)
-**Estándar**: Equipos (5-50 usuarios), cargas de trabajo moderadas
-**Producción**: Equipos grandes (50+ usuarios), alta disponibilidad, ejecución intensiva de workflows
-<Callout type="info">
-Los requisitos de recursos están determinados por la ejecución de workflows (sandboxing isolated-vm), procesamiento de archivos (análisis de documentos en memoria) y operaciones vectoriales (pgvector). La memoria suele ser el factor limitante, no la CPU. La telemetría de producción muestra que la aplicación principal usa 4-8 GB en promedio con picos de hasta 12 GB bajo carga pesada.
-</Callout>
+| Recurso | Mínimo | Recomendado |
+|----------|---------|-------------|
+| CPU | 2 núcleos | 4+ núcleos |
+| RAM | 12 GB | 16+ GB |
+| Almacenamiento | 20 GB SSD | 50+ GB SSD |
+| Docker | 20.10+ | Última versión |
 ## Inicio rápido

View File

@@ -10,20 +10,12 @@ Déployez Sim sur votre propre infrastructure avec Docker ou Kubernetes.
 ## Prérequis
-| Ressource | Petit | Standard | Production |
-|----------|-------|----------|------------|
-| CPU | 2 cœurs | 4 cœurs | 8+ cœurs |
-| RAM | 12 Go | 16 Go | 32+ Go |
-| Stockage | 20 Go SSD | 50 Go SSD | 100+ Go SSD |
-| Docker | 20.10+ | 20.10+ | Dernière version |
-**Petit** : Développement, tests, utilisateur unique (1-5 utilisateurs)
-**Standard** : Équipes (5-50 utilisateurs), charges de travail modérées
-**Production** : Grandes équipes (50+ utilisateurs), haute disponibilité, exécution intensive de workflows
-<Callout type="info">
-Les besoins en ressources sont déterminés par l'exécution des workflows (sandboxing isolated-vm), le traitement des fichiers (analyse de documents en mémoire) et les opérations vectorielles (pgvector). La mémoire est généralement le facteur limitant, pas le CPU. La télémétrie de production montre que l'application principale utilise 4-8 Go en moyenne avec des pics jusqu'à 12 Go sous forte charge.
-</Callout>
+| Ressource | Minimum | Recommandé |
+|----------|---------|-------------|
+| CPU | 2 cœurs | 4+ cœurs |
+| RAM | 12 Go | 16+ Go |
+| Stockage | 20 Go SSD | 50+ Go SSD |
+| Docker | 20.10+ | Dernière version |
 ## Démarrage rapide

View File

@@ -10,20 +10,12 @@ DockerまたはKubernetesを使用して、自社のインフラストラクチ
 ## 要件
-| リソース | スモール | スタンダード | プロダクション |
-|----------|---------|-------------|----------------|
-| CPU | 2コア | 4コア | 8+コア |
-| RAM | 12 GB | 16 GB | 32+ GB |
-| ストレージ | 20 GB SSD | 50 GB SSD | 100+ GB SSD |
-| Docker | 20.10+ | 20.10+ | 最新版 |
-**スモール**: 開発、テスト、シングルユーザー（1-5ユーザー）
-**スタンダード**: チーム（5-50ユーザー）、中程度のワークロード
-**プロダクション**: 大規模チーム（50+ユーザー）、高可用性、高負荷ワークフロー実行
-<Callout type="info">
-リソース要件は、ワークフロー実行（isolated-vmサンドボックス）、ファイル処理（メモリ内ドキュメント解析）、ベクトル演算（pgvector）によって決まります。CPUよりもメモリが制約要因となることが多いです。本番環境のテレメトリによると、メインアプリは平均4-8 GB、高負荷時は最大12 GBを使用します。
-</Callout>
+| リソース | 最小 | 推奨 |
+|----------|---------|-------------|
+| CPU | 2コア | 4+コア |
+| RAM | 12 GB | 16+ GB |
+| ストレージ | 20 GB SSD | 50+ GB SSD |
+| Docker | 20.10+ | 最新版 |
 ## クイックスタート

View File

@@ -10,20 +10,12 @@ import { Callout } from 'fumadocs-ui/components/callout'
 ## 要求
-| 资源 | 小型 | 标准 | 生产环境 |
-|----------|------|------|----------|
-| CPU | 2 核 | 4 核 | 8+ 核 |
-| 内存 | 12 GB | 16 GB | 32+ GB |
-| 存储 | 20 GB SSD | 50 GB SSD | 100+ GB SSD |
-| Docker | 20.10+ | 20.10+ | 最新版本 |
-**小型**: 开发、测试、单用户（1-5 用户）
-**标准**: 团队（5-50 用户）、中等工作负载
-**生产环境**: 大型团队（50+ 用户）、高可用性、密集工作流执行
-<Callout type="info">
-资源需求由工作流执行（isolated-vm 沙箱）、文件处理（内存中文档解析）和向量运算（pgvector）决定。内存通常是限制因素，而不是 CPU。生产遥测数据显示主应用平均使用 4-8 GB，高负载时峰值可达 12 GB。
-</Callout>
+| 资源 | 最低要求 | 推荐配置 |
+|----------|---------|-------------|
+| CPU | 2 核 | 4 核及以上 |
+| 内存 | 12 GB | 16 GB 及以上 |
+| 存储 | 20 GB SSD | 50 GB 及以上 SSD |
+| Docker | 20.10+ | 最新版本 |
 ## 快速开始

Binary file not shown (before: 26 KiB)

Binary file not shown (before: 27 KiB)

Binary file not shown (before: 24 KiB)

Binary file not shown (before: 48 KiB)

Binary file not shown (before: 20 KiB)

Binary file not shown (before: 25 KiB)

Binary file not shown (before: 36 KiB)

Binary file not shown (before: 41 KiB)

View File

@@ -408,7 +408,6 @@ describe('Knowledge Search Utils', () => {
         input: ['test query'],
         model: 'text-embedding-3-small',
         encoding_format: 'float',
-        dimensions: 1536,
       }),
     })
   )

View File

@@ -1,204 +0,0 @@
import { db } from '@sim/db'
import { member, permissions, user, workspace } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq, or } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
const logger = createLogger('OrganizationWorkspacesAPI')
/**
* GET /api/organizations/[id]/workspaces
* Get workspaces related to the organization with optional filtering
* Query parameters:
* - ?available=true - Only workspaces where user can invite others (admin permissions)
* - ?member=userId - Workspaces where specific member has access
*/
export async function GET(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
try {
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const { id: organizationId } = await params
const url = new URL(request.url)
const availableOnly = url.searchParams.get('available') === 'true'
const memberId = url.searchParams.get('member')
// Verify user is a member of this organization
const memberEntry = await db
.select()
.from(member)
.where(and(eq(member.organizationId, organizationId), eq(member.userId, session.user.id)))
.limit(1)
if (memberEntry.length === 0) {
return NextResponse.json(
{
error: 'Forbidden - Not a member of this organization',
},
{ status: 403 }
)
}
const userRole = memberEntry[0].role
const hasAdminAccess = ['owner', 'admin'].includes(userRole)
if (availableOnly) {
// Get workspaces where user has admin permissions (can invite others)
const availableWorkspaces = await db
.select({
id: workspace.id,
name: workspace.name,
ownerId: workspace.ownerId,
createdAt: workspace.createdAt,
isOwner: eq(workspace.ownerId, session.user.id),
permissionType: permissions.permissionType,
})
.from(workspace)
.leftJoin(
permissions,
and(
eq(permissions.entityType, 'workspace'),
eq(permissions.entityId, workspace.id),
eq(permissions.userId, session.user.id)
)
)
.where(
or(
// User owns the workspace
eq(workspace.ownerId, session.user.id),
// User has admin permission on the workspace
and(
eq(permissions.userId, session.user.id),
eq(permissions.entityType, 'workspace'),
eq(permissions.permissionType, 'admin')
)
)
)
// Filter and format the results
const workspacesWithInvitePermission = availableWorkspaces
.filter((workspace) => {
// Include if user owns the workspace OR has admin permission
return workspace.isOwner || workspace.permissionType === 'admin'
})
.map((workspace) => ({
id: workspace.id,
name: workspace.name,
isOwner: workspace.isOwner,
canInvite: true, // All returned workspaces have invite permission
createdAt: workspace.createdAt,
}))
logger.info('Retrieved available workspaces for organization member', {
organizationId,
userId: session.user.id,
workspaceCount: workspacesWithInvitePermission.length,
})
return NextResponse.json({
success: true,
data: {
workspaces: workspacesWithInvitePermission,
totalCount: workspacesWithInvitePermission.length,
filter: 'available',
},
})
}
if (memberId && hasAdminAccess) {
// Get workspaces where specific member has access (admin only)
const memberWorkspaces = await db
.select({
id: workspace.id,
name: workspace.name,
ownerId: workspace.ownerId,
isOwner: eq(workspace.ownerId, memberId),
permissionType: permissions.permissionType,
createdAt: permissions.createdAt,
})
.from(workspace)
.leftJoin(
permissions,
and(
eq(permissions.entityType, 'workspace'),
eq(permissions.entityId, workspace.id),
eq(permissions.userId, memberId)
)
)
.where(
or(
// Member owns the workspace
eq(workspace.ownerId, memberId),
// Member has permissions on the workspace
and(eq(permissions.userId, memberId), eq(permissions.entityType, 'workspace'))
)
)
const formattedWorkspaces = memberWorkspaces.map((workspace) => ({
id: workspace.id,
name: workspace.name,
isOwner: workspace.isOwner,
permission: workspace.permissionType,
joinedAt: workspace.createdAt,
createdAt: workspace.createdAt,
}))
return NextResponse.json({
success: true,
data: {
workspaces: formattedWorkspaces,
totalCount: formattedWorkspaces.length,
filter: 'member',
memberId,
},
})
}
// Default: Get all workspaces (basic info only for regular members)
if (!hasAdminAccess) {
return NextResponse.json({
success: true,
data: {
workspaces: [],
totalCount: 0,
message: 'Workspace access information is only available to organization admins',
},
})
}
// For admins: Get summary of all workspaces
const allWorkspaces = await db
.select({
id: workspace.id,
name: workspace.name,
ownerId: workspace.ownerId,
createdAt: workspace.createdAt,
ownerName: user.name,
})
.from(workspace)
.leftJoin(user, eq(workspace.ownerId, user.id))
return NextResponse.json({
success: true,
data: {
workspaces: allWorkspaces,
totalCount: allWorkspaces.length,
filter: 'all',
},
userRole,
hasAdminAccess,
})
} catch (error) {
logger.error('Failed to get organization workspaces', { error })
return NextResponse.json(
{
error: 'Internal server error',
},
{ status: 500 }
)
}
}

View File

@@ -0,0 +1,257 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { checkInternalAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request'
import { processSingleFileToUserFile } from '@/lib/uploads/utils/file-utils'
import { downloadFileFromStorage } from '@/lib/uploads/utils/file-utils.server'
export const dynamic = 'force-dynamic'
const logger = createLogger('SupabaseStorageUploadAPI')
const SupabaseStorageUploadSchema = z.object({
projectId: z.string().min(1, 'Project ID is required'),
apiKey: z.string().min(1, 'API key is required'),
bucket: z.string().min(1, 'Bucket name is required'),
fileName: z.string().min(1, 'File name is required'),
path: z.string().optional().nullable(),
fileData: z.any(),
contentType: z.string().optional().nullable(),
upsert: z.boolean().optional().default(false),
})
export async function POST(request: NextRequest) {
const requestId = generateRequestId()
try {
const authResult = await checkInternalAuth(request, { requireWorkflowId: false })
if (!authResult.success) {
logger.warn(
`[${requestId}] Unauthorized Supabase storage upload attempt: ${authResult.error}`
)
return NextResponse.json(
{
success: false,
error: authResult.error || 'Authentication required',
},
{ status: 401 }
)
}
logger.info(
`[${requestId}] Authenticated Supabase storage upload request via ${authResult.authType}`,
{
userId: authResult.userId,
}
)
const body = await request.json()
const validatedData = SupabaseStorageUploadSchema.parse(body)
const fileData = validatedData.fileData
const isStringInput = typeof fileData === 'string'
logger.info(`[${requestId}] Uploading to Supabase Storage`, {
bucket: validatedData.bucket,
fileName: validatedData.fileName,
path: validatedData.path,
fileDataType: isStringInput ? 'string' : 'object',
})
if (!fileData) {
return NextResponse.json(
{
success: false,
error: 'fileData is required',
},
{ status: 400 }
)
}
let uploadBody: Buffer
let uploadContentType: string | undefined
if (isStringInput) {
let content = fileData as string
const dataUrlMatch = content.match(/^data:([^;]+);base64,(.+)$/s)
if (dataUrlMatch) {
const [, mimeType, base64Data] = dataUrlMatch
content = base64Data
if (!validatedData.contentType) {
uploadContentType = mimeType
}
logger.info(`[${requestId}] Extracted base64 from data URL (MIME: ${mimeType})`)
}
const cleanedContent = content.replace(/[\s\r\n]/g, '')
const isLikelyBase64 = /^[A-Za-z0-9+/]*={0,2}$/.test(cleanedContent)
if (isLikelyBase64 && cleanedContent.length >= 4) {
try {
uploadBody = Buffer.from(cleanedContent, 'base64')
const expectedMinSize = Math.floor(cleanedContent.length * 0.7)
const expectedMaxSize = Math.ceil(cleanedContent.length * 0.8)
if (
uploadBody.length >= expectedMinSize &&
uploadBody.length <= expectedMaxSize &&
uploadBody.length > 0
) {
logger.info(
`[${requestId}] Decoded base64 content: ${cleanedContent.length} chars -> ${uploadBody.length} bytes`
)
} else {
const reEncoded = uploadBody.toString('base64')
if (reEncoded !== cleanedContent) {
logger.info(
`[${requestId}] Content looked like base64 but re-encoding didn't match, using as plain text`
)
uploadBody = Buffer.from(content, 'utf-8')
} else {
logger.info(
`[${requestId}] Decoded base64 content (verified): ${uploadBody.length} bytes`
)
}
}
} catch (decodeError) {
logger.info(
`[${requestId}] Failed to decode as base64, using as plain text: ${decodeError}`
)
uploadBody = Buffer.from(content, 'utf-8')
}
} else {
uploadBody = Buffer.from(content, 'utf-8')
logger.info(`[${requestId}] Using content as plain text (${uploadBody.length} bytes)`)
}
uploadContentType =
uploadContentType || validatedData.contentType || 'application/octet-stream'
} else {
const rawFile = fileData
logger.info(`[${requestId}] Processing file object: ${rawFile.name || 'unknown'}`)
let userFile
try {
userFile = processSingleFileToUserFile(rawFile, requestId, logger)
} catch (error) {
return NextResponse.json(
{
success: false,
error: error instanceof Error ? error.message : 'Failed to process file',
},
{ status: 400 }
)
}
const buffer = await downloadFileFromStorage(userFile, requestId, logger)
uploadBody = buffer
uploadContentType = validatedData.contentType || userFile.type || 'application/octet-stream'
}
let fullPath = validatedData.fileName
if (validatedData.path) {
const folderPath = validatedData.path.endsWith('/')
? validatedData.path
: `${validatedData.path}/`
fullPath = `${folderPath}${validatedData.fileName}`
}
const supabaseUrl = `https://${validatedData.projectId}.supabase.co/storage/v1/object/${validatedData.bucket}/${fullPath}`
const headers: Record<string, string> = {
apikey: validatedData.apiKey,
Authorization: `Bearer ${validatedData.apiKey}`,
'Content-Type': uploadContentType,
}
if (validatedData.upsert) {
headers['x-upsert'] = 'true'
}
logger.info(`[${requestId}] Sending to Supabase: ${supabaseUrl}`, {
contentType: uploadContentType,
bodySize: uploadBody.length,
upsert: validatedData.upsert,
})
const response = await fetch(supabaseUrl, {
method: 'POST',
headers,
body: new Uint8Array(uploadBody),
})
if (!response.ok) {
const errorText = await response.text()
let errorData
try {
errorData = JSON.parse(errorText)
} catch {
errorData = { message: errorText }
}
logger.error(`[${requestId}] Supabase Storage upload failed:`, {
status: response.status,
statusText: response.statusText,
error: errorData,
})
return NextResponse.json(
{
success: false,
error: errorData.message || errorData.error || `Upload failed: ${response.statusText}`,
details: errorData,
},
{ status: response.status }
)
}
const result = await response.json()
logger.info(`[${requestId}] File uploaded successfully to Supabase Storage`, {
bucket: validatedData.bucket,
path: fullPath,
})
const publicUrl = `https://${validatedData.projectId}.supabase.co/storage/v1/object/public/${validatedData.bucket}/${fullPath}`
return NextResponse.json({
success: true,
output: {
message: 'Successfully uploaded file to storage',
results: {
...result,
path: fullPath,
bucket: validatedData.bucket,
publicUrl,
},
},
})
} catch (error) {
if (error instanceof z.ZodError) {
logger.warn(`[${requestId}] Invalid request data`, { errors: error.errors })
return NextResponse.json(
{
success: false,
error: 'Invalid request data',
details: error.errors,
},
{ status: 400 }
)
}
logger.error(`[${requestId}] Error uploading to Supabase Storage:`, error)
return NextResponse.json(
{
success: false,
error: error instanceof Error ? error.message : 'Internal server error',
},
{ status: 500 }
)
}
}
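
For readers tracing the string branch above: the `expectedMinSize`/`expectedMaxSize` window works because base64 has a fixed expansion ratio of 4 encoded characters per 3 bytes, so a genuine base64 payload decodes to roughly 0.75× its character count. A condensed, standalone sketch of that heuristic (the `looksLikeBase64` name is illustrative; the route inlines this logic and additionally falls back to the re-encode comparison shown above when the size check alone is inconclusive):

```typescript
// Condensed sketch of the base64 detection used by the storage-upload route.
function looksLikeBase64(content: string): boolean {
  const cleaned = content.replace(/[\s\r\n]/g, '')
  if (cleaned.length < 4 || !/^[A-Za-z0-9+/]*={0,2}$/.test(cleaned)) return false
  const decoded = Buffer.from(cleaned, 'base64')
  // 4 base64 chars -> 3 bytes: the decoded size sits near 0.75x the input
  // length, so the 0.7-0.8 window accepts real payloads while padding only
  // shifts the ratio slightly.
  return (
    decoded.length >= Math.floor(cleaned.length * 0.7) &&
    decoded.length <= Math.ceil(cleaned.length * 0.8)
  )
}

console.log(looksLikeBase64(Buffer.from('hello world').toString('base64'))) // true
console.log(looksLikeBase64('plain text, with punctuation!')) // false: ',' and '!' are outside the alphabet
```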

View File

@@ -1,379 +0,0 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { checkInternalAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { StorageService } from '@/lib/uploads'
import {
extractStorageKey,
inferContextFromKey,
isInternalFileUrl,
} from '@/lib/uploads/utils/file-utils'
import { verifyFileAccess } from '@/app/api/files/authorization'
export const dynamic = 'force-dynamic'
const logger = createLogger('SupabaseStorageUploadAPI')
const SupabaseStorageUploadSchema = z.object({
apiKey: z.string().min(1, 'API key is required'),
projectId: z.string().min(1, 'Project ID is required'),
bucket: z.string().min(1, 'Bucket name is required'),
fileName: z.string().min(1, 'File name is required'),
path: z.string().optional().nullable(),
fileUpload: z
.object({
name: z.string().optional(),
type: z.string().optional(),
url: z.string().optional(),
path: z.string().optional(),
})
.optional()
.nullable(),
fileContent: z.string().optional().nullable(),
contentType: z.string().optional().nullable(),
upsert: z.boolean().optional().default(false),
})
/**
* Detects if a string is base64 encoded and decodes it to a Buffer.
* Handles both standard base64 and base64url encoding.
*/
function decodeBase64ToBuffer(content: string): Buffer {
// Remove data URI prefix if present (e.g., "data:application/pdf;base64,")
const base64Content = content.includes(',') ? content.split(',')[1] : content
// Convert base64url to standard base64 if needed
let normalizedBase64 = base64Content
if (base64Content.includes('-') || base64Content.includes('_')) {
normalizedBase64 = base64Content.replace(/-/g, '+').replace(/_/g, '/')
}
// Add padding if necessary
const padding = normalizedBase64.length % 4
if (padding > 0) {
normalizedBase64 += '='.repeat(4 - padding)
}
return Buffer.from(normalizedBase64, 'base64')
}
/**
* Checks if a string appears to be base64 encoded.
*/
function isBase64(str: string): boolean {
// Remove data URI prefix if present
const content = str.includes(',') ? str.split(',')[1] : str
// Check if it matches base64 pattern (including base64url)
const base64Regex = /^[A-Za-z0-9+/_-]*={0,2}$/
if (!base64Regex.test(content)) {
return false
}
// Additional heuristic: base64 strings are typically longer and don't contain spaces
if (content.length < 4 || content.includes(' ')) {
return false
}
// Try to decode and check if it produces valid bytes
try {
const decoded = decodeBase64ToBuffer(str)
// If decoded length is significantly smaller than input, it's likely base64
return decoded.length < content.length
} catch {
return false
}
}
/**
* Infer content type from file extension
*/
function inferContentType(fileName: string): string {
const ext = fileName.split('.').pop()?.toLowerCase()
const mimeTypes: Record<string, string> = {
pdf: 'application/pdf',
png: 'image/png',
jpg: 'image/jpeg',
jpeg: 'image/jpeg',
gif: 'image/gif',
webp: 'image/webp',
svg: 'image/svg+xml',
txt: 'text/plain',
html: 'text/html',
css: 'text/css',
js: 'application/javascript',
json: 'application/json',
xml: 'application/xml',
zip: 'application/zip',
doc: 'application/msword',
docx: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
xls: 'application/vnd.ms-excel',
xlsx: 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
ppt: 'application/vnd.ms-powerpoint',
pptx: 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
mp3: 'audio/mpeg',
mp4: 'video/mp4',
wav: 'audio/wav',
csv: 'text/csv',
}
return mimeTypes[ext || ''] || 'application/octet-stream'
}
export async function POST(request: NextRequest) {
const requestId = generateRequestId()
try {
const authResult = await checkInternalAuth(request, { requireWorkflowId: false })
if (!authResult.success || !authResult.userId) {
logger.warn(
`[${requestId}] Unauthorized Supabase storage upload attempt: ${authResult.error}`
)
return NextResponse.json(
{
success: false,
error: authResult.error || 'Authentication required',
},
{ status: 401 }
)
}
const userId = authResult.userId
logger.info(
`[${requestId}] Authenticated Supabase storage upload request via ${authResult.authType}`,
{ userId }
)
const body = await request.json()
const validatedData = SupabaseStorageUploadSchema.parse(body)
// Build the full file path
let fullPath = validatedData.fileName
if (validatedData.path) {
const folderPath = validatedData.path.endsWith('/')
? validatedData.path
: `${validatedData.path}/`
fullPath = `${folderPath}${validatedData.fileName}`
}
logger.info(`[${requestId}] Uploading to Supabase Storage`, {
projectId: validatedData.projectId,
bucket: validatedData.bucket,
path: fullPath,
upsert: validatedData.upsert,
hasFileUpload: !!validatedData.fileUpload,
hasFileContent: !!validatedData.fileContent,
})
// Determine content type
let contentType = validatedData.contentType
if (!contentType && validatedData.fileUpload?.type) {
contentType = validatedData.fileUpload.type
}
if (!contentType) {
contentType = inferContentType(validatedData.fileName)
}
// Get the file content - either from fileUpload (internal storage) or fileContent (base64)
let uploadBody: Buffer
if (validatedData.fileUpload) {
// Handle file upload from internal storage
const fileUrl = validatedData.fileUpload.url || validatedData.fileUpload.path
if (!fileUrl) {
return NextResponse.json(
{
success: false,
error: 'File upload is missing URL or path',
},
{ status: 400 }
)
}
logger.info(`[${requestId}] Processing file upload from: ${fileUrl}`)
// Check if it's an internal file URL (workspace file)
if (isInternalFileUrl(fileUrl)) {
try {
const storageKey = extractStorageKey(fileUrl)
const context = inferContextFromKey(storageKey)
const hasAccess = await verifyFileAccess(storageKey, userId, undefined, context, false)
if (!hasAccess) {
logger.warn(`[${requestId}] Unauthorized file access attempt`, {
userId,
key: storageKey,
context,
})
return NextResponse.json(
{
success: false,
error: 'File not found or access denied',
},
{ status: 404 }
)
}
// Download file from internal storage
const fileBuffer = await StorageService.downloadFile({ key: storageKey, context })
uploadBody = Buffer.from(fileBuffer)
logger.info(
`[${requestId}] Downloaded file from internal storage: ${fileBuffer.byteLength} bytes`
)
} catch (error) {
logger.error(`[${requestId}] Failed to download from internal storage:`, error)
return NextResponse.json(
{
success: false,
error: 'Failed to access uploaded file',
},
{ status: 500 }
)
}
} else {
// External URL - fetch the file
let fetchUrl = fileUrl
if (fetchUrl.startsWith('/')) {
const baseUrl = getBaseUrl()
fetchUrl = `${baseUrl}${fetchUrl}`
}
try {
const response = await fetch(fetchUrl)
if (!response.ok) {
throw new Error(`Failed to fetch file: ${response.status} ${response.statusText}`)
}
const arrayBuffer = await response.arrayBuffer()
uploadBody = Buffer.from(arrayBuffer)
logger.info(`[${requestId}] Downloaded file from URL: ${uploadBody.length} bytes`)
} catch (error) {
logger.error(`[${requestId}] Failed to fetch file from URL:`, error)
return NextResponse.json(
{
success: false,
error: `Failed to fetch file from URL: ${error instanceof Error ? error.message : 'Unknown error'}`,
},
{ status: 500 }
)
}
}
} else if (validatedData.fileContent) {
// Handle direct file content (base64 or plain text)
if (isBase64(validatedData.fileContent)) {
logger.info(`[${requestId}] Detected base64 content, decoding to binary`)
uploadBody = decodeBase64ToBuffer(validatedData.fileContent)
} else {
logger.info(`[${requestId}] Using plain text content`)
uploadBody = Buffer.from(validatedData.fileContent, 'utf-8')
}
} else {
return NextResponse.json(
{
success: false,
error: 'Either fileUpload or fileContent is required',
},
{ status: 400 }
)
}
logger.info(`[${requestId}] Upload body size: ${uploadBody.length} bytes`)
// Build Supabase Storage URL
const supabaseUrl = `https://${validatedData.projectId}.supabase.co/storage/v1/object/${validatedData.bucket}/${fullPath}`
// Build headers
const headers: Record<string, string> = {
apikey: validatedData.apiKey,
Authorization: `Bearer ${validatedData.apiKey}`,
'Content-Type': contentType,
}
if (validatedData.upsert) {
headers['x-upsert'] = 'true'
}
// Make the request to Supabase Storage
// Convert Buffer to Uint8Array for fetch compatibility
const response = await fetch(supabaseUrl, {
method: 'POST',
headers,
body: new Uint8Array(uploadBody),
})
if (!response.ok) {
let errorData: any
try {
errorData = await response.json()
} catch {
errorData = await response.text()
}
logger.error(`[${requestId}] Supabase Storage upload failed`, {
status: response.status,
statusText: response.statusText,
error: errorData,
})
return NextResponse.json(
{
success: false,
error:
typeof errorData === 'object' && errorData.message
? errorData.message
: `Upload failed: ${response.status} ${response.statusText}`,
},
{ status: response.status }
)
}
const result = await response.json()
logger.info(`[${requestId}] File uploaded successfully to Supabase Storage`, {
bucket: validatedData.bucket,
path: fullPath,
})
// Build public URL for reference
const publicUrl = `https://${validatedData.projectId}.supabase.co/storage/v1/object/public/${validatedData.bucket}/${fullPath}`
return NextResponse.json({
success: true,
output: {
message: 'Successfully uploaded file to storage',
results: {
...result,
publicUrl,
bucket: validatedData.bucket,
path: fullPath,
},
},
})
} catch (error) {
if (error instanceof z.ZodError) {
logger.warn(`[${requestId}] Invalid request data`, { errors: error.errors })
return NextResponse.json(
{
success: false,
error: 'Invalid request data',
details: error.errors,
},
{ status: 400 }
)
}
logger.error(`[${requestId}] Error uploading to Supabase Storage:`, error)
return NextResponse.json(
{
success: false,
error: error instanceof Error ? error.message : 'Internal server error',
},
{ status: 500 }
)
}
}

View File

@@ -338,6 +338,11 @@ const arePropsEqual = (prevProps: SubBlockProps, nextProps: SubBlockProps): bool
   const configEqual =
     prevProps.config.id === nextProps.config.id && prevProps.config.type === nextProps.config.type

+  const canonicalToggleEqual =
+    !!prevProps.canonicalToggle === !!nextProps.canonicalToggle &&
+    prevProps.canonicalToggle?.mode === nextProps.canonicalToggle?.mode &&
+    prevProps.canonicalToggle?.disabled === nextProps.canonicalToggle?.disabled
+
   return (
     prevProps.blockId === nextProps.blockId &&
     configEqual &&
@@ -346,8 +351,7 @@ const arePropsEqual = (prevProps: SubBlockProps, nextProps: SubBlockProps): bool
     prevProps.disabled === nextProps.disabled &&
     prevProps.fieldDiffStatus === nextProps.fieldDiffStatus &&
     prevProps.allowExpandInPreview === nextProps.allowExpandInPreview &&
-    prevProps.canonicalToggle?.mode === nextProps.canonicalToggle?.mode &&
-    prevProps.canonicalToggle?.disabled === nextProps.canonicalToggle?.disabled
+    canonicalToggleEqual
   )
 }
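
The presence check added here (`!!prevProps.canonicalToggle === !!nextProps.canonicalToggle`) matters because optional chaining alone cannot distinguish "no toggle object" from "toggle object with unset fields": both sides read `undefined`, the props compare equal, and the memoized sub-block skips a re-render when the toggle first appears. A minimal repro under an assumed `CanonicalToggle` shape (not the real `SubBlockProps` type):

```typescript
type CanonicalToggle = { mode?: 'basic' | 'advanced'; disabled?: boolean }

const prev: CanonicalToggle | undefined = undefined // toggle absent on the old props
const next: CanonicalToggle | undefined = {} // toggle present, fields still unset

// Field-only comparison (the old check): both reads yield undefined, so the
// props are judged equal and React.memo suppresses the re-render.
const fieldOnly = prev?.mode === next?.mode && prev?.disabled === next?.disabled // true

// With the presence check (the fix), the absent -> present transition is caught.
const withPresence = !!prev === !!next && fieldOnly // false
```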

View File

@@ -214,15 +214,6 @@ export const A2ABlock: BlockConfig<A2AResponse> = {
     ],
     config: {
       tool: (params) => params.operation as string,
-      params: (params) => {
-        const { fileUpload, fileReference, ...rest } = params
-        const hasFileUpload = Array.isArray(fileUpload) ? fileUpload.length > 0 : !!fileUpload
-        const files = hasFileUpload ? fileUpload : fileReference
-        return {
-          ...rest,
-          ...(files ? { files } : {}),
-        }
-      },
     },
   },
   inputs: {

View File

@@ -661,12 +661,25 @@ Return ONLY the PostgREST filter expression - no explanations, no markdown, no e
       placeholder: 'folder/subfolder/',
       condition: { field: 'operation', value: 'storage_upload' },
     },
+    {
+      id: 'file',
+      title: 'File',
+      type: 'file-upload',
+      canonicalParamId: 'fileData',
+      placeholder: 'Upload file to storage',
+      condition: { field: 'operation', value: 'storage_upload' },
+      mode: 'basic',
+      multiple: false,
+      required: true,
+    },
     {
       id: 'fileContent',
       title: 'File Content',
       type: 'code',
+      canonicalParamId: 'fileData',
       placeholder: 'Base64 encoded for binary files, or plain text',
       condition: { field: 'operation', value: 'storage_upload' },
+      mode: 'advanced',
       required: true,
     },
     {
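
Context for the new `canonicalParamId` lines: the basic-mode `file` upload and the advanced-mode `fileContent` editor are two views of one canonical `fileData` parameter, which is why the rewritten API route earlier in this diff accepts either a file object or a base64/plain-text string under a single field. A hedged sketch of the resolution idea (function and types are illustrative, not the Sim block API):

```typescript
type StorageUploadValues = { file?: { name: string; url: string }; fileContent?: string }

// Whichever subblock matches the block's current mode supplies the canonical
// 'fileData' argument that the storage_upload tool ultimately receives.
function resolveFileData(values: StorageUploadValues, mode: 'basic' | 'advanced') {
  return { fileData: mode === 'basic' ? values.file : values.fileContent }
}
```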

View File

@@ -680,6 +680,10 @@ export function useCollaborativeWorkflow() {
       previousPositions?: Map<string, { x: number; y: number; parentId?: string }>
     }
   ) => {
+    if (isBaselineDiffView) {
+      return
+    }
     if (!isInActiveRoom()) {
       logger.debug('Skipping batch position update - not in active workflow')
       return
@@ -725,7 +729,7 @@
         }
       }
     },
-    [addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
+    [isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
   )

   const collaborativeUpdateBlockName = useCallback(
@@ -817,6 +821,10 @@
   const collaborativeBatchToggleBlockEnabled = useCallback(
     (ids: string[]) => {
+      if (isBaselineDiffView) {
+        return
+      }
       if (ids.length === 0) return

       const previousStates: Record<string, boolean> = {}
@@ -849,7 +857,7 @@
       undoRedo.recordBatchToggleEnabled(validIds, previousStates)
     },
-    [addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
+    [isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
   )

   const collaborativeBatchUpdateParent = useCallback(
@@ -861,6 +869,10 @@
       affectedEdges: Edge[]
     }>
   ) => {
+    if (isBaselineDiffView) {
+      return
+    }
     if (!isInActiveRoom()) {
       logger.debug('Skipping batch update parent - not in active workflow')
       return
@@ -931,7 +943,7 @@
       logger.debug('Batch updated parent for blocks', { updateCount: updates.length })
     },
-    [isInActiveRoom, undoRedo, addToQueue, activeWorkflowId, session?.user?.id]
+    [isBaselineDiffView, isInActiveRoom, undoRedo, addToQueue, activeWorkflowId, session?.user?.id]
   )

   const collaborativeToggleBlockAdvancedMode = useCallback(
@@ -951,18 +963,37 @@
   const collaborativeSetBlockCanonicalMode = useCallback(
     (id: string, canonicalId: string, canonicalMode: 'basic' | 'advanced') => {
-      executeQueuedOperation(
-        BLOCK_OPERATIONS.UPDATE_CANONICAL_MODE,
-        OPERATION_TARGETS.BLOCK,
-        { id, canonicalId, canonicalMode },
-        () => useWorkflowStore.getState().setBlockCanonicalMode(id, canonicalId, canonicalMode)
-      )
+      if (isBaselineDiffView) {
+        return
+      }
+
+      useWorkflowStore.getState().setBlockCanonicalMode(id, canonicalId, canonicalMode)
+
+      if (!activeWorkflowId) {
+        return
+      }
+
+      const operationId = crypto.randomUUID()
+      addToQueue({
+        id: operationId,
+        operation: {
+          operation: BLOCK_OPERATIONS.UPDATE_CANONICAL_MODE,
+          target: OPERATION_TARGETS.BLOCK,
+          payload: { id, canonicalId, canonicalMode },
+        },
+        workflowId: activeWorkflowId,
+        userId: session?.user?.id || 'unknown',
+      })
     },
-    [executeQueuedOperation]
+    [isBaselineDiffView, activeWorkflowId, addToQueue, session?.user?.id]
   )

   const collaborativeBatchToggleBlockHandles = useCallback(
     (ids: string[]) => {
+      if (isBaselineDiffView) {
+        return
+      }
       if (ids.length === 0) return

       const previousStates: Record<string, boolean> = {}
@@ -995,11 +1026,15 @@
       undoRedo.recordBatchToggleHandles(validIds, previousStates)
     },
-    [addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
+    [isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
   )

   const collaborativeBatchAddEdges = useCallback(
     (edges: Edge[], options?: { skipUndoRedo?: boolean }) => {
+      if (isBaselineDiffView) {
+        return false
+      }
       if (!isInActiveRoom()) {
         logger.debug('Skipping batch add edges - not in active workflow')
         return false
@@ -1035,11 +1070,15 @@
       return true
     },
-    [addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
+    [isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
   )

   const collaborativeBatchRemoveEdges = useCallback(
     (edgeIds: string[], options?: { skipUndoRedo?: boolean }) => {
+      if (isBaselineDiffView) {
+        return false
+      }
       if (!isInActiveRoom()) {
         logger.debug('Skipping batch remove edges - not in active workflow')
         return false
@@ -1089,7 +1128,7 @@
       logger.info('Batch removed edges', { count: validEdgeIds.length })
       return true
     },
-    [isInActiveRoom, addToQueue, activeWorkflowId, session, undoRedo]
+    [isBaselineDiffView, isInActiveRoom, addToQueue, activeWorkflowId, session, undoRedo]
   )

   const collaborativeSetSubblockValue = useCallback(
@@ -1165,6 +1204,10 @@
     (blockId: string, subblockId: string, value: any) => {
       if (isApplyingRemoteChange.current) return

+      if (isBaselineDiffView) {
+        return
+      }
       if (!isInActiveRoom()) {
         logger.debug('Skipping tag selection - not in active workflow', {
           currentWorkflowId,
@@ -1192,7 +1235,14 @@
         userId: session?.user?.id || 'unknown',
       })
     },
-    [addToQueue, currentWorkflowId, activeWorkflowId, session?.user?.id, isInActiveRoom]
+    [
+      isBaselineDiffView,
+      addToQueue,
+      currentWorkflowId,
+      activeWorkflowId,
+      session?.user?.id,
+      isInActiveRoom,
+    ]
   )

   const collaborativeUpdateLoopType = useCallback(
@@ -1538,6 +1588,10 @@
   const collaborativeBatchRemoveBlocks = useCallback(
     (blockIds: string[], options?: { skipUndoRedo?: boolean }) => {
+      if (isBaselineDiffView) {
+        return false
+      }
       if (!isInActiveRoom()) {
         logger.debug('Skipping batch remove blocks - not in active workflow')
         return false
@@ -1619,6 +1673,7 @@
       return true
     },
     [
+      isBaselineDiffView,
       addToQueue,
       activeWorkflowId,
       session?.user?.id,
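
Every hunk above applies the same pattern, matching the commit message "add check consistently for baseline diff": each collaborative mutation early-returns while a read-only baseline diff is on screen, and `isBaselineDiffView` joins each `useCallback` dependency list so the memoized callback is rebuilt when the view mode flips. A minimal sketch of the pattern in isolation (hook and argument names are illustrative, not the real signature of `useCollaborativeWorkflow`):

```typescript
import { useCallback } from 'react'

function useGuardedMutation(isBaselineDiffView: boolean, addToQueue: (op: unknown) => void) {
  return useCallback(
    (payload: unknown) => {
      if (isBaselineDiffView) {
        return // a baseline diff is read-only; never enqueue operations from it
      }
      addToQueue(payload)
    },
    // The flag must appear in the deps, exactly as the diff adds it, or the
    // callback would keep a stale false value after entering the diff view.
    [isBaselineDiffView, addToQueue]
  )
}
```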

View File

@@ -8,17 +8,6 @@ const logger = createLogger('EmbeddingUtils')
 const MAX_TOKENS_PER_REQUEST = 8000
 const MAX_CONCURRENT_BATCHES = env.KB_CONFIG_CONCURRENCY_LIMIT || 50

-const EMBEDDING_DIMENSIONS = 1536
-
-/**
- * Check if the model supports custom dimensions.
- * text-embedding-3-* models support the dimensions parameter.
- * Checks for 'embedding-3' to handle Azure deployments with custom naming conventions.
- */
-function supportsCustomDimensions(modelName: string): boolean {
-  const name = modelName.toLowerCase()
-  return name.includes('embedding-3') && !name.includes('ada')
-}
 export class EmbeddingAPIError extends Error {
   public status: number
@@ -104,19 +93,15 @@ async function getEmbeddingConfig(
 async function callEmbeddingAPI(inputs: string[], config: EmbeddingConfig): Promise<number[][]> {
   return retryWithExponentialBackoff(
     async () => {
-      const useDimensions = supportsCustomDimensions(config.modelName)
       const requestBody = config.useAzure
         ? {
             input: inputs,
             encoding_format: 'float',
-            ...(useDimensions && { dimensions: EMBEDDING_DIMENSIONS }),
           }
         : {
             input: inputs,
             model: config.modelName,
             encoding_format: 'float',
-            ...(useDimensions && { dimensions: EMBEDDING_DIMENSIONS }),
           }
       const response = await fetch(config.apiUrl, {
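
With `supportsCustomDimensions` and the `dimensions` field removed, the request falls back to the API default; for `text-embedding-3-small` that default is 1536-dimensional output, so existing pgvector columns keep their width (and the test earlier in this diff now asserts a body without `dimensions`). A sketch of the simplified body construction, with `EmbeddingConfig` reduced to just the fields visible in this hunk:

```typescript
// Assumed minimal shape of EmbeddingConfig, inferred from the diff above.
interface EmbeddingConfig {
  useAzure: boolean
  modelName: string
  apiUrl: string
}

// Azure infers the model from the deployment encoded in apiUrl, so only the
// non-Azure branch still sends an explicit model name.
function buildRequestBody(inputs: string[], config: EmbeddingConfig) {
  return config.useAzure
    ? { input: inputs, encoding_format: 'float' }
    : { input: inputs, model: config.modelName, encoding_format: 'float' }
}
```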

View File

@@ -18,52 +18,6 @@ const logger = createLogger('BlobClient')
 let _blobServiceClient: BlobServiceClientInstance | null = null

-interface ParsedCredentials {
-  accountName: string
-  accountKey: string
-}
-
-/**
- * Extract account name and key from an Azure connection string.
- * Connection strings have the format: DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...;EndpointSuffix=...
- */
-function parseConnectionString(connectionString: string): ParsedCredentials {
-  const accountNameMatch = connectionString.match(/AccountName=([^;]+)/)
-  if (!accountNameMatch) {
-    throw new Error('Cannot extract account name from connection string')
-  }
-  const accountKeyMatch = connectionString.match(/AccountKey=([^;]+)/)
-  if (!accountKeyMatch) {
-    throw new Error('Cannot extract account key from connection string')
-  }
-  return {
-    accountName: accountNameMatch[1],
-    accountKey: accountKeyMatch[1],
-  }
-}
-
-/**
- * Get account credentials from BLOB_CONFIG, extracting from connection string if necessary.
- */
-function getAccountCredentials(): ParsedCredentials {
-  if (BLOB_CONFIG.connectionString) {
-    return parseConnectionString(BLOB_CONFIG.connectionString)
-  }
-  if (BLOB_CONFIG.accountName && BLOB_CONFIG.accountKey) {
-    return {
-      accountName: BLOB_CONFIG.accountName,
-      accountKey: BLOB_CONFIG.accountKey,
-    }
-  }
-  throw new Error(
-    'Azure Blob Storage credentials are missing: set AZURE_CONNECTION_STRING or both AZURE_ACCOUNT_NAME and AZURE_ACCOUNT_KEY'
-  )
-}
 export async function getBlobServiceClient(): Promise<BlobServiceClientInstance> {
   if (_blobServiceClient) return _blobServiceClient
@@ -173,8 +127,6 @@ export async function getPresignedUrl(key: string, expiresIn = 3600) {
   const containerClient = blobServiceClient.getContainerClient(BLOB_CONFIG.containerName)
   const blockBlobClient = containerClient.getBlockBlobClient(key)

-  const { accountName, accountKey } = getAccountCredentials()
   const sasOptions = {
     containerName: BLOB_CONFIG.containerName,
     blobName: key,
@@ -185,7 +137,13 @@
   const sasToken = generateBlobSASQueryParameters(
     sasOptions,
-    new StorageSharedKeyCredential(accountName, accountKey)
+    new StorageSharedKeyCredential(
+      BLOB_CONFIG.accountName,
+      BLOB_CONFIG.accountKey ??
+        (() => {
+          throw new Error('AZURE_ACCOUNT_KEY is required when using account name authentication')
+        })()
+    )
   ).toString()

   return `${blockBlobClient.url}?${sasToken}`
@@ -210,14 +168,9 @@ export async function getPresignedUrlWithConfig(
     StorageSharedKeyCredential,
   } = await import('@azure/storage-blob')

   let tempBlobServiceClient: BlobServiceClientInstance
-  let accountName: string
-  let accountKey: string
   if (customConfig.connectionString) {
     tempBlobServiceClient = BlobServiceClient.fromConnectionString(customConfig.connectionString)
-    const credentials = parseConnectionString(customConfig.connectionString)
-    accountName = credentials.accountName
-    accountKey = credentials.accountKey
   } else if (customConfig.accountName && customConfig.accountKey) {
     const sharedKeyCredential = new StorageSharedKeyCredential(
       customConfig.accountName,
@@ -227,8 +180,6 @@
       `https://${customConfig.accountName}.blob.core.windows.net`,
       sharedKeyCredential
     )
-    accountName = customConfig.accountName
-    accountKey = customConfig.accountKey
   } else {
     throw new Error(
       'Custom blob config must include either connectionString or accountName + accountKey'
@@ -248,7 +199,13 @@
   const sasToken = generateBlobSASQueryParameters(
     sasOptions,
-    new StorageSharedKeyCredential(accountName, accountKey)
+    new StorageSharedKeyCredential(
+      customConfig.accountName,
+      customConfig.accountKey ??
+        (() => {
+          throw new Error('Account key is required when using account name authentication')
+        })()
+    )
   ).toString()

   return `${blockBlobClient.url}?${sasToken}`
@@ -446,9 +403,13 @@ export async function getMultipartPartUrls(
   if (customConfig) {
     if (customConfig.connectionString) {
       blobServiceClient = BlobServiceClient.fromConnectionString(customConfig.connectionString)
-      const credentials = parseConnectionString(customConfig.connectionString)
-      accountName = credentials.accountName
-      accountKey = credentials.accountKey
+      const match = customConfig.connectionString.match(/AccountName=([^;]+)/)
+      if (!match) throw new Error('Cannot extract account name from connection string')
+      accountName = match[1]
+      const keyMatch = customConfig.connectionString.match(/AccountKey=([^;]+)/)
+      if (!keyMatch) throw new Error('Cannot extract account key from connection string')
+      accountKey = keyMatch[1]
     } else if (customConfig.accountName && customConfig.accountKey) {
       const credential = new StorageSharedKeyCredential(
         customConfig.accountName,
@@ -467,9 +428,12 @@
   } else {
     blobServiceClient = await getBlobServiceClient()
     containerName = BLOB_CONFIG.containerName
-    const credentials = getAccountCredentials()
-    accountName = credentials.accountName
-    accountKey = credentials.accountKey
+    accountName = BLOB_CONFIG.accountName
+    accountKey =
+      BLOB_CONFIG.accountKey ||
+      (() => {
+        throw new Error('AZURE_ACCOUNT_KEY is required')
+      })()
   }

   const containerClient = blobServiceClient.getContainerClient(containerName)
@@ -537,10 +501,12 @@ export async function completeMultipartUpload(
   const containerClient = blobServiceClient.getContainerClient(containerName)
const containerClient = blobServiceClient.getContainerClient(containerName) const containerClient = blobServiceClient.getContainerClient(containerName)
const blockBlobClient = containerClient.getBlockBlobClient(key) const blockBlobClient = containerClient.getBlockBlobClient(key)
// Sort parts by part number and extract block IDs
const sortedBlockIds = parts const sortedBlockIds = parts
.sort((a, b) => a.partNumber - b.partNumber) .sort((a, b) => a.partNumber - b.partNumber)
.map((part) => part.blockId) .map((part) => part.blockId)
// Commit the block list to create the final blob
await blockBlobClient.commitBlockList(sortedBlockIds, { await blockBlobClient.commitBlockList(sortedBlockIds, {
metadata: { metadata: {
multipartUpload: 'completed', multipartUpload: 'completed',
@@ -591,8 +557,10 @@ export async function abortMultipartUpload(key: string, customConfig?: BlobConfi
const blockBlobClient = containerClient.getBlockBlobClient(key) const blockBlobClient = containerClient.getBlockBlobClient(key)
try { try {
// Delete the blob if it exists (this also cleans up any uncommitted blocks)
await blockBlobClient.deleteIfExists() await blockBlobClient.deleteIfExists()
} catch (error) { } catch (error) {
// Ignore errors since we're just cleaning up
logger.warn('Error cleaning up multipart upload:', error) logger.warn('Error cleaning up multipart upload:', error)
} }
} }

View File

@@ -27,6 +27,9 @@ export function registerEmitFunctions(
   emitSubblockUpdate = subblockEmit
   emitVariableUpdate = variableEmit
   currentRegisteredWorkflowId = workflowId
+  if (workflowId) {
+    useOperationQueueStore.getState().processNextOperation()
+  }
 }
 
 let currentRegisteredWorkflowId: string | null = null
@@ -262,16 +265,14 @@ export const useOperationQueueStore = create<OperationQueueState>((set, get) =>
       return
     }
 
-    const nextOperation = currentRegisteredWorkflowId
-      ? state.operations.find(
-          (op) => op.status === 'pending' && op.workflowId === currentRegisteredWorkflowId
-        )
-      : state.operations.find((op) => op.status === 'pending')
-
-    if (!nextOperation) {
+    if (!currentRegisteredWorkflowId) {
       return
     }
 
-    if (currentRegisteredWorkflowId && nextOperation.workflowId !== currentRegisteredWorkflowId) {
+    const nextOperation = state.operations.find(
+      (op) => op.status === 'pending' && op.workflowId === currentRegisteredWorkflowId
+    )
+    if (!nextOperation) {
       return
     }
View File

@@ -38,11 +38,12 @@ export const storageUploadTool: ToolConfig<
       visibility: 'user-or-llm',
       description: 'Optional folder path (e.g., "folder/subfolder/")',
     },
-    fileContent: {
-      type: 'string',
+    fileData: {
+      type: 'json',
       required: true,
       visibility: 'user-or-llm',
-      description: 'The file content (base64 encoded for binary files, or plain text)',
+      description:
+        'File to upload - UserFile object (basic mode) or string content (advanced mode: base64 or plain text). Supports data URLs.',
     },
     contentType: {
       type: 'string',
@@ -65,65 +66,28 @@ export const storageUploadTool: ToolConfig<
     },
   },
   request: {
-    url: (params) => {
-      // Combine folder path and fileName, ensuring proper formatting
-      let fullPath = params.fileName
-      if (params.path) {
-        // Ensure path ends with / and doesn't have double slashes
-        const folderPath = params.path.endsWith('/') ? params.path : `${params.path}/`
-        fullPath = `${folderPath}${params.fileName}`
-      }
-      return `https://${params.projectId}.supabase.co/storage/v1/object/${params.bucket}/${fullPath}`
-    },
+    url: '/api/tools/supabase/storage-upload',
     method: 'POST',
-    headers: (params) => {
-      const headers: Record<string, string> = {
-        apikey: params.apiKey,
-        Authorization: `Bearer ${params.apiKey}`,
-      }
-
-      if (params.contentType) {
-        headers['Content-Type'] = params.contentType
-      }
-
-      if (params.upsert) {
-        headers['x-upsert'] = 'true'
-      }
-
-      return headers
-    },
-    body: (params) => {
-      // Return the file content wrapped in an object
-      // The actual upload will need to handle this appropriately
-      return {
-        content: params.fileContent,
-      }
-    },
-  },
-  transformResponse: async (response: Response) => {
-    let data
-    try {
-      data = await response.json()
-    } catch (parseError) {
-      throw new Error(`Failed to parse Supabase storage upload response: ${parseError}`)
-    }
-
-    return {
-      success: true,
-      output: {
-        message: 'Successfully uploaded file to storage',
-        results: data,
-      },
-      error: undefined,
-    }
+    headers: () => ({
+      'Content-Type': 'application/json',
+    }),
+    body: (params) => ({
+      projectId: params.projectId,
+      apiKey: params.apiKey,
+      bucket: params.bucket,
+      fileName: params.fileName,
+      path: params.path,
+      fileData: params.fileData,
+      contentType: params.contentType,
+      upsert: params.upsert,
+    }),
   },
   outputs: {
     message: { type: 'string', description: 'Operation status message' },
     results: {
       type: 'object',
-      description: 'Upload result including file path and metadata',
+      description: 'Upload result including file path, bucket, and public URL',
     },
   },
 }

View File

@@ -136,7 +136,7 @@ export interface SupabaseStorageUploadParams {
   bucket: string
   fileName: string
   path?: string
-  fileContent: string
+  fileData: any // UserFile object (basic mode) or string (advanced mode: base64/plain text)
   contentType?: string
   upsert?: boolean
 }

View File

@@ -52,7 +52,7 @@ services:
     deploy:
       resources:
         limits:
-          memory: 1G
+          memory: 8G
     healthcheck:
       test: ['CMD', 'wget', '--spider', '--quiet', 'http://127.0.0.1:3002/health']
       interval: 90s

View File

@@ -56,7 +56,7 @@ services:
     deploy:
       resources:
         limits:
-          memory: 1G
+          memory: 8G
     healthcheck:
      test: ['CMD', 'wget', '--spider', '--quiet', 'http://127.0.0.1:3002/health']
      interval: 90s

View File

@@ -42,7 +42,7 @@ services:
     deploy:
       resources:
         limits:
-          memory: 1G
+          memory: 4G
     environment:
       - DATABASE_URL=postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@db:5432/${POSTGRES_DB:-simstudio}
       - NEXT_PUBLIC_APP_URL=${NEXT_PUBLIC_APP_URL:-http://localhost:3000}

View File

@@ -1,362 +0,0 @@
# Enterprise Self-Hosting FAQ Response
This document addresses common questions from enterprise customers regarding self-hosted Sim deployments.
---
## 1. Resource Requirements and Scalability
### What drives resource consumption?
Sim's resource requirements are driven by several memory-intensive components:
| Component | Memory Driver | Description |
|-----------|--------------|-------------|
| **Isolated-VM** | High | JavaScript sandboxing for secure workflow code execution. Each concurrent workflow maintains an execution context in memory. |
| **File Processing** | Medium-High | Documents (PDF, DOCX, XLSX, etc.) are parsed in-memory before chunking for knowledge base operations. |
| **pgvector Operations** | Medium | Vector database operations for embeddings (1536 dimensions per vector for knowledge base). |
| **FFmpeg** | Variable | Media transcoding for audio/video processing happens synchronously in memory. |
| **Sharp** | Low-Medium | Image processing and manipulation. |
### Actual Production Metrics
Based on production telemetry from our cloud deployment:
**Main Application (simstudio)**
| Metric | Average | Peak | Notes |
|--------|---------|------|-------|
| CPU | ~10% | ~30% | Spikes during workflow execution |
| Memory | ~35% | ~75% | Increases with concurrent workflows |
**WebSocket Server (realtime)**
| Metric | Average | Peak | Notes |
|--------|---------|------|-------|
| CPU | ~1-2% | ~30% | Very lightweight |
| Memory | ~7% | ~13% | Scales with connected clients |
### Recommended Resource Tiers
Based on actual production data (60k+ users), we recommend the following tiers:
#### Small (Development/Testing)
- **CPU**: 2 cores
- **RAM**: 12 GB
- **Storage**: 20 GB SSD
- **Use case**: 1-5 users, development, testing, light workloads
#### Standard (Teams)
- **CPU**: 4 cores
- **RAM**: 16 GB
- **Storage**: 50 GB SSD
- **Use case**: 5-50 users, moderate workflow execution
#### Production (Enterprise)
- **CPU**: 8+ cores
- **RAM**: 32+ GB
- **Storage**: 100+ GB SSD
- **Use case**: 50+ users, high availability, heavy workflow execution
- **Note**: Consider running multiple replicas for high availability
### Memory Breakdown (Standard Deployment)
| Component | Recommended | Notes |
|-----------|-------------|-------|
| Main App | 6-8 GB | Handles workflow execution, API, UI (peaks to 12 GB under heavy load) |
| WebSocket | 1 GB | Real-time updates (typically uses 300-500 MB) |
| PostgreSQL + pgvector | 2-4 GB | Database with vector extensions |
| OS/Buffer | 2-4 GB | System overhead, file cache |
| **Total** | **~12-16 GB** | |
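
In Docker Compose terms, this breakdown maps onto per-service memory limits. The sketch below is illustrative only: the service names `simstudio` and `realtime` are assumptions, and the numbers should track your tier and observed load.

```yaml
# Illustrative sketch - service names and limits are assumptions; align them
# with your actual compose file and the tier you are provisioning for.
services:
  simstudio:            # main app
    deploy:
      resources:
        limits:
          memory: 8G    # 6-8 GB typical; peaks near 12 GB under heavy load
  realtime:             # websocket server
    deploy:
      resources:
        limits:
          memory: 1G    # typically uses 300-500 MB
```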
### Scalability Considerations
- **Horizontal scaling**: The main app and WebSocket server are stateless and can be scaled horizontally behind a load balancer (see the autoscaler sketch after this list).
- **Database**: PostgreSQL can be scaled vertically or replaced with managed services (Supabase, Neon, RDS).
- **Workflow concurrency**: Each concurrent workflow execution consumes additional memory. Plan for peak usage.
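
On Kubernetes, for example, the stateless services can be autoscaled. A minimal HorizontalPodAutoscaler sketch, assuming the Helm release creates a Deployment named `sim-app` (substitute whatever your release actually produces):

```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: sim-app          # assumed Deployment name - check your release
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: sim-app
  minReplicas: 2
  maxReplicas: 6
  metrics:
    - type: Resource
      resource:
        name: memory     # memory, not CPU, is usually the limiting factor
        target:
          type: Utilization
          averageUtilization: 70
```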
---
## 2. Managing Releases in Enterprise Environments
### Multi-Environment Strategy
For enterprise deployments requiring dev/staging/production environments, we recommend deploying **separate Sim instances** for each environment:
```
┌─────────────┐ ┌─────────────┐ ┌─────────────┐
│ Dev │ -> │ Staging │ -> │ Production │
│ Instance │ │ Instance │ │ Instance │
└─────────────┘ └─────────────┘ └─────────────┘
│ │ │
v v v
Develop Test/QA Deploy
```
**Advantages**:
- Complete isolation between environments
- Independent scaling per environment
- No risk of accidental production changes
- Environment-specific configurations and credentials
### Promoting Changes Between Environments
Sim provides multiple ways to move workflows, folders, and workspaces between environments:
#### UI-Based Export/Import
1. **Export** workflows, folders, or entire workspaces from the source environment via the UI
2. **Import** into the target environment
3. Configure environment-specific variables and credentials
#### Admin APIs (Automation)
For CI/CD integration, the admin APIs let you do the following programmatically (a sketch follows this list):
- Export workflows, folders, and workspaces as JSON
- Import configurations into target environments
- Automate promotion pipelines between dev → staging → production
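
A promotion step in CI could look like the sketch below. The `/export` and `/import` routes are illustrative placeholders, not confirmed endpoint paths; consult the admin API reference for the actual routes.

```bash
# Hypothetical promotion sketch - endpoint paths are placeholders.
curl -H "x-admin-key: $DEV_ADMIN_API_KEY" \
  "https://dev.sim.example.com/api/v1/admin/workflows/<workflow-id>/export" \
  -o workflow.json

curl -X POST "https://staging.sim.example.com/api/v1/admin/workflows/import" \
  -H "x-admin-key: $STAGING_ADMIN_API_KEY" \
  -H "Content-Type: application/json" \
  --data-binary @workflow.json
```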
### Version Control Within an Instance
Within a single Sim instance, the **Deploy Modal** provides version control:
1. **Draft Mode**: Edit and test workflows without affecting the live version
2. **Explicit Deploy**: The live version is **not updated** until you explicitly click Deploy
3. **Snapshots**: Each deployment creates a snapshot of the workflow state
4. **Rollback**: Revert to any previous version at any time with one click
This allows teams to:
- Safely iterate on workflows without disrupting production
- Test changes before making them live
- Quickly recover from issues by rolling back
---
## 3. Stable Releases and Backward Compatibility
### Versioning Strategy
Sim uses the following versioning scheme:
- **Major versions** (0.x): e.g., 0.5, 0.6 - New major features
- **Minor versions** (0.x.y): e.g., 0.5.1, 0.5.2 - Incremental updates, bug fixes
### Backward Compatibility Guarantees
**Forward upgrades are safe:**
- Changes are **additive** - new features don't break existing workflows
- We avoid breaking changes between versions; when one is unavoidable, it is announced in advance
- Database migrations are automatic and handle schema changes
**Rollbacks are not guaranteed:**
- Rolling back to an older version may break things due to database schema changes
- Always backup your database before upgrading
- If you need to rollback, restore from a database backup taken before the upgrade
### Upgrade Best Practices
1. **Backup first**: Always back up your database before upgrading (see the sketch below)
2. **Review release notes**: Check for any announced changes
3. **Test in staging**: Upgrade your staging environment first
4. **Monitor after upgrade**: Verify workflows continue to function correctly
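
A minimal backup sketch, assuming the default Docker Compose setup (database service `db`, user `postgres`, database `simstudio`); adjust names and credentials to your deployment:

```bash
# Take a compressed dump before upgrading
docker compose exec -T db pg_dump -U postgres -Fc simstudio > sim-backup-$(date +%F).dump

# If a rollback is needed, restore the pre-upgrade dump
docker compose exec -T db pg_restore -U postgres -d simstudio --clean < sim-backup-<date>.dump
```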
### Enterprise Support
For enterprise customers requiring additional stability guarantees:
- Contact us for support arrangements
- We can provide guidance on upgrade planning
- Security patches are prioritized for supported versions
---
## 4. OAuth and OIDC Providers
### Built-in OAuth Providers (Environment Variables)
Only the following providers can be configured via environment variables:
| Provider | Environment Variables |
|----------|----------------------|
| **GitHub** | `GITHUB_CLIENT_ID`, `GITHUB_CLIENT_SECRET` |
| **Google** | `GOOGLE_CLIENT_ID`, `GOOGLE_CLIENT_SECRET` |
There are no plans to add additional OAuth providers via environment variables.
### All Other Identity Providers (SSO)
For any other identity providers, configure SSO through the app settings:
1. Enable SSO in environment variables:
```
SSO_ENABLED=true
NEXT_PUBLIC_SSO_ENABLED=true
```
2. Configure your identity provider in the app's SSO settings UI
Supported protocols:
- SAML 2.0
- OpenID Connect (OIDC)
Compatible with any OIDC/SAML provider including:
- Okta
- Azure AD / Entra ID
- Auth0
- Ping Identity
- OneLogin
- Custom OIDC providers
---
## 5. Known Issues and Workarounds
### SSO Save Button Disabled
**Issue**: The 'Save' button remains disabled when configuring SSO.
**Cause**: The form has strict validation on all required fields. The button remains disabled until ALL validations pass.
**Required fields for OIDC**:
- Provider ID (letters, numbers, dashes only)
- Issuer URL (must be HTTPS, except for localhost)
- Domain (no `https://` prefix, must be valid domain format)
- Client ID
- Client Secret
- Scopes (defaults to `openid,profile,email`)
**Required fields for SAML**:
- Provider ID
- Issuer URL
- Domain
- Entry Point URL
- Certificate
**Common validation issues**:
1. **Domain field**: Do NOT include `https://` - enter only the domain (e.g., `login.okta.com` not `https://login.okta.com`)
2. **Issuer URL**: Must use HTTPS protocol (except localhost for testing)
3. **Provider ID**: Only lowercase letters, numbers, and dashes allowed (e.g., `okta-prod`)
**Debugging**:
- Open browser DevTools console to check for JavaScript errors
- Ensure `SSO_ENABLED=true` and `NEXT_PUBLIC_SSO_ENABLED=true` environment variables are set
- Try using one of the suggested provider IDs from the dropdown (e.g., `okta`, `azure-ad`)
### Access Control Group Creation
**Issue**: Button appears enabled but nothing happens when clicked.
**Cause**: For self-hosted deployments, an organization must be created via the admin API before access control groups can be used.
**Required Setup**:
1. **Enable required environment variables**:
```env
ADMIN_API_KEY=your-admin-api-key
ACCESS_CONTROL_ENABLED=true
ORGANIZATIONS_ENABLED=true
NEXT_PUBLIC_ACCESS_CONTROL_ENABLED=true
NEXT_PUBLIC_ORGANIZATIONS_ENABLED=true
```
2. **Create an organization via admin API**:
```bash
# List users to get admin user ID
curl -H "x-admin-key: $ADMIN_API_KEY" \
"https://your-sim-instance.com/api/v1/admin/users?limit=10"
# Create organization
curl -X POST https://your-sim-instance.com/api/v1/admin/organizations \
-H "x-admin-key: $ADMIN_API_KEY" \
-H "Content-Type: application/json" \
-d '{"name": "Your Organization", "slug": "your-org", "ownerId": "<user-id-from-step-1>"}'
# Add members to organization
curl -X POST https://your-sim-instance.com/api/v1/admin/organizations/<org-id>/members \
-H "x-admin-key: $ADMIN_API_KEY" \
-H "Content-Type: application/json" \
-d '{"userId": "<user-id>", "role": "member"}'
```
3. **Create permission groups**: After the organization is set up, go to Settings > Permission Groups in the UI.
---
## 6. File Storage Configuration
### Supported Storage Backends
Sim supports multiple storage backends for file storage:
#### Local Storage (Default)
Files are stored on the local filesystem. Suitable for development and single-node deployments.
#### AWS S3
```env
AWS_REGION=us-east-1
AWS_ACCESS_KEY_ID=your-access-key
AWS_SECRET_ACCESS_KEY=your-secret-key
S3_BUCKET_NAME=sim-files
S3_KB_BUCKET_NAME=sim-knowledge-base
S3_EXECUTION_FILES_BUCKET_NAME=sim-execution-files
S3_CHAT_BUCKET_NAME=sim-chat-files
```
#### Azure Blob Storage
You can configure Azure Blob Storage using either a connection string or account name/key:
**Option 1: Connection String**
```env
AZURE_CONNECTION_STRING=DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...;EndpointSuffix=core.windows.net
AZURE_STORAGE_CONTAINER_NAME=sim-files
AZURE_STORAGE_KB_CONTAINER_NAME=sim-knowledge-base
AZURE_STORAGE_EXECUTION_FILES_CONTAINER_NAME=sim-execution-files
AZURE_STORAGE_CHAT_CONTAINER_NAME=sim-chat-files
```
**Option 2: Account Name and Key**
```env
AZURE_ACCOUNT_NAME=your-storage-account
AZURE_ACCOUNT_KEY=your-storage-key
AZURE_STORAGE_CONTAINER_NAME=sim-files
AZURE_STORAGE_KB_CONTAINER_NAME=sim-knowledge-base
AZURE_STORAGE_EXECUTION_FILES_CONTAINER_NAME=sim-execution-files
AZURE_STORAGE_CHAT_CONTAINER_NAME=sim-chat-files
```
Both options are fully supported. The connection string is automatically parsed to extract credentials when needed for operations like presigned URL generation.
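
For reference, the extraction amounts to two regex matches over the connection string. A minimal sketch that mirrors the pattern used by the storage client (illustrative only, not an exported helper of the codebase):

```ts
// Sketch of Azure connection-string parsing - illustrative, not a public API.
function parseAzureConnectionString(connectionString: string) {
  const name = connectionString.match(/AccountName=([^;]+)/)
  const key = connectionString.match(/AccountKey=([^;]+)/)
  if (!name || !key) {
    throw new Error('Cannot extract account credentials from connection string')
  }
  return { accountName: name[1], accountKey: key[1] }
}
```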
---
## 7. Knowledge Base Configuration
### Required Environment Variables
```env
# OpenAI API key for embeddings
OPENAI_API_KEY=your-openai-api-key
# Embedding model configuration (optional)
KB_OPENAI_MODEL_NAME=text-embedding-3-small
```
### Embedding Model Compatibility
**Supported models**:
- `text-embedding-3-small` (default, 1536 dimensions)
- `text-embedding-3-large` (1536 dimensions, automatically reduced from 3072)
- `text-embedding-ada-002` (1536 dimensions)
All text-embedding-3-* models automatically use 1536 dimensions to match the database schema. This allows you to use `text-embedding-3-large` for higher quality embeddings without schema modifications.
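
For example, switching to the larger model is a one-line change:

```env
OPENAI_API_KEY=your-openai-api-key
KB_OPENAI_MODEL_NAME=text-embedding-3-large
```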
### Database Requirements
The knowledge base requires PostgreSQL with the pgvector extension:
- PostgreSQL 12+ with pgvector
- The `vector` extension must be enabled (see the SQL below)
- Tables are created automatically during migration
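
A one-time setup sketch, assuming superuser access and a Postgres image that ships the pgvector binaries:

```sql
-- Enable pgvector (the extension binaries must already be installed)
CREATE EXTENSION IF NOT EXISTS vector;

-- Verify it is active
SELECT extversion FROM pg_extension WHERE extname = 'vector';
```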
---
## Questions?
For additional support:
- Documentation: https://docs.sim.ai
- GitHub Issues: https://github.com/simstudioai/sim/issues
- Enterprise Support: Contact your account representative

View File

@@ -10,13 +10,13 @@ global:
 app:
   enabled: true
   replicaCount: 2
 
   resources:
     limits:
-      memory: "8Gi"
+      memory: "6Gi"
       cpu: "2000m"
     requests:
-      memory: "6Gi"
+      memory: "4Gi"
       cpu: "1000m"
 
   # Production URLs (REQUIRED - update with your actual domain names)
@@ -49,14 +49,14 @@
 realtime:
   enabled: true
   replicaCount: 2
 
   resources:
     limits:
-      memory: "1Gi"
-      cpu: "500m"
+      memory: "4Gi"
+      cpu: "1000m"
     requests:
-      memory: "512Mi"
-      cpu: "250m"
+      memory: "2Gi"
+      cpu: "500m"
 
   env:
     NEXT_PUBLIC_APP_URL: "https://sim.acme.ai"

View File

@@ -29,10 +29,10 @@ app:
   # Resource limits and requests
   resources:
     limits:
-      memory: "8Gi"
+      memory: "4Gi"
       cpu: "2000m"
     requests:
-      memory: "4Gi"
+      memory: "2Gi"
       cpu: "1000m"
 
   # Node selector for pod scheduling (leave empty to allow scheduling on any node)
@@ -232,24 +232,24 @@
 realtime:
   # Enable/disable the realtime service
   enabled: true
 
   # Image configuration
   image:
     repository: simstudioai/realtime
     tag: latest
     pullPolicy: Always
 
   # Number of replicas
   replicaCount: 1
 
   # Resource limits and requests
   resources:
     limits:
+      memory: "2Gi"
+      cpu: "1000m"
+    requests:
       memory: "1Gi"
       cpu: "500m"
-    requests:
-      memory: "512Mi"
-      cpu: "250m"
 
   # Node selector for pod scheduling (leave empty to allow scheduling on any node)
   nodeSelector: {}
nodeSelector: {} nodeSelector: {}